summaryrefslogtreecommitdiff
path: root/deps
diff options
context:
space:
mode:
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.gitignore18
-rw-r--r--deps/v8/AUTHORS2
-rw-r--r--deps/v8/BUILD.gn198
-rw-r--r--deps/v8/ChangeLog1687
-rw-r--r--deps/v8/DEPS16
-rw-r--r--deps/v8/Makefile8
-rw-r--r--deps/v8/PRESUBMIT.py4
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h61
-rw-r--r--deps/v8/build_overrides/v8.gni2
-rw-r--r--deps/v8/gni/isolate.gni25
-rw-r--r--deps/v8/gni/v8.gni24
-rw-r--r--deps/v8/gypfiles/all.gyp6
-rwxr-xr-xdeps/v8/gypfiles/get_landmines.py1
-rw-r--r--deps/v8/gypfiles/isolate.gypi2
-rw-r--r--deps/v8/gypfiles/standalone.gypi5
-rw-r--r--deps/v8/include/libplatform/DEPS4
-rw-r--r--deps/v8/include/libplatform/libplatform-export.h29
-rw-r--r--deps/v8/include/libplatform/libplatform.h10
-rw-r--r--deps/v8/include/libplatform/v8-tracing.h23
-rw-r--r--deps/v8/include/v8-profiler.h2
-rw-r--r--deps/v8/include/v8-util.h17
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h225
-rw-r--r--deps/v8/infra/config/cq.cfg2
-rw-r--r--deps/v8/infra/mb/mb_config.pyl28
-rw-r--r--deps/v8/samples/samples.gyp1
-rw-r--r--deps/v8/src/DEPS1
-rw-r--r--deps/v8/src/accessors.cc144
-rw-r--r--deps/v8/src/accessors.h33
-rw-r--r--deps/v8/src/address-map.cc10
-rw-r--r--deps/v8/src/address-map.h64
-rw-r--r--deps/v8/src/allocation.h3
-rw-r--r--deps/v8/src/api-arguments.h2
-rw-r--r--deps/v8/src/api-natives.cc23
-rw-r--r--deps/v8/src/api.cc558
-rw-r--r--deps/v8/src/api.h8
-rw-r--r--deps/v8/src/arguments.h3
-rw-r--r--deps/v8/src/arm/assembler-arm.cc12
-rw-r--r--deps/v8/src/arm/assembler-arm.h6
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc458
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc2
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc15
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc313
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h45
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc8
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h3
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc438
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc18
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc228
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h49
-rw-r--r--deps/v8/src/asmjs/asm-js.cc8
-rw-r--r--deps/v8/src/asmjs/asm-typer.cc120
-rw-r--r--deps/v8/src/asmjs/asm-typer.h15
-rw-r--r--deps/v8/src/asmjs/asm-types.h17
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.cc131
-rw-r--r--deps/v8/src/asmjs/asm-wasm-builder.h7
-rw-r--r--deps/v8/src/asmjs/switch-logic.cc (renamed from deps/v8/src/wasm/switch-logic.cc)2
-rw-r--r--deps/v8/src/asmjs/switch-logic.h (renamed from deps/v8/src/wasm/switch-logic.h)3
-rw-r--r--deps/v8/src/assembler.cc37
-rw-r--r--deps/v8/src/assembler.h25
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc9
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h2
-rw-r--r--deps/v8/src/ast/ast-numbering.cc32
-rw-r--r--deps/v8/src/ast/ast-traversal-visitor.h9
-rw-r--r--deps/v8/src/ast/ast-types.cc44
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc47
-rw-r--r--deps/v8/src/ast/ast-value-factory.h53
-rw-r--r--deps/v8/src/ast/ast.cc95
-rw-r--r--deps/v8/src/ast/ast.h101
-rw-r--r--deps/v8/src/ast/modules.cc103
-rw-r--r--deps/v8/src/ast/modules.h32
-rw-r--r--deps/v8/src/ast/prettyprinter.cc21
-rw-r--r--deps/v8/src/ast/prettyprinter.h2
-rw-r--r--deps/v8/src/ast/scopeinfo.cc113
-rw-r--r--deps/v8/src/ast/scopes.cc320
-rw-r--r--deps/v8/src/ast/scopes.h82
-rw-r--r--deps/v8/src/ast/variables.cc10
-rw-r--r--deps/v8/src/ast/variables.h12
-rw-r--r--deps/v8/src/background-parsing-task.cc12
-rw-r--r--deps/v8/src/bailout-reason.h12
-rw-r--r--deps/v8/src/base/atomic-utils.h56
-rw-r--r--deps/v8/src/base/atomicops.h41
-rw-r--r--deps/v8/src/base/atomicops_internals_arm64_gcc.h317
-rw-r--r--deps/v8/src/base/atomicops_internals_arm_gcc.h304
-rw-r--r--deps/v8/src/base/atomicops_internals_atomicword_compat.h10
-rw-r--r--deps/v8/src/base/atomicops_internals_mac.h216
-rw-r--r--deps/v8/src/base/atomicops_internals_mips64_gcc.h310
-rw-r--r--deps/v8/src/base/atomicops_internals_mips_gcc.h161
-rw-r--r--deps/v8/src/base/atomicops_internals_portable.h172
-rw-r--r--deps/v8/src/base/atomicops_internals_ppc_gcc.h168
-rw-r--r--deps/v8/src/base/atomicops_internals_s390_gcc.h152
-rw-r--r--deps/v8/src/base/atomicops_internals_tsan.h363
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_gcc.cc116
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_gcc.h275
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_msvc.h41
-rw-r--r--deps/v8/src/base/base-export.h31
-rw-r--r--deps/v8/src/base/bits.h32
-rw-r--r--deps/v8/src/base/compiler-specific.h42
-rw-r--r--deps/v8/src/base/cpu.cc8
-rw-r--r--deps/v8/src/base/cpu.h3
-rw-r--r--deps/v8/src/base/debug/stack_trace.h5
-rw-r--r--deps/v8/src/base/division-by-constant.cc11
-rw-r--r--deps/v8/src/base/division-by-constant.h25
-rw-r--r--deps/v8/src/base/file-utils.cc6
-rw-r--r--deps/v8/src/base/file-utils.h13
-rw-r--r--deps/v8/src/base/functional.h9
-rw-r--r--deps/v8/src/base/hashmap.h19
-rw-r--r--deps/v8/src/base/ieee754.h42
-rw-r--r--deps/v8/src/base/logging.h14
-rw-r--r--deps/v8/src/base/once.h4
-rw-r--r--deps/v8/src/base/platform/condition-variable.h3
-rw-r--r--deps/v8/src/base/platform/mutex.h5
-rw-r--r--deps/v8/src/base/platform/platform.h11
-rw-r--r--deps/v8/src/base/platform/semaphore.h3
-rw-r--r--deps/v8/src/base/platform/time.h16
-rw-r--r--deps/v8/src/base/ring-buffer.h54
-rw-r--r--deps/v8/src/base/sys-info.h4
-rw-r--r--deps/v8/src/base/utils/random-number-generator.h3
-rw-r--r--deps/v8/src/bootstrapper.cc575
-rw-r--r--deps/v8/src/builtins/arm/builtins-arm.cc111
-rw-r--r--deps/v8/src/builtins/arm64/builtins-arm64.cc109
-rw-r--r--deps/v8/src/builtins/builtins-api.cc2
-rw-r--r--deps/v8/src/builtins/builtins-array.cc645
-rw-r--r--deps/v8/src/builtins/builtins-arraybuffer.cc4
-rw-r--r--deps/v8/src/builtins/builtins-boolean.cc2
-rw-r--r--deps/v8/src/builtins/builtins-conversion.cc186
-rw-r--r--deps/v8/src/builtins/builtins-dataview.cc4
-rw-r--r--deps/v8/src/builtins/builtins-date.cc4
-rw-r--r--deps/v8/src/builtins/builtins-error.cc4
-rw-r--r--deps/v8/src/builtins/builtins-function.cc4
-rw-r--r--deps/v8/src/builtins/builtins-generator.cc3
-rw-r--r--deps/v8/src/builtins/builtins-global.cc6
-rw-r--r--deps/v8/src/builtins/builtins-handler.cc33
-rw-r--r--deps/v8/src/builtins/builtins-iterator.cc53
-rw-r--r--deps/v8/src/builtins/builtins-math.cc402
-rw-r--r--deps/v8/src/builtins/builtins-number.cc1467
-rw-r--r--deps/v8/src/builtins/builtins-object.cc259
-rw-r--r--deps/v8/src/builtins/builtins-promise.cc84
-rw-r--r--deps/v8/src/builtins/builtins-regexp.cc1828
-rw-r--r--deps/v8/src/builtins/builtins-sharedarraybuffer.cc9
-rw-r--r--deps/v8/src/builtins/builtins-string.cc507
-rw-r--r--deps/v8/src/builtins/builtins-typedarray.cc88
-rw-r--r--deps/v8/src/builtins/builtins-utils.h8
-rw-r--r--deps/v8/src/builtins/builtins.cc8
-rw-r--r--deps/v8/src/builtins/builtins.h288
-rw-r--r--deps/v8/src/builtins/ia32/builtins-ia32.cc114
-rw-r--r--deps/v8/src/builtins/mips/builtins-mips.cc111
-rw-r--r--deps/v8/src/builtins/mips64/builtins-mips64.cc111
-rw-r--r--deps/v8/src/builtins/ppc/builtins-ppc.cc113
-rw-r--r--deps/v8/src/builtins/s390/builtins-s390.cc113
-rw-r--r--deps/v8/src/builtins/x64/builtins-x64.cc112
-rw-r--r--deps/v8/src/builtins/x87/builtins-x87.cc114
-rw-r--r--deps/v8/src/cancelable-task.cc17
-rw-r--r--deps/v8/src/cancelable-task.h19
-rw-r--r--deps/v8/src/char-predicates.h4
-rw-r--r--deps/v8/src/code-factory.cc278
-rw-r--r--deps/v8/src/code-factory.h15
-rw-r--r--deps/v8/src/code-stub-assembler.cc4622
-rw-r--r--deps/v8/src/code-stub-assembler.h476
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc319
-rw-r--r--deps/v8/src/code-stubs.cc3696
-rw-r--r--deps/v8/src/code-stubs.h559
-rw-r--r--deps/v8/src/codegen.h4
-rw-r--r--deps/v8/src/compilation-dependencies.h4
-rw-r--r--deps/v8/src/compilation-info.cc29
-rw-r--r--deps/v8/src/compilation-info.h74
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc28
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h5
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc171
-rw-r--r--deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h98
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc18
-rw-r--r--deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h4
-rw-r--r--deps/v8/src/compiler.cc590
-rw-r--r--deps/v8/src/compiler.h1
-rw-r--r--deps/v8/src/compiler/access-builder.cc101
-rw-r--r--deps/v8/src/compiler/access-builder.h24
-rw-r--r--deps/v8/src/compiler/access-info.cc37
-rw-r--r--deps/v8/src/compiler/access-info.h5
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc288
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc186
-rw-r--r--deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc5
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc114
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc61
-rw-r--r--deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc5
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc296
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h33
-rw-r--r--deps/v8/src/compiler/branch-elimination.h6
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc569
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h38
-rw-r--r--deps/v8/src/compiler/checkpoint-elimination.h5
-rw-r--r--deps/v8/src/compiler/code-assembler.cc108
-rw-r--r--deps/v8/src/compiler/code-assembler.h41
-rw-r--r--deps/v8/src/compiler/code-generator.cc72
-rw-r--r--deps/v8/src/compiler/code-generator.h2
-rw-r--r--deps/v8/src/compiler/common-node-cache.h5
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc7
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/common-operator.cc93
-rw-r--r--deps/v8/src/compiler/common-operator.h37
-rw-r--r--deps/v8/src/compiler/compiler-source-position-table.cc (renamed from deps/v8/src/compiler/source-position.cc)8
-rw-r--r--deps/v8/src/compiler/compiler-source-position-table.h (renamed from deps/v8/src/compiler/source-position.h)43
-rw-r--r--deps/v8/src/compiler/control-equivalence.h11
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.h4
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.h5
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.cc508
-rw-r--r--deps/v8/src/compiler/effect-control-linearizer.h20
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc3
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h8
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc69
-rw-r--r--deps/v8/src/compiler/escape-analysis.h3
-rw-r--r--deps/v8/src/compiler/frame-elider.cc3
-rw-r--r--deps/v8/src/compiler/gap-resolver.cc189
-rw-r--r--deps/v8/src/compiler/gap-resolver.h16
-rw-r--r--deps/v8/src/compiler/graph-reducer.h7
-rw-r--r--deps/v8/src/compiler/graph-replay.cc2
-rw-r--r--deps/v8/src/compiler/graph-trimmer.h3
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc58
-rw-r--r--deps/v8/src/compiler/graph-visualizer.h7
-rw-r--r--deps/v8/src/compiler/graph.cc8
-rw-r--r--deps/v8/src/compiler/graph.h7
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc62
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc68
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc126
-rw-r--r--deps/v8/src/compiler/instruction-codes.h14
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc1
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h1
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc233
-rw-r--r--deps/v8/src/compiler/instruction-selector.h10
-rw-r--r--deps/v8/src/compiler/instruction.cc70
-rw-r--r--deps/v8/src/compiler/instruction.h88
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc8
-rw-r--r--deps/v8/src/compiler/int64-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc656
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h21
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc129
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h9
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc181
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h13
-rw-r--r--deps/v8/src/compiler/js-frame-specialization.cc11
-rw-r--r--deps/v8/src/compiler/js-frame-specialization.h9
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc19
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc53
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.h12
-rw-r--r--deps/v8/src/compiler/js-graph.cc7
-rw-r--r--deps/v8/src/compiler/js-graph.h15
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.cc2
-rw-r--r--deps/v8/src/compiler/js-inlining-heuristic.h5
-rw-r--r--deps/v8/src/compiler/js-inlining.cc38
-rw-r--r--deps/v8/src/compiler/js-inlining.h10
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc32
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h8
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc423
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h48
-rw-r--r--deps/v8/src/compiler/js-operator.cc18
-rw-r--r--deps/v8/src/compiler/js-operator.h13
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc269
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h9
-rw-r--r--deps/v8/src/compiler/jump-threading.cc2
-rw-r--r--deps/v8/src/compiler/linkage.cc19
-rw-r--r--deps/v8/src/compiler/linkage.h17
-rw-r--r--deps/v8/src/compiler/liveness-analyzer.cc73
-rw-r--r--deps/v8/src/compiler/liveness-analyzer.h39
-rw-r--r--deps/v8/src/compiler/load-elimination.cc42
-rw-r--r--deps/v8/src/compiler/load-elimination.h6
-rw-r--r--deps/v8/src/compiler/loop-analysis.h3
-rw-r--r--deps/v8/src/compiler/loop-peeling.h6
-rw-r--r--deps/v8/src/compiler/machine-graph-verifier.cc61
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/machine-operator.h9
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc157
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc297
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc139
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h3
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc440
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc143
-rw-r--r--deps/v8/src/compiler/move-optimizer.h8
-rw-r--r--deps/v8/src/compiler/node-aux-data.h25
-rw-r--r--deps/v8/src/compiler/node-cache.cc10
-rw-r--r--deps/v8/src/compiler/node-matchers.h11
-rw-r--r--deps/v8/src/compiler/node-properties.cc68
-rw-r--r--deps/v8/src/compiler/node-properties.h15
-rw-r--r--deps/v8/src/compiler/node.h7
-rw-r--r--deps/v8/src/compiler/opcodes.h17
-rw-r--r--deps/v8/src/compiler/operation-typer.cc23
-rw-r--r--deps/v8/src/compiler/operator-properties.h3
-rw-r--r--deps/v8/src/compiler/operator.cc4
-rw-r--r--deps/v8/src/compiler/operator.h6
-rw-r--r--deps/v8/src/compiler/osr.cc54
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.cc11
-rw-r--r--deps/v8/src/compiler/pipeline-statistics.h8
-rw-r--r--deps/v8/src/compiler/pipeline.cc302
-rw-r--r--deps/v8/src/compiler/pipeline.h7
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc82
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc114
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc48
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h12
-rw-r--r--deps/v8/src/compiler/register-allocator.cc548
-rw-r--r--deps/v8/src/compiler/register-allocator.h50
-rw-r--r--deps/v8/src/compiler/representation-change.cc172
-rw-r--r--deps/v8/src/compiler/representation-change.h45
-rw-r--r--deps/v8/src/compiler/s390/code-generator-s390.cc97
-rw-r--r--deps/v8/src/compiler/s390/instruction-selector-s390.cc120
-rw-r--r--deps/v8/src/compiler/schedule.cc2
-rw-r--r--deps/v8/src/compiler/schedule.h9
-rw-r--r--deps/v8/src/compiler/scheduler.h5
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.cc410
-rw-r--r--deps/v8/src/compiler/simd-scalar-lowering.h78
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc296
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h4
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc12
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.h5
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc41
-rw-r--r--deps/v8/src/compiler/simplified-operator.h29
-rw-r--r--deps/v8/src/compiler/state-values-utils.cc3
-rw-r--r--deps/v8/src/compiler/state-values-utils.h7
-rw-r--r--deps/v8/src/compiler/tail-call-optimization.cc9
-rw-r--r--deps/v8/src/compiler/tail-call-optimization.h3
-rw-r--r--deps/v8/src/compiler/type-cache.h9
-rw-r--r--deps/v8/src/compiler/type-hint-analyzer.cc16
-rw-r--r--deps/v8/src/compiler/typed-optimization.cc43
-rw-r--r--deps/v8/src/compiler/typed-optimization.h7
-rw-r--r--deps/v8/src/compiler/typer.cc177
-rw-r--r--deps/v8/src/compiler/typer.h16
-rw-r--r--deps/v8/src/compiler/types.cc155
-rw-r--r--deps/v8/src/compiler/types.h87
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.cc82
-rw-r--r--deps/v8/src/compiler/value-numbering-reducer.h8
-rw-r--r--deps/v8/src/compiler/verifier.cc70
-rw-r--r--deps/v8/src/compiler/verifier.h3
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc268
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h19
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc58
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc113
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc73
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc99
-rw-r--r--deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc5
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc64
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc126
-rw-r--r--deps/v8/src/compiler/zone-stats.cc (renamed from deps/v8/src/compiler/zone-pool.cc)89
-rw-r--r--deps/v8/src/compiler/zone-stats.h (renamed from deps/v8/src/compiler/zone-pool.h)42
-rw-r--r--deps/v8/src/contexts.cc2
-rw-r--r--deps/v8/src/contexts.h106
-rw-r--r--deps/v8/src/counters-inl.h15
-rw-r--r--deps/v8/src/counters.cc171
-rw-r--r--deps/v8/src/counters.h211
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc72
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h92
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc155
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc69
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h96
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc189
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h25
-rw-r--r--deps/v8/src/crankshaft/compilation-phase.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc103
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h423
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc62
-rw-r--r--deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h53
-rw-r--r--deps/v8/src/crankshaft/hydrogen-osr.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-representation-changes.cc66
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc982
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h93
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc154
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc73
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h92
-rw-r--r--deps/v8/src/crankshaft/lithium-allocator.cc2
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc55
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.h2
-rw-r--r--deps/v8/src/crankshaft/lithium.cc4
-rw-r--r--deps/v8/src/crankshaft/lithium.h9
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc174
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h21
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc72
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h93
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc174
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h21
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc72
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h93
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc174
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h18
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc71
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h91
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc168
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-codegen-s390.h18
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.cc63
-rw-r--r--deps/v8/src/crankshaft/s390/lithium-s390.h86
-rw-r--r--deps/v8/src/crankshaft/typing.cc25
-rw-r--r--deps/v8/src/crankshaft/typing.h2
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc166
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc69
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h94
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc154
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc73
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h92
-rw-r--r--deps/v8/src/d8-posix.cc8
-rw-r--r--deps/v8/src/d8-windows.cc7
-rw-r--r--deps/v8/src/d8.cc397
-rw-r--r--deps/v8/src/d8.gyp1
-rw-r--r--deps/v8/src/d8.h9
-rw-r--r--deps/v8/src/date.cc2
-rw-r--r--deps/v8/src/debug/debug-frames.cc16
-rw-r--r--deps/v8/src/debug/debug-interface.h209
-rw-r--r--deps/v8/src/debug/debug-scopes.cc53
-rw-r--r--deps/v8/src/debug/debug-scopes.h3
-rw-r--r--deps/v8/src/debug/debug.cc160
-rw-r--r--deps/v8/src/debug/debug.h24
-rw-r--r--deps/v8/src/debug/debug.js23
-rw-r--r--deps/v8/src/debug/liveedit.cc17
-rw-r--r--deps/v8/src/debug/mirrors.js25
-rw-r--r--deps/v8/src/deoptimize-reason.h132
-rw-r--r--deps/v8/src/deoptimizer.cc178
-rw-r--r--deps/v8/src/deoptimizer.h15
-rw-r--r--deps/v8/src/disassembler.cc7
-rw-r--r--deps/v8/src/eh-frame.cc2
-rw-r--r--deps/v8/src/eh-frame.h9
-rw-r--r--deps/v8/src/elements.cc225
-rw-r--r--deps/v8/src/elements.h7
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc33
-rw-r--r--deps/v8/src/external-reference-table.cc78
-rw-r--r--deps/v8/src/external-reference-table.h23
-rw-r--r--deps/v8/src/factory.cc213
-rw-r--r--deps/v8/src/factory.h79
-rw-r--r--deps/v8/src/fast-accessor-assembler.cc2
-rw-r--r--deps/v8/src/field-index-inl.h54
-rw-r--r--deps/v8/src/field-index.h5
-rw-r--r--deps/v8/src/field-type.cc2
-rw-r--r--deps/v8/src/flag-definitions.h111
-rw-r--r--deps/v8/src/frames.cc96
-rw-r--r--deps/v8/src/frames.h64
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc129
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc116
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc192
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h16
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc126
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc123
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc123
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc118
-rw-r--r--deps/v8/src/full-codegen/s390/full-codegen-s390.cc110
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc124
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc126
-rw-r--r--deps/v8/src/gdb-jit.cc4
-rw-r--r--deps/v8/src/global-handles.cc84
-rw-r--r--deps/v8/src/global-handles.h3
-rw-r--r--deps/v8/src/globals.h68
-rw-r--r--deps/v8/src/handles.cc2
-rw-r--r--deps/v8/src/handles.h4
-rw-r--r--deps/v8/src/heap-symbols.h99
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h2
-rw-r--r--deps/v8/src/heap/gc-tracer.cc225
-rw-r--r--deps/v8/src/heap/gc-tracer.h108
-rw-r--r--deps/v8/src/heap/heap-inl.h62
-rw-r--r--deps/v8/src/heap/heap.cc561
-rw-r--r--deps/v8/src/heap/heap.h206
-rw-r--r--deps/v8/src/heap/incremental-marking.cc22
-rw-r--r--deps/v8/src/heap/incremental-marking.h5
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h10
-rw-r--r--deps/v8/src/heap/mark-compact.cc714
-rw-r--r--deps/v8/src/heap/mark-compact.h149
-rw-r--r--deps/v8/src/heap/memory-reducer.cc41
-rw-r--r--deps/v8/src/heap/memory-reducer.h6
-rw-r--r--deps/v8/src/heap/object-stats.cc118
-rw-r--r--deps/v8/src/heap/object-stats.h11
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h5
-rw-r--r--deps/v8/src/heap/objects-visiting.cc39
-rw-r--r--deps/v8/src/heap/page-parallel-job.h3
-rw-r--r--deps/v8/src/heap/remembered-set.cc107
-rw-r--r--deps/v8/src/heap/remembered-set.h17
-rw-r--r--deps/v8/src/heap/scavenge-job.h3
-rw-r--r--deps/v8/src/heap/scavenger-inl.h6
-rw-r--r--deps/v8/src/heap/scavenger.cc41
-rw-r--r--deps/v8/src/heap/scavenger.h3
-rw-r--r--deps/v8/src/heap/slot-set.h100
-rw-r--r--deps/v8/src/heap/spaces-inl.h15
-rw-r--r--deps/v8/src/heap/spaces.cc314
-rw-r--r--deps/v8/src/heap/spaces.h336
-rw-r--r--deps/v8/src/heap/store-buffer.cc135
-rw-r--r--deps/v8/src/heap/store-buffer.h112
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc5
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h3
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc560
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc16
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc260
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h32
-rw-r--r--deps/v8/src/ic/access-compiler-data.h48
-rw-r--r--deps/v8/src/ic/access-compiler.cc13
-rw-r--r--deps/v8/src/ic/access-compiler.h12
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc22
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc73
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc244
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc23
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc80
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc271
-rw-r--r--deps/v8/src/ic/handler-compiler.cc64
-rw-r--r--deps/v8/src/ic/handler-compiler.h15
-rw-r--r--deps/v8/src/ic/handler-configuration-inl.h145
-rw-r--r--deps/v8/src/ic/handler-configuration.h200
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc21
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc71
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc244
-rw-r--r--deps/v8/src/ic/ic-compiler.cc20
-rw-r--r--deps/v8/src/ic/ic-inl.h6
-rw-r--r--deps/v8/src/ic/ic-state.cc10
-rw-r--r--deps/v8/src/ic/ic-state.h10
-rw-r--r--deps/v8/src/ic/ic.cc657
-rw-r--r--deps/v8/src/ic/ic.h39
-rw-r--r--deps/v8/src/ic/keyed-store-generic.cc549
-rw-r--r--deps/v8/src/ic/keyed-store-generic.h23
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc22
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc69
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc244
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc22
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc69
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc245
-rw-r--r--deps/v8/src/ic/ppc/access-compiler-ppc.cc22
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc67
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc250
-rw-r--r--deps/v8/src/ic/s390/access-compiler-s390.cc21
-rw-r--r--deps/v8/src/ic/s390/handler-compiler-s390.cc73
-rw-r--r--deps/v8/src/ic/s390/ic-s390.cc242
-rw-r--r--deps/v8/src/ic/stub-cache.cc44
-rw-r--r--deps/v8/src/ic/stub-cache.h6
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc23
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc71
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc243
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc21
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc71
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc244
-rw-r--r--deps/v8/src/icu_util.cc4
-rw-r--r--deps/v8/src/inspector/BUILD.gn3
-rw-r--r--deps/v8/src/inspector/DEPS5
-rw-r--r--deps/v8/src/inspector/debugger-script.js179
-rw-r--r--deps/v8/src/inspector/debugger_script_externs.js25
-rw-r--r--deps/v8/src/inspector/injected-script-source.js12
-rw-r--r--deps/v8/src/inspector/injected-script.cc321
-rw-r--r--deps/v8/src/inspector/injected-script.h94
-rw-r--r--deps/v8/src/inspector/inspected-context.cc35
-rw-r--r--deps/v8/src/inspector/inspected-context.h3
-rw-r--r--deps/v8/src/inspector/inspector.gyp6
-rw-r--r--deps/v8/src/inspector/java-script-call-frame.cc7
-rw-r--r--deps/v8/src/inspector/js_protocol.json12
-rw-r--r--deps/v8/src/inspector/remote-object-id.cc48
-rw-r--r--deps/v8/src/inspector/remote-object-id.h7
-rw-r--r--deps/v8/src/inspector/string-16.cc24
-rw-r--r--deps/v8/src/inspector/string-16.h93
-rw-r--r--deps/v8/src/inspector/string-util.cc88
-rw-r--r--deps/v8/src/inspector/string-util.h5
-rw-r--r--deps/v8/src/inspector/v8-console-agent-impl.cc15
-rw-r--r--deps/v8/src/inspector/v8-console-agent-impl.h8
-rw-r--r--deps/v8/src/inspector/v8-console.cc11
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.cc694
-rw-r--r--deps/v8/src/inspector/v8-debugger-agent-impl.h108
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.cc110
-rw-r--r--deps/v8/src/inspector/v8-debugger-script.h14
-rw-r--r--deps/v8/src/inspector/v8-debugger.cc181
-rw-r--r--deps/v8/src/inspector/v8-debugger.h33
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc123
-rw-r--r--deps/v8/src/inspector/v8-heap-profiler-agent-impl.h38
-rw-r--r--deps/v8/src/inspector/v8-injected-script-host.cc67
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.cc120
-rw-r--r--deps/v8/src/inspector/v8-inspector-session-impl.h11
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.cc47
-rw-r--r--deps/v8/src/inspector/v8-profiler-agent-impl.h13
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.cc365
-rw-r--r--deps/v8/src/inspector/v8-runtime-agent-impl.h66
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.cc4
-rw-r--r--deps/v8/src/inspector/v8-schema-agent-impl.h5
-rw-r--r--deps/v8/src/inspector/v8-value-copier.cc96
-rw-r--r--deps/v8/src/inspector/v8-value-copier.h5
-rw-r--r--deps/v8/src/interface-descriptors.cc64
-rw-r--r--deps/v8/src/interface-descriptors.h44
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc661
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h71
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.cc10
-rw-r--r--deps/v8/src/interpreter/bytecode-array-writer.h5
-rw-r--r--deps/v8/src/interpreter/bytecode-dead-code-optimizer.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-decoder.h3
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc360
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h33
-rw-r--r--deps/v8/src/interpreter/bytecode-operands.h20
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.cc42
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-optimizer.h7
-rw-r--r--deps/v8/src/interpreter/bytecode-peephole-table.h1
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.cc13
-rw-r--r--deps/v8/src/interpreter/bytecode-pipeline.h111
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h21
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.cc263
-rw-r--r--deps/v8/src/interpreter/bytecode-register-optimizer.h115
-rw-r--r--deps/v8/src/interpreter/bytecode-register.h6
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc7
-rw-r--r--deps/v8/src/interpreter/bytecodes.h38
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h3
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc417
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h12
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.cc16
-rw-r--r--deps/v8/src/interpreter/interpreter-intrinsics.h1
-rw-r--r--deps/v8/src/interpreter/interpreter.cc399
-rw-r--r--deps/v8/src/interpreter/interpreter.h24
-rw-r--r--deps/v8/src/interpreter/mkpeephole.cc27
-rw-r--r--deps/v8/src/isolate-inl.h16
-rw-r--r--deps/v8/src/isolate.cc361
-rw-r--r--deps/v8/src/isolate.h73
-rw-r--r--deps/v8/src/js/array-iterator.js168
-rw-r--r--deps/v8/src/js/array.js11
-rw-r--r--deps/v8/src/js/async-await.js20
-rw-r--r--deps/v8/src/js/collection.js3
-rw-r--r--deps/v8/src/js/i18n.js67
-rw-r--r--deps/v8/src/js/macros.py33
-rw-r--r--deps/v8/src/js/math.js60
-rw-r--r--deps/v8/src/js/prologue.js17
-rw-r--r--deps/v8/src/js/promise.js256
-rw-r--r--deps/v8/src/js/regexp.js1058
-rw-r--r--deps/v8/src/js/string.js197
-rw-r--r--deps/v8/src/js/typedarray.js7
-rw-r--r--deps/v8/src/js/v8natives.js99
-rw-r--r--deps/v8/src/json-parser.cc2
-rw-r--r--deps/v8/src/keys.cc4
-rw-r--r--deps/v8/src/layout-descriptor-inl.h4
-rw-r--r--deps/v8/src/libplatform/default-platform.cc1
-rw-r--r--deps/v8/src/libplatform/default-platform.h4
-rw-r--r--deps/v8/src/libplatform/task-queue.h3
-rw-r--r--deps/v8/src/libplatform/tracing/trace-config.cc5
-rw-r--r--deps/v8/src/libplatform/worker-thread.h4
-rw-r--r--deps/v8/src/libsampler/sampler.cc2
-rw-r--r--deps/v8/src/list.h3
-rw-r--r--deps/v8/src/log-utils.cc1
-rw-r--r--deps/v8/src/log-utils.h4
-rw-r--r--deps/v8/src/log.cc73
-rw-r--r--deps/v8/src/log.h8
-rw-r--r--deps/v8/src/lookup-cache.cc67
-rw-r--r--deps/v8/src/lookup-cache.h59
-rw-r--r--deps/v8/src/lookup.cc39
-rw-r--r--deps/v8/src/lookup.h12
-rw-r--r--deps/v8/src/machine-type.h16
-rw-r--r--deps/v8/src/messages.cc142
-rw-r--r--deps/v8/src/messages.h35
-rw-r--r--deps/v8/src/mips/assembler-mips.cc95
-rw-r--r--deps/v8/src/mips/assembler-mips.h5
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc456
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc15
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc314
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h39
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h3
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc458
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc15
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc309
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h40
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc2
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h39
-rw-r--r--deps/v8/src/objects-debug.cc191
-rw-r--r--deps/v8/src/objects-inl.h295
-rw-r--r--deps/v8/src/objects-printer.cc143
-rw-r--r--deps/v8/src/objects.cc1334
-rw-r--r--deps/v8/src/objects.h868
-rw-r--r--deps/v8/src/ostreams.h3
-rw-r--r--deps/v8/src/parsing/expression-classifier.h18
-rw-r--r--deps/v8/src/parsing/parse-info.cc27
-rw-r--r--deps/v8/src/parsing/parse-info.h30
-rw-r--r--deps/v8/src/parsing/parser-base.h203
-rw-r--r--deps/v8/src/parsing/parser.cc601
-rw-r--r--deps/v8/src/parsing/parser.h183
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc15
-rw-r--r--deps/v8/src/parsing/preparse-data-format.h17
-rw-r--r--deps/v8/src/parsing/preparse-data.cc53
-rw-r--r--deps/v8/src/parsing/preparse-data.h158
-rw-r--r--deps/v8/src/parsing/preparser.cc128
-rw-r--r--deps/v8/src/parsing/preparser.h178
-rw-r--r--deps/v8/src/parsing/rewriter.cc149
-rw-r--r--deps/v8/src/parsing/scanner-character-streams.cc184
-rw-r--r--deps/v8/src/parsing/scanner.cc6
-rw-r--r--deps/v8/src/parsing/scanner.h9
-rw-r--r--deps/v8/src/pending-compilation-error-handler.cc27
-rw-r--r--deps/v8/src/pending-compilation-error-handler.h3
-rw-r--r--deps/v8/src/perf-jit.cc8
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h3
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc453
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc15
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc268
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h28
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h2
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc22
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h3
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc2
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h11
-rw-r--r--deps/v8/src/profiler/profile-generator.cc171
-rw-r--r--deps/v8/src/profiler/profile-generator.h40
-rw-r--r--deps/v8/src/profiler/profiler-listener.cc94
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc2
-rw-r--r--deps/v8/src/profiler/tick-sample.cc9
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.cc57
-rw-r--r--deps/v8/src/profiler/tracing-cpu-profiler.h21
-rw-r--r--deps/v8/src/promise-utils.cc75
-rw-r--r--deps/v8/src/promise-utils.h32
-rw-r--r--deps/v8/src/property.h2
-rw-r--r--deps/v8/src/prototype.h47
-rw-r--r--deps/v8/src/regexp/OWNERS4
-rw-r--r--deps/v8/src/regexp/jsregexp.cc113
-rw-r--r--deps/v8/src/regexp/jsregexp.h55
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc6
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc2
-rw-r--r--deps/v8/src/regexp/regexp-utils.cc192
-rw-r--r--deps/v8/src/regexp/regexp-utils.h52
-rw-r--r--deps/v8/src/register-configuration.cc12
-rw-r--r--deps/v8/src/register-configuration.h11
-rw-r--r--deps/v8/src/runtime-profiler.cc76
-rw-r--r--deps/v8/src/runtime/runtime-array.cc90
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc4
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc2
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc199
-rw-r--r--deps/v8/src/runtime/runtime-function.cc16
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc59
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc119
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc14
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc2
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc75
-rw-r--r--deps/v8/src/runtime/runtime-module.cc39
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc34
-rw-r--r--deps/v8/src/runtime/runtime-object.cc185
-rw-r--r--deps/v8/src/runtime/runtime-promise.cc193
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc668
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc2
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc80
-rw-r--r--deps/v8/src/runtime/runtime-test.cc33
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc8
-rw-r--r--deps/v8/src/runtime/runtime.h75
-rw-r--r--deps/v8/src/s390/assembler-s390.h3
-rw-r--r--deps/v8/src/s390/code-stubs-s390.cc440
-rw-r--r--deps/v8/src/s390/interface-descriptors-s390.cc13
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.cc238
-rw-r--r--deps/v8/src/s390/macro-assembler-s390.h25
-rw-r--r--deps/v8/src/s390/simulator-s390.cc45
-rw-r--r--deps/v8/src/signature.h11
-rw-r--r--deps/v8/src/snapshot/code-serializer.cc30
-rw-r--r--deps/v8/src/snapshot/code-serializer.h60
-rw-r--r--deps/v8/src/snapshot/deserializer.cc32
-rw-r--r--deps/v8/src/snapshot/deserializer.h1
-rw-r--r--deps/v8/src/snapshot/partial-serializer.cc48
-rw-r--r--deps/v8/src/snapshot/partial-serializer.h7
-rw-r--r--deps/v8/src/snapshot/serializer-common.cc47
-rw-r--r--deps/v8/src/snapshot/serializer-common.h14
-rw-r--r--deps/v8/src/snapshot/serializer.cc6
-rw-r--r--deps/v8/src/snapshot/startup-serializer.h13
-rw-r--r--deps/v8/src/source-position-table.cc37
-rw-r--r--deps/v8/src/source-position-table.h17
-rw-r--r--deps/v8/src/source-position.cc131
-rw-r--r--deps/v8/src/source-position.h135
-rw-r--r--deps/v8/src/startup-data-util.cc5
-rw-r--r--deps/v8/src/string-builder.h1
-rw-r--r--deps/v8/src/string-stream.cc2
-rw-r--r--deps/v8/src/tracing/trace-event.cc12
-rw-r--r--deps/v8/src/tracing/trace-event.h85
-rw-r--r--deps/v8/src/tracing/traced-value.cc203
-rw-r--r--deps/v8/src/tracing/traced-value.h67
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.cc58
-rw-r--r--deps/v8/src/tracing/tracing-category-observer.h35
-rw-r--r--deps/v8/src/transitions-inl.h2
-rw-r--r--deps/v8/src/transitions.cc2
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h90
-rw-r--r--deps/v8/src/type-feedback-vector.cc43
-rw-r--r--deps/v8/src/type-feedback-vector.h4
-rw-r--r--deps/v8/src/type-hints.cc23
-rw-r--r--deps/v8/src/type-hints.h21
-rw-r--r--deps/v8/src/type-info.cc75
-rw-r--r--deps/v8/src/unicode.cc2
-rw-r--r--deps/v8/src/unicode.h8
-rw-r--r--deps/v8/src/uri.cc2
-rw-r--r--deps/v8/src/utils.h90
-rw-r--r--deps/v8/src/v8.cc4
-rw-r--r--deps/v8/src/v8.gyp115
-rw-r--r--deps/v8/src/v8.h3
-rw-r--r--deps/v8/src/value-serializer.cc188
-rw-r--r--deps/v8/src/value-serializer.h17
-rw-r--r--deps/v8/src/vector.h3
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc176
-rw-r--r--deps/v8/src/wasm/ast-decoder.h45
-rw-r--r--deps/v8/src/wasm/decoder.h91
-rw-r--r--deps/v8/src/wasm/managed.h56
-rw-r--r--deps/v8/src/wasm/module-decoder.cc327
-rw-r--r--deps/v8/src/wasm/module-decoder.h40
-rw-r--r--deps/v8/src/wasm/signature-map.cc51
-rw-r--r--deps/v8/src/wasm/signature-map.h41
-rw-r--r--deps/v8/src/wasm/wasm-debug.cc253
-rw-r--r--deps/v8/src/wasm/wasm-debug.h46
-rw-r--r--deps/v8/src/wasm/wasm-function-name-table.cc71
-rw-r--r--deps/v8/src/wasm/wasm-function-name-table.h33
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.cc104
-rw-r--r--deps/v8/src/wasm/wasm-interpreter.h8
-rw-r--r--deps/v8/src/wasm/wasm-js.cc475
-rw-r--r--deps/v8/src/wasm/wasm-js.h8
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h33
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.cc137
-rw-r--r--deps/v8/src/wasm/wasm-module-builder.h33
-rw-r--r--deps/v8/src/wasm/wasm-module.cc2444
-rw-r--r--deps/v8/src/wasm/wasm-module.h324
-rw-r--r--deps/v8/src/wasm/wasm-objects.cc359
-rw-r--r--deps/v8/src/wasm/wasm-objects.h308
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc15
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h420
-rw-r--r--deps/v8/src/wasm/wasm-result.cc21
-rw-r--r--deps/v8/src/wasm/wasm-result.h10
-rw-r--r--deps/v8/src/x64/assembler-x64.cc5
-rw-r--r--deps/v8/src/x64/assembler-x64.h3
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc447
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc2
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc15
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc289
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h41
-rw-r--r--deps/v8/src/x87/assembler-x87.cc5
-rw-r--r--deps/v8/src/x87/assembler-x87.h3
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc545
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc16
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc260
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h32
-rw-r--r--deps/v8/src/zone/accounting-allocator.cc158
-rw-r--r--deps/v8/src/zone/accounting-allocator.h61
-rw-r--r--deps/v8/src/zone/zone-allocator.h3
-rw-r--r--deps/v8/src/zone/zone-chunk-list.h452
-rw-r--r--deps/v8/src/zone/zone-segment.cc1
-rw-r--r--deps/v8/src/zone/zone-segment.h7
-rw-r--r--deps/v8/src/zone/zone.cc80
-rw-r--r--deps/v8/src/zone/zone.h51
-rw-r--r--deps/v8/test/BUILD.gn11
-rw-r--r--deps/v8/test/bot_default.gyp7
-rw-r--r--deps/v8/test/bot_default.isolate2
-rw-r--r--deps/v8/test/cctest/BUILD.gn36
-rw-r--r--deps/v8/test/cctest/asmjs/test-asm-typer.cc77
-rw-r--r--deps/v8/test/cctest/cctest.cc2
-rw-r--r--deps/v8/test/cctest/cctest.gyp19
-rw-r--r--deps/v8/test/cctest/cctest.h16
-rw-r--r--deps/v8/test/cctest/cctest.status111
-rw-r--r--deps/v8/test/cctest/compiler/code-assembler-tester.h18
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.cc24
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h5
-rw-r--r--deps/v8/test/cctest/compiler/test-gap-resolver.cc153
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-visualizer.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-instruction.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc14
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc8
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc21
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-multiple-return.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc36
-rw-r--r--deps/v8/test/cctest/compiler/test-osr.cc575
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc375
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc6
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc50
-rw-r--r--deps/v8/test/cctest/compiler/test-run-load-store.cc13
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc120
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc70
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-wasm-machops.cc4
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h7
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.cc22
-rw-r--r--deps/v8/test/cctest/heap/heap-utils.h2
-rw-r--r--deps/v8/test/cctest/heap/test-alloc.cc17
-rw-r--r--deps/v8/test/cctest/heap/test-array-buffer-tracker.cc2
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc243
-rw-r--r--deps/v8/test/cctest/heap/test-mark-compact.cc16
-rw-r--r--deps/v8/test/cctest/heap/test-page-promotion.cc41
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc21
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden3
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden36
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden46
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden26
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden16
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden7
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden21
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden43
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden19
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden23
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden48
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden547
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden32
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden12
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden5
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden8
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden7
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden17
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden20
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden151
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden223
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden10
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden22
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden23
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden51
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden91
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden453
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden13
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden64
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden1034
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden7
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden9
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden6
-rw-r--r--deps/v8/test/cctest/interpreter/source-position-matcher.cc3
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc6
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc110
-rw-r--r--deps/v8/test/cctest/libplatform/test-tracing.cc11
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner-streams.cc9
-rw-r--r--deps/v8/test/cctest/parsing/test-scanner.cc13
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc140
-rw-r--r--deps/v8/test/cctest/test-api.cc479
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc28
-rw-r--r--deps/v8/test/cctest/test-ast-types.cc2
-rw-r--r--deps/v8/test/cctest/test-ast.cc4
-rw-r--r--deps/v8/test/cctest/test-atomicops.cc10
-rw-r--r--deps/v8/test/cctest/test-bit-vector.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stub-assembler.cc513
-rw-r--r--deps/v8/test/cctest/test-compiler.cc199
-rw-r--r--deps/v8/test/cctest/test-conversions.cc4
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc126
-rw-r--r--deps/v8/test/cctest/test-debug.cc49
-rw-r--r--deps/v8/test/cctest/test-dictionary.cc6
-rw-r--r--deps/v8/test/cctest/test-extra.js7
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc6
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc26
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc13
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc6
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc4
-rw-r--r--deps/v8/test/cctest/test-liveedit.cc2
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc3
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-arm.cc92
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc91
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc91
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-x64.cc6
-rw-r--r--deps/v8/test/cctest/test-modules.cc24
-rw-r--r--deps/v8/test/cctest/test-parsing.cc534
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc23
-rw-r--r--deps/v8/test/cctest/test-regexp.cc54
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm.cc6
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc2
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc10
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x64.cc6
-rw-r--r--deps/v8/test/cctest/test-run-wasm-relocation-x87.cc10
-rw-r--r--deps/v8/test/cctest/test-serialize.cc241
-rw-r--r--deps/v8/test/cctest/test-strings.cc36
-rw-r--r--deps/v8/test/cctest/test-traced-value.cc126
-rw-r--r--deps/v8/test/cctest/test-types.cc221
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc4
-rw-r--r--deps/v8/test/cctest/test-unique.cc24
-rw-r--r--deps/v8/test/cctest/types-fuzz.h38
-rw-r--r--deps/v8/test/cctest/wasm/test-managed.cc59
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-64.cc123
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc82
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc72
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc1086
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc2
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-simd-lowering.cc96
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc142
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc120
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-stack.cc20
-rw-r--r--deps/v8/test/cctest/wasm/test-wasm-trap-position.cc14
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h87
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.cc89
-rw-r--r--deps/v8/test/common/wasm/wasm-module-runner.h12
-rw-r--r--deps/v8/test/debugger/debug-evaluate-locals-optimized-double.js (renamed from deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js)61
-rw-r--r--deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js118
-rw-r--r--deps/v8/test/debugger/debug/debug-allscopes-on-debugger.js (renamed from deps/v8/test/mjsunit/debug-allscopes-on-debugger.js)8
-rw-r--r--deps/v8/test/debugger/debug/debug-break-inline.js (renamed from deps/v8/test/mjsunit/debug-break-inline.js)21
-rw-r--r--deps/v8/test/debugger/debug/debug-compile-optimized.js (renamed from deps/v8/test/mjsunit/debug-compile-optimized.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-constructor.js (renamed from deps/v8/test/mjsunit/debug-constructor.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-closure.js (renamed from deps/v8/test/mjsunit/debug-evaluate-closure.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-declaration.js (renamed from deps/v8/test/mjsunit/debug-evaluate-declaration.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-locals-capturing.js (renamed from deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js (renamed from deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js)64
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js (renamed from deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-modify-this.js (renamed from deps/v8/test/mjsunit/debug-evaluate-modify-this.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-nested-let.js (renamed from deps/v8/test/mjsunit/debug-evaluate-nested-let.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-2.js (renamed from deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-evaluate-with.js (renamed from deps/v8/test/mjsunit/debug-evaluate-with.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-event-listener.js (renamed from deps/v8/test/mjsunit/debug-event-listener.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-exceptions.js (renamed from deps/v8/test/mjsunit/debug-exceptions.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-generator-break-on-stack.js (renamed from deps/v8/test/mjsunit/debug-generator-break-on-stack.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-generator-break.js (renamed from deps/v8/test/mjsunit/debug-generator-break.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-materialized.js (renamed from deps/v8/test/mjsunit/debug-materialized.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-optimize.js (renamed from deps/v8/test/mjsunit/debug-optimize.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-print.js (renamed from deps/v8/test/mjsunit/debug-print.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stack-check-position.js (renamed from deps/v8/test/mjsunit/debug-stack-check-position.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-step-2.js61
-rw-r--r--deps/v8/test/debugger/debug/debug-step-3.js68
-rw-r--r--deps/v8/test/debugger/debug/debug-step-4.js80
-rw-r--r--deps/v8/test/debugger/debug/debug-step-end-of-script.js (renamed from deps/v8/test/mjsunit/debug-step-end-of-script.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-step-into-json.js (renamed from deps/v8/test/mjsunit/debug-step-into-json.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-step-into-valueof.js (renamed from deps/v8/test/mjsunit/debug-step-into-valueof.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-step-stub-callfunction.js (renamed from deps/v8/test/mjsunit/debug-step-stub-callfunction.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-step-turbofan.js (renamed from deps/v8/test/mjsunit/debug-step-turbofan.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-step.js43
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-accessor-ic.js (renamed from deps/v8/test/mjsunit/debug-stepin-accessor-ic.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-accessor.js (renamed from deps/v8/test/mjsunit/debug-stepin-accessor.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-builtin-callback-opt.js (renamed from deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-builtin-callback.js (renamed from deps/v8/test/mjsunit/debug-stepin-builtin-callback.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-builtin.js (renamed from deps/v8/test/mjsunit/debug-stepin-builtin.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-call-function-stub.js (renamed from deps/v8/test/mjsunit/debug-stepin-call-function-stub.js)8
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-construct-call.js (renamed from deps/v8/test/mjsunit/debug-stepin-construct-call.js)5
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-constructor.js (renamed from deps/v8/test/mjsunit/debug-stepin-constructor.js)5
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-foreach.js (renamed from deps/v8/test/mjsunit/debug-stepin-foreach.js)9
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-function-call.js (renamed from deps/v8/test/mjsunit/debug-stepin-function-call.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-stepin-property-function-call.js (renamed from deps/v8/test/mjsunit/debug-stepin-property-function-call.js)3
-rw-r--r--deps/v8/test/debugger/debug/debug-stepnext-do-while.js (renamed from deps/v8/test/mjsunit/debug-stepnext-do-while.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-recursive-function.js (renamed from deps/v8/test/mjsunit/debug-stepout-recursive-function.js)2
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part1.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part1.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part2.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part2.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part3.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part3.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part4.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part4.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part5.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part5.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part6.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part6.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part7.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part7.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-scope-part8.js (renamed from deps/v8/test/mjsunit/debug-stepout-scope-part8.js)1
-rw-r--r--deps/v8/test/debugger/debug/debug-stepout-to-builtin.js (renamed from deps/v8/test/mjsunit/debug-stepout-to-builtin.js)2
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-break-default-constructor.js (renamed from deps/v8/test/mjsunit/es6/debug-break-default-constructor.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/async-task-event.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/promise-all-caught.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/promise-all-caught.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/promise-race-caught.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/promise-race-caught.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/reentry.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/reentry.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/reject-after-resolve.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/reject-caught-late.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/reject-caught-uncaught.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/stepin-constructor.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/stepin-handler.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js)10
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/throw-caught-late.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/throw-caught-uncaught.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-promises/throw-eventually-caught.js (renamed from deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-destructuring-assignment.js (renamed from deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js (renamed from deps/v8/test/mjsunit/es6/debug-step-destructuring-bind.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js (renamed from deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-into-constructor.js (renamed from deps/v8/test/mjsunit/es6/debug-step-into-constructor.js)42
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-step-into-regexp-subclass.js (renamed from deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-stepin-collections-foreach.js (renamed from deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-stepin-generators.js (renamed from deps/v8/test/mjsunit/es6/debug-stepin-generators.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-stepin-tailcalls.js (renamed from deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js)2
-rw-r--r--deps/v8/test/debugger/debug/es6/debug-stepout-tailcalls.js (renamed from deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js)2
-rw-r--r--deps/v8/test/debugger/debug/es6/generators-relocation.js (renamed from deps/v8/test/mjsunit/es6/generators-relocation.js)1
-rw-r--r--deps/v8/test/debugger/debug/es6/regress/regress-3280.js (renamed from deps/v8/test/mjsunit/es6/regress/regress-3280.js)1
-rw-r--r--deps/v8/test/debugger/debug/for-in-opt.js (renamed from deps/v8/test/mjsunit/for-in-opt.js)1
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-basic.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-basic.js)3
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases0.js8
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases1.js8
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases2.js8
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases3.js8
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-caught-exception.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-abort-at-break.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-continue-at-break.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-in-and-out.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-in-out-out.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-in.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-in.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-nested.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-nested.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-next-constant.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-next.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-next.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/async-debug-step-out.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-step-out.js)2
-rw-r--r--deps/v8/test/debugger/debug/harmony/debug-async-break-on-stack.js (renamed from deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js)3
-rw-r--r--deps/v8/test/debugger/debug/harmony/debug-async-break.js (renamed from deps/v8/test/mjsunit/harmony/debug-async-break.js)3
-rw-r--r--deps/v8/test/debugger/debug/harmony/debug-async-function-async-task-event.js (renamed from deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js)24
-rw-r--r--deps/v8/test/debugger/debug/ignition/debug-break-mixed-stack.js (renamed from deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js)1
-rw-r--r--deps/v8/test/debugger/debug/ignition/debug-break-on-stack.js (renamed from deps/v8/test/mjsunit/ignition/debug-break-on-stack.js)1
-rw-r--r--deps/v8/test/debugger/debug/ignition/debug-break.js (renamed from deps/v8/test/mjsunit/ignition/debug-break.js)1
-rw-r--r--deps/v8/test/debugger/debug/ignition/debug-scope-on-return.js (renamed from deps/v8/test/mjsunit/ignition/debug-scope-on-return.js)1
-rw-r--r--deps/v8/test/debugger/debug/ignition/debug-step-mixed-stack.js (renamed from deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js)1
-rw-r--r--deps/v8/test/debugger/debug/ignition/debugger-statement.js (renamed from deps/v8/test/mjsunit/ignition/debugger-statement.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress-3225.js (renamed from deps/v8/test/mjsunit/regress-3225.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/debug-prepare-step-in.js (renamed from deps/v8/test/mjsunit/regress/debug-prepare-step-in.js)3
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-1170187.js (renamed from deps/v8/test/mjsunit/regress/regress-1170187.js)5
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-131994.js (renamed from deps/v8/test/mjsunit/regress/regress-131994.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-147497.js (renamed from deps/v8/test/mjsunit/regress/regress-147497.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-1523.js (renamed from deps/v8/test/mjsunit/regress/regress-1523.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-1586.js (renamed from deps/v8/test/mjsunit/regress/regress-1586.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-269.js (renamed from deps/v8/test/mjsunit/regress/regress-269.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-2825.js (renamed from deps/v8/test/mjsunit/regress/regress-2825.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-325676.js (renamed from deps/v8/test/mjsunit/regress/regress-325676.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-3717.js (renamed from deps/v8/test/mjsunit/regress/regress-3717.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-392114.js (renamed from deps/v8/test/mjsunit/regress/regress-392114.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4309-1.js (renamed from deps/v8/test/mjsunit/regress/regress-4309-1.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4309-3.js (renamed from deps/v8/test/mjsunit/regress/regress-4309-3.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4320.js (renamed from deps/v8/test/mjsunit/regress/regress-4320.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-4703.js (renamed from deps/v8/test/mjsunit/regress/regress-4703.js)9
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-491536.js (renamed from deps/v8/test/mjsunit/regress/regress-491536.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-5071.js32
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-514362.js (renamed from deps/v8/test/mjsunit/regress/regress-514362.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-5279.js (renamed from deps/v8/test/mjsunit/regress/regress-5279.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-5559.js (renamed from deps/v8/test/mjsunit/regress/regress-5559.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-617882.js (renamed from deps/v8/test/mjsunit/regress/regress-617882.js)0
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-662935.js (renamed from deps/v8/test/mjsunit/regress/regress-662935.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-94873.js (renamed from deps/v8/test/mjsunit/regress/regress-94873.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-107996.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-107996.js)14
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-323936.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-323936.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-387599.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-387599.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-401915.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-401915.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-405491.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-405491.js)0
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-405922.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-405922.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-409614.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-409614.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-467180.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-467180.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-568477-1.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-568477-3.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-568477-4.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-582048.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-582048.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-582051.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-582051.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-609046.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-609046.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-crbug-633999.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-633999.js)2
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js (renamed from deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js)3
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-debug-context-load.js (renamed from deps/v8/test/mjsunit/regress/regress-debug-context-load.js)1
-rw-r--r--deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js (renamed from deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js)1
-rw-r--r--deps/v8/test/debugger/debug/wasm/asm-debug.js89
-rw-r--r--deps/v8/test/debugger/debugger.gyp26
-rw-r--r--deps/v8/test/debugger/debugger.isolate19
-rw-r--r--deps/v8/test/debugger/debugger.status44
-rw-r--r--deps/v8/test/debugger/protocol/basic-round-trip.js24
-rw-r--r--deps/v8/test/debugger/regress/regress-5575-1.js21
-rw-r--r--deps/v8/test/debugger/regress/regress-5575-2.js21
-rw-r--r--deps/v8/test/debugger/regress/regress-5575-3.js29
-rw-r--r--deps/v8/test/debugger/regress/regress-5610.js33
-rw-r--r--deps/v8/test/debugger/test-api.js432
-rw-r--r--deps/v8/test/debugger/testcfg.py72
-rw-r--r--deps/v8/test/debugger/wrapper/break-on-debugger-stmt.js20
-rw-r--r--deps/v8/test/debugger/wrapper/enable-disable.js29
-rw-r--r--deps/v8/test/default.gyp7
-rw-r--r--deps/v8/test/default.isolate2
-rw-r--r--deps/v8/test/fuzzer/fuzzer.cc2
-rw-r--r--deps/v8/test/fuzzer/fuzzer.gyp55
-rw-r--r--deps/v8/test/fuzzer/fuzzer.isolate2
-rw-r--r--deps/v8/test/fuzzer/parser.cc3
-rw-r--r--deps/v8/test/fuzzer/regexp.cc5
-rw-r--r--deps/v8/test/fuzzer/testcfg.py4
-rw-r--r--deps/v8/test/fuzzer/wasm-call.cc183
-rw-r--r--deps/v8/test/fuzzer/wasm-code.cc21
-rw-r--r--deps/v8/test/fuzzer/wasm-section-fuzzers.cc4
-rw-r--r--deps/v8/test/fuzzer/wasm_call/foo0
-rw-r--r--deps/v8/test/inspector/BUILD.gn51
-rw-r--r--deps/v8/test/inspector/DEPS2
-rw-r--r--deps/v8/test/inspector/console/destroy-context-during-log-expected.txt8
-rw-r--r--deps/v8/test/inspector/console/destroy-context-during-log.js38
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt109
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js128
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt99
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js165
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-stack-expected.txt17
-rw-r--r--deps/v8/test/inspector/debugger/asm-js-stack.js79
-rw-r--r--deps/v8/test/inspector/debugger/break-on-exception-expected.txt12
-rw-r--r--deps/v8/test/inspector/debugger/break-on-exception.js71
-rw-r--r--deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt5
-rw-r--r--deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js25
-rw-r--r--deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt28
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt27
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js12
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt1109
-rw-r--r--deps/v8/test/inspector/debugger/get-possible-breakpoints.js203
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt253
-rw-r--r--deps/v8/test/inspector/debugger/object-preview-internal-properties.js107
-rw-r--r--deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt664
-rw-r--r--deps/v8/test/inspector/debugger/script-on-after-compile.js93
-rw-r--r--deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt3
-rw-r--r--deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt11
-rw-r--r--deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js26
-rw-r--r--deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js4
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack-expected.txt18
-rw-r--r--deps/v8/test/inspector/debugger/wasm-stack.js94
-rw-r--r--deps/v8/test/inspector/inspector-impl.cc37
-rw-r--r--deps/v8/test/inspector/inspector-test.cc208
-rw-r--r--deps/v8/test/inspector/inspector.gyp43
-rw-r--r--deps/v8/test/inspector/inspector.isolate27
-rw-r--r--deps/v8/test/inspector/protocol-test.js27
-rw-r--r--deps/v8/test/inspector/task-runner.cc60
-rw-r--r--deps/v8/test/inspector/task-runner.h25
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/BUILD.gn6
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/DEPS3
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/RunTests.cpp17
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp12
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate13
-rw-r--r--deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status6
-rw-r--r--deps/v8/test/intl/general/case-mapping.js16
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json11
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed.json27
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/LICENSE.sixspeed22
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/spread/run.js25
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/spread/spread.js73
-rw-r--r--deps/v8/test/message/message.status10
-rw-r--r--deps/v8/test/message/modules-cycle1.js (renamed from deps/v8/test/mjsunit/modules-fail-cyclic-1.js)2
-rw-r--r--deps/v8/test/message/modules-cycle1.out5
-rw-r--r--deps/v8/test/message/modules-cycle2.js (renamed from deps/v8/test/mjsunit/modules-fail-cyclic-2.js)2
-rw-r--r--deps/v8/test/message/modules-cycle2.out5
-rw-r--r--deps/v8/test/message/modules-cycle3.js8
-rw-r--r--deps/v8/test/message/modules-cycle3.out5
-rw-r--r--deps/v8/test/message/modules-cycle4.js (renamed from deps/v8/test/mjsunit/modules-fail-1.js)2
-rw-r--r--deps/v8/test/message/modules-cycle4.out5
-rw-r--r--deps/v8/test/message/modules-duplicate-export1.js (renamed from deps/v8/test/mjsunit/modules-fail-3.js)3
-rw-r--r--deps/v8/test/message/modules-duplicate-export1.out5
-rw-r--r--deps/v8/test/message/modules-duplicate-export2.js (renamed from deps/v8/test/mjsunit/modules-fail-8.js)3
-rw-r--r--deps/v8/test/message/modules-duplicate-export2.out5
-rw-r--r--deps/v8/test/message/modules-duplicate-export3.js9
-rw-r--r--deps/v8/test/message/modules-duplicate-export3.out5
-rw-r--r--deps/v8/test/message/modules-duplicate-export4.js9
-rw-r--r--deps/v8/test/message/modules-duplicate-export4.out5
-rw-r--r--deps/v8/test/message/modules-import-redeclare1.js8
-rw-r--r--deps/v8/test/message/modules-import-redeclare1.out5
-rw-r--r--deps/v8/test/message/modules-import-redeclare2.js8
-rw-r--r--deps/v8/test/message/modules-import-redeclare2.out5
-rw-r--r--deps/v8/test/message/modules-import-redeclare3.js8
-rw-r--r--deps/v8/test/message/modules-import-redeclare3.out5
-rw-r--r--deps/v8/test/message/modules-import1.js (renamed from deps/v8/test/mjsunit/modules-fail-2.js)2
-rw-r--r--deps/v8/test/message/modules-import1.out5
-rw-r--r--deps/v8/test/message/modules-import2.js7
-rw-r--r--deps/v8/test/message/modules-import2.out5
-rw-r--r--deps/v8/test/message/modules-import3.js7
-rw-r--r--deps/v8/test/message/modules-import3.out5
-rw-r--r--deps/v8/test/message/modules-import4.js (renamed from deps/v8/test/mjsunit/modules-fail-4.js)4
-rw-r--r--deps/v8/test/message/modules-import4.out5
-rw-r--r--deps/v8/test/message/modules-import5.js9
-rw-r--r--deps/v8/test/message/modules-import5.out5
-rw-r--r--deps/v8/test/message/modules-import6.js9
-rw-r--r--deps/v8/test/message/modules-import6.out5
-rw-r--r--deps/v8/test/message/modules-skip-cycle2.js (renamed from deps/v8/test/mjsunit/modules-skip-cyclic.js)2
-rw-r--r--deps/v8/test/message/modules-skip-cycle3.js6
-rw-r--r--deps/v8/test/message/modules-star-conflict1.js7
-rw-r--r--deps/v8/test/message/modules-star-conflict1.out5
-rw-r--r--deps/v8/test/message/modules-star-conflict2.js10
-rw-r--r--deps/v8/test/message/modules-star-conflict2.out5
-rw-r--r--deps/v8/test/message/modules-star-default.js (renamed from deps/v8/test/mjsunit/modules-fail-6.js)2
-rw-r--r--deps/v8/test/message/modules-star-default.out5
-rw-r--r--deps/v8/test/message/modules-undefined-export1.js7
-rw-r--r--deps/v8/test/message/modules-undefined-export1.out5
-rw-r--r--deps/v8/test/message/modules-undefined-export2.js7
-rw-r--r--deps/v8/test/message/modules-undefined-export2.out5
-rw-r--r--deps/v8/test/message/redeclaration1.js6
-rw-r--r--deps/v8/test/message/redeclaration1.out5
-rw-r--r--deps/v8/test/message/redeclaration2.js6
-rw-r--r--deps/v8/test/message/redeclaration2.out5
-rw-r--r--deps/v8/test/message/redeclaration3.js6
-rw-r--r--deps/v8/test/message/redeclaration3.out5
-rw-r--r--deps/v8/test/mjsunit/array-literal-transitions.js3
-rw-r--r--deps/v8/test/mjsunit/asm/asm-validation.js82
-rw-r--r--deps/v8/test/mjsunit/asm/b5528-comma.js31
-rw-r--r--deps/v8/test/mjsunit/asm/regress-660813.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js155
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js31
-rw-r--r--deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js8
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5538.js53
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-664490.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-665680.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-673244.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-v8-5573.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/uint8-clamped-array.js73
-rw-r--r--deps/v8/test/mjsunit/debug-evaluate-arguments.js93
-rw-r--r--deps/v8/test/mjsunit/debug-script.js6
-rw-r--r--deps/v8/test/mjsunit/debug-set-variable-value.js33
-rw-r--r--deps/v8/test/mjsunit/debug-step-2.js89
-rw-r--r--deps/v8/test/mjsunit/debug-step-3.js94
-rw-r--r--deps/v8/test/mjsunit/debug-step-4.js103
-rw-r--r--deps/v8/test/mjsunit/debug-step.js72
-rw-r--r--deps/v8/test/mjsunit/debug-version.js90
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator-turbo.js243
-rw-r--r--deps/v8/test/mjsunit/es6/classes-subclass-builtins.js1
-rw-r--r--deps/v8/test/mjsunit/es6/classes.js9
-rw-r--r--deps/v8/test/mjsunit/es6/completion.js7
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js8
-rw-r--r--deps/v8/test/mjsunit/es6/default-parameters-debug.js24
-rw-r--r--deps/v8/test/mjsunit/es6/reflect.js16
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-flags.js8
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-468661.js7
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-5598.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js)14
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-660925.js13
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-666622.js39
-rw-r--r--deps/v8/test/mjsunit/es6/rest-params.js2
-rw-r--r--deps/v8/test/mjsunit/es6/sloppy-restrictive-block-function.js (renamed from deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/spread-call.js118
-rw-r--r--deps/v8/test/mjsunit/es6/string-endswith.js1
-rw-r--r--deps/v8/test/mjsunit/es6/string-includes.js5
-rw-r--r--deps/v8/test/mjsunit/es6/string-startswith.js1
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js2
-rw-r--r--deps/v8/test/mjsunit/es8/object-entries.js (renamed from deps/v8/test/mjsunit/harmony/object-entries.js)1
-rw-r--r--deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js (renamed from deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js)1
-rw-r--r--deps/v8/test/mjsunit/es8/object-values.js (renamed from deps/v8/test/mjsunit/harmony/object-values.js)1
-rw-r--r--deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js587
-rw-r--r--deps/v8/test/mjsunit/for-in.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/for-in.js9
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-turbo.js667
-rw-r--r--deps/v8/test/mjsunit/harmony/private.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js97
-rw-r--r--deps/v8/test/mjsunit/ignition/osr-from-bytecode.js2
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-662418.js18
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-664146.js27
-rw-r--r--deps/v8/test/mjsunit/ignition/regress-672027.js17
-rw-r--r--deps/v8/test/mjsunit/invalid-lhs.js10
-rw-r--r--deps/v8/test/mjsunit/lazy-inner-functions.js2
-rw-r--r--deps/v8/test/mjsunit/math-floor-part2.js3
-rw-r--r--deps/v8/test/mjsunit/math-floor-part3.js3
-rw-r--r--deps/v8/test/mjsunit/math-floor-part4.js4
-rw-r--r--deps/v8/test/mjsunit/mirror-script.js7
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js39
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status214
-rw-r--r--deps/v8/test/mjsunit/modules-debug-scopes1.js897
-rw-r--r--deps/v8/test/mjsunit/modules-debug-scopes2.js239
-rw-r--r--deps/v8/test/mjsunit/modules-fail-5.js9
-rw-r--r--deps/v8/test/mjsunit/modules-fail-7.js8
-rw-r--r--deps/v8/test/mjsunit/modules-fail-cyclic-3.js8
-rw-r--r--deps/v8/test/mjsunit/modules-init3.js5
-rw-r--r--deps/v8/test/mjsunit/modules-namespace1.js107
-rw-r--r--deps/v8/test/mjsunit/modules-namespace2.js22
-rw-r--r--deps/v8/test/mjsunit/modules-namespace3.js11
-rw-r--r--deps/v8/test/mjsunit/modules-namespace4.js59
-rw-r--r--deps/v8/test/mjsunit/modules-relative-path.js (renamed from deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js)10
-rw-r--r--deps/v8/test/mjsunit/modules-skip-2.js1
-rw-r--r--deps/v8/test/mjsunit/modules-skip-cyclic-3.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-init3.js6
-rw-r--r--deps/v8/test/mjsunit/modules-skip-namespace.js13
-rw-r--r--deps/v8/test/mjsunit/modules-turbo.js14
-rw-r--r--deps/v8/test/mjsunit/object-create.js32
-rw-r--r--deps/v8/test/mjsunit/preparse-toplevel-strict-eval.js13
-rw-r--r--deps/v8/test/mjsunit/print.js6
-rw-r--r--deps/v8/test/mjsunit/prototype-non-existing.js92
-rw-r--r--deps/v8/test/mjsunit/regexp-regexpexec.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js)8
-rw-r--r--deps/v8/test/mjsunit/regexp.js33
-rw-r--r--deps/v8/test/mjsunit/regress-3456.js2
-rw-r--r--deps/v8/test/mjsunit/regress-604044.js2
-rw-r--r--deps/v8/test/mjsunit/regress-sync-optimized-lists.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1387.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2296.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2318.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-252797.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5071.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5252.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5262.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-536751.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542099.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-542100.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5434.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5476.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5566.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-568765.js93
-rw-r--r--deps/v8/test/mjsunit/regress/regress-575364.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5763-1.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5763-2.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5790.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5802.js117
-rw-r--r--deps/v8/test/mjsunit/regress/regress-632289.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-653407.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-662845.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-662904.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-664087.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-998565.js51
-rw-r--r--deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-632800.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-635923.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-644631.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-656275.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-658185.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-658528.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-658691.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659915a.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659915b.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-659967.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-660379.js42
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-661949.js (renamed from deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js)13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662367.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662410.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-662830.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-663750.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664084.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664469.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664506.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664802.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664942.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-664974.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-665886.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-679202.js (renamed from deps/v8/test/mjsunit/regress/regress-409533.js)9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-682194.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js50
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js19
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-02256b.js502
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-5531.js22
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-648079.js324
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-651961.js6
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-654377.js23
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-default.js32
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-ignition.js41
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js32
-rw-r--r--deps/v8/test/mjsunit/strict-mode.js8
-rw-r--r--deps/v8/test/mjsunit/tools/profviz-test.default460
-rw-r--r--deps/v8/test/mjsunit/tools/profviz.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-stack.js105
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-management.js71
-rw-r--r--deps/v8/test/mjsunit/wasm/compiled-module-serialization.js30
-rw-r--r--deps/v8/test/mjsunit/wasm/data-segments.js66
-rw-r--r--deps/v8/test/mjsunit/wasm/debug-disassembly.js128
-rw-r--r--deps/v8/test/mjsunit/wasm/errors.js134
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/export-table.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/ffi.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/frame-inspection.js43
-rw-r--r--deps/v8/test/mjsunit/wasm/function-prototype.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-buffer.js42
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-stress.js37
-rw-r--r--deps/v8/test/mjsunit/wasm/globals.js88
-rw-r--r--deps/v8/test/mjsunit/wasm/grow-memory.js157
-rw-r--r--deps/v8/test/mjsunit/wasm/import-memory.js231
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/incrementer.wasmbin45 -> 45 bytes
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-calls.js138
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js377
-rw-r--r--deps/v8/test/mjsunit/wasm/instance-gc.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js35
-rw-r--r--deps/v8/test/mjsunit/wasm/memory-size.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/module-memory.js12
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js41
-rw-r--r--deps/v8/test/mjsunit/wasm/table.js161
-rw-r--r--deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js80
-rw-r--r--deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js94
-rw-r--r--deps/v8/test/mjsunit/wasm/trap-location.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js18
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-function-simple.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js480
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-module-builder.js295
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-object-api.js9
-rw-r--r--deps/v8/test/mozilla/mozilla.status12
-rw-r--r--deps/v8/test/optimize_for_size.isolate2
-rw-r--r--deps/v8/test/perf.isolate5
-rwxr-xr-xdeps/v8/test/test262/list.py7
-rw-r--r--deps/v8/test/test262/test262.gyp2
-rw-r--r--deps/v8/test/test262/test262.status92
-rw-r--r--deps/v8/test/test262/testcfg.py10
-rw-r--r--deps/v8/test/unittests/BUILD.gn24
-rw-r--r--deps/v8/test/unittests/base/atomic-utils-unittest.cc44
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc51
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc20
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc37
-rw-r--r--deps/v8/test/unittests/compiler/escape-analysis-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.h3
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc103
-rw-r--r--deps/v8/test/unittests/compiler/instruction-sequence-unittest.h80
-rw-r--r--deps/v8/test/unittests/compiler/instruction-unittest.cc175
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc36
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc8
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc67
-rw-r--r--deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc120
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc258
-rw-r--r--deps/v8/test/unittests/compiler/move-optimizer-unittest.cc137
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc8
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h1
-rw-r--r--deps/v8/test/unittests/compiler/register-allocator-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc96
-rw-r--r--deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc24
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc26
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc140
-rw-r--r--deps/v8/test/unittests/compiler/zone-stats-unittest.cc (renamed from deps/v8/test/unittests/compiler/zone-pool-unittest.cc)47
-rw-r--r--deps/v8/test/unittests/eh-frame-iterator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/eh-frame-writer-unittest.cc31
-rw-r--r--deps/v8/test/unittests/heap/gc-tracer-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/marking-unittest.cc20
-rw-r--r--deps/v8/test/unittests/heap/memory-reducer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc77
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc50
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc12
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc30
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc10
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc169
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc31
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc17
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc238
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc36
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc38
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc41
-rw-r--r--deps/v8/test/unittests/register-configuration-unittest.cc19
-rw-r--r--deps/v8/test/unittests/source-position-table-unittest.cc30
-rw-r--r--deps/v8/test/unittests/test-utils.cc8
-rw-r--r--deps/v8/test/unittests/test-utils.h18
-rw-r--r--deps/v8/test/unittests/unittests.gyp19
-rw-r--r--deps/v8/test/unittests/unittests.status2
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc296
-rw-r--r--deps/v8/test/unittests/wasm/asm-types-unittest.cc4
-rw-r--r--deps/v8/test/unittests/wasm/ast-decoder-unittest.cc102
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc83
-rw-r--r--deps/v8/test/unittests/wasm/leb-helper-unittest.cc52
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc354
-rw-r--r--deps/v8/test/unittests/wasm/switch-logic-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc18
-rw-r--r--deps/v8/test/unittests/zone/segmentpool-unittest.cc35
-rw-r--r--deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc207
-rw-r--r--deps/v8/test/unittests/zone/zone-unittest.cc2
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt7
-rw-r--r--deps/v8/test/webkit/fast/js/basic-strict-mode.js7
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt4
-rw-r--r--deps/v8/test/webkit/fast/js/parser-syntax-check.js4
-rw-r--r--deps/v8/test/webkit/function-declaration-statement-expected.txt43
-rw-r--r--deps/v8/test/webkit/function-declaration-statement.js185
-rw-r--r--deps/v8/third_party/binutils/.gitignore8
-rw-r--r--deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha11
-rw-r--r--deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha11
-rwxr-xr-xdeps/v8/third_party/binutils/download.py117
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py479
-rw-r--r--deps/v8/third_party/inspector_protocol/CodeGenerator.py498
-rwxr-xr-xdeps/v8/third_party/inspector_protocol/ConcatenateProtocols.py39
-rw-r--r--deps/v8/third_party/inspector_protocol/LICENSE (renamed from deps/v8/test/mjsunit/regress/regress-220.js)28
-rw-r--r--deps/v8/third_party/inspector_protocol/OWNERS9
-rw-r--r--deps/v8/third_party/inspector_protocol/README.v816
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gni80
-rw-r--r--deps/v8/third_party/inspector_protocol/inspector_protocol.gypi33
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Allocator_h.template30
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Array_h.template136
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Collections_h.template43
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template225
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template120
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template61
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template35
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Forward_h.template38
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template24
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Maybe_h.template86
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Object_cpp.template37
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Object_h.template32
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template553
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_h.template22
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template12
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template171
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_cpp.template407
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Values_h.template246
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/Exported_h.template65
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/Imported_h.template51
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template364
-rw-r--r--deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template297
-rw-r--r--deps/v8/third_party/jinja2/AUTHORS33
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.md51
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.sha5121
-rw-r--r--deps/v8/third_party/jinja2/LICENSE31
-rw-r--r--deps/v8/third_party/jinja2/OWNERS3
-rw-r--r--deps/v8/third_party/jinja2/README.chromium25
-rw-r--r--deps/v8/third_party/jinja2/__init__.py70
-rw-r--r--deps/v8/third_party/jinja2/_compat.py111
-rw-r--r--deps/v8/third_party/jinja2/_stringdefs.py132
-rw-r--r--deps/v8/third_party/jinja2/bccache.py362
-rw-r--r--deps/v8/third_party/jinja2/compiler.py1686
-rw-r--r--deps/v8/third_party/jinja2/constants.py32
-rw-r--r--deps/v8/third_party/jinja2/debug.py350
-rw-r--r--deps/v8/third_party/jinja2/defaults.py43
-rw-r--r--deps/v8/third_party/jinja2/environment.py1213
-rw-r--r--deps/v8/third_party/jinja2/exceptions.py146
-rw-r--r--deps/v8/third_party/jinja2/ext.py636
-rw-r--r--deps/v8/third_party/jinja2/filters.py996
-rwxr-xr-xdeps/v8/third_party/jinja2/get_jinja2.sh122
-rw-r--r--deps/v8/third_party/jinja2/lexer.py734
-rw-r--r--deps/v8/third_party/jinja2/loaders.py481
-rw-r--r--deps/v8/third_party/jinja2/meta.py103
-rw-r--r--deps/v8/third_party/jinja2/nodes.py919
-rw-r--r--deps/v8/third_party/jinja2/optimizer.py68
-rw-r--r--deps/v8/third_party/jinja2/parser.py899
-rw-r--r--deps/v8/third_party/jinja2/runtime.py667
-rw-r--r--deps/v8/third_party/jinja2/sandbox.py367
-rw-r--r--deps/v8/third_party/jinja2/tests.py173
-rw-r--r--deps/v8/third_party/jinja2/utils.py531
-rw-r--r--deps/v8/third_party/jinja2/visitor.py87
-rw-r--r--deps/v8/third_party/markupsafe/AUTHORS13
-rw-r--r--deps/v8/third_party/markupsafe/LICENSE33
-rw-r--r--deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md51
-rw-r--r--deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha5121
-rw-r--r--deps/v8/third_party/markupsafe/OWNERS3
-rw-r--r--deps/v8/third_party/markupsafe/README.chromium24
-rw-r--r--deps/v8/third_party/markupsafe/__init__.py234
-rw-r--r--deps/v8/third_party/markupsafe/_compat.py24
-rw-r--r--deps/v8/third_party/markupsafe/_constants.py267
-rw-r--r--deps/v8/third_party/markupsafe/_native.py46
-rw-r--r--deps/v8/third_party/markupsafe/_speedups.c239
-rwxr-xr-xdeps/v8/third_party/markupsafe/get_markupsafe.sh121
-rw-r--r--deps/v8/tools/callstats.html7
-rwxr-xr-xdeps/v8/tools/dev/v8gen.py2
-rw-r--r--deps/v8/tools/external-reference-check.py44
-rw-r--r--deps/v8/tools/gdbinit27
-rw-r--r--deps/v8/tools/ic-explorer.html70
-rwxr-xr-xdeps/v8/tools/ignition/linux_perf_report.py42
-rw-r--r--deps/v8/tools/ignition/linux_perf_report_test.py41
-rw-r--r--deps/v8/tools/parser-shell.cc6
-rw-r--r--deps/v8/tools/parser-shell.gyp1
-rwxr-xr-xdeps/v8/tools/presubmit.py8
-rw-r--r--deps/v8/tools/profviz/composer.js2
-rw-r--r--deps/v8/tools/profviz/stdio.js6
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py2
-rw-r--r--deps/v8/tools/release/test_scripts.py2
-rwxr-xr-xdeps/v8/tools/run-tests.py50
-rwxr-xr-xdeps/v8/tools/sanitizers/sancov_merger.py4
-rw-r--r--deps/v8/tools/sanitizers/sancov_merger_test.py66
-rw-r--r--deps/v8/tools/testrunner/local/execution.py10
-rw-r--r--deps/v8/tools/testrunner/testrunner.isolate7
-rw-r--r--deps/v8/tools/turbolizer/schedule-view.js2
-rwxr-xr-xdeps/v8/tools/update-wasm-fuzzers.sh14
-rw-r--r--deps/v8/tools/whitespace.txt2
1607 files changed, 85464 insertions, 51446 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 29fd9226de..cd72bebbd6 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -65,17 +65,9 @@ shell_g
!/testing/gtest/include/gtest
/testing/gtest/include/gtest/*
!/testing/gtest/include/gtest/gtest_prod.h
-/third_party
-/third_party/android_tools
-/third_party/cygwin
-/third_party/icu
-/third_party/instrumented_libraries
-/third_party/inspector_protocol
-/third_party/jinga2
-/third_party/llvm
-/third_party/llvm-build
-/third_party/markupsafe
-/third_party/WebKit
+/third_party/*
+!/third_party/binutils
+!/third_party/inspector_protocol
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
@@ -109,4 +101,6 @@ v8.ignition_dispatches_table.json
/test/fuzzer/wasm.tar.gz
/test/fuzzer/wasm_asmjs.tar.gz
/src/inspector/build/closure-compiler.tar.gz
-/src/inspector/build/closure-compiler \ No newline at end of file
+/src/inspector/build/closure-compiler
+!/third_party/jinja2
+!/third_party/markupsafe
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 0229c9259e..476d0c3632 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -65,6 +65,7 @@ Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
Gwang Yoon Hwang <ryumiel@company100.net>
Han Choongwoo <cwhan.tunz@gmail.com>
+Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
@@ -111,6 +112,7 @@ Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
Ryan Dahl <ry@tinyclouds.org>
Sakthipriyan Vairamani (thefourtheye) <thechargingvolcano@gmail.com>
+Sander Mathijs van Veen <sander@leaningtech.com>
Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 06870b6039..8587356ddc 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -43,10 +43,6 @@ declare_args() {
# Sets -dENABLE_HANDLE_ZAPPING.
v8_enable_handle_zapping = is_debug
- # Enable ECMAScript Internationalization API. Enabling this feature will
- # add a dependency on the ICU library.
- v8_enable_i18n_support = true
-
# Enable slow dchecks.
v8_enable_slow_dchecks = false
@@ -65,6 +61,9 @@ declare_args() {
# Switches off inlining in V8.
v8_no_inline = false
+ # Override OS page size when generating snapshot
+ v8_os_page_size = "0"
+
# Similar to vfp but on MIPS.
v8_can_use_fpu_instructions = true
@@ -128,6 +127,20 @@ config("internal_config_base") {
# This config should be applied to code using the libplatform.
config("libplatform_config") {
include_dirs = [ "include" ]
+ if (is_component_build) {
+ defines = [ "USING_V8_PLATFORM_SHARED" ]
+ }
+}
+
+# This config should be applied to code using the libbase.
+config("libbase_config") {
+ if (is_component_build) {
+ defines = [ "USING_V8_BASE_SHARED" ]
+ }
+ libs = []
+ if (is_android && current_toolchain != host_toolchain) {
+ libs += [ "log" ]
+ }
}
# This config should be applied to code using the libsampler.
@@ -145,10 +158,6 @@ config("external_config") {
if (v8_enable_inspector_override) {
include_dirs += [ "$target_gen_dir/include" ]
}
- libs = []
- if (is_android && current_toolchain != host_toolchain) {
- libs += [ "log" ]
- }
}
# This config should only be applied to code that needs to be explicitly
@@ -361,6 +370,20 @@ config("toolchain") {
"-fno-inline",
]
}
+
+ if (is_clang) {
+ cflags += [
+ "-Wsign-compare",
+
+ # TODO(hans): Remove once http://crbug.com/428099 is resolved.
+ "-Winconsistent-missing-override",
+ ]
+
+ if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+ v8_current_cpu == "mips64el") {
+ cflags += [ "-Wshorten-64-to-32" ]
+ }
+ }
}
###############################################################################
@@ -388,8 +411,6 @@ action("js2c") {
"src/js/symbol.js",
"src/js/array.js",
"src/js/string.js",
- "src/js/math.js",
- "src/js/regexp.js",
"src/js/arraybuffer.js",
"src/js/typedarray.js",
"src/js/collection.js",
@@ -397,7 +418,6 @@ action("js2c") {
"src/js/collection-iterator.js",
"src/js/promise.js",
"src/js/messages.js",
- "src/js/array-iterator.js",
"src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
@@ -662,6 +682,13 @@ action("run_mksnapshot") {
]
}
+ if (v8_os_page_size != "0") {
+ args += [
+ "--v8_os_page_size",
+ v8_os_page_size,
+ ]
+ }
+
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
@@ -715,6 +742,7 @@ action("v8_dump_build_config") {
"is_tsan=$is_tsan",
"target_cpu=\"$target_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
+ "v8_enable_inspector=$v8_enable_inspector_override",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
@@ -863,6 +891,8 @@ v8_source_set("v8_base") {
"src/asmjs/asm-types.h",
"src/asmjs/asm-wasm-builder.cc",
"src/asmjs/asm-wasm-builder.h",
+ "src/asmjs/switch-logic.cc",
+ "src/asmjs/switch-logic.h",
"src/assembler.cc",
"src/assembler.h",
"src/assert-scope.cc",
@@ -930,6 +960,7 @@ v8_source_set("v8_base") {
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
+ "src/builtins/builtins-promise.cc",
"src/builtins/builtins-proxy.cc",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
@@ -969,6 +1000,8 @@ v8_source_set("v8_base") {
"src/compilation-statistics.h",
"src/compiler-dispatcher/compiler-dispatcher-job.cc",
"src/compiler-dispatcher/compiler-dispatcher-job.h",
+ "src/compiler-dispatcher/compiler-dispatcher-tracer.cc",
+ "src/compiler-dispatcher/compiler-dispatcher-tracer.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/compiler.cc",
@@ -1007,6 +1040,8 @@ v8_source_set("v8_base") {
"src/compiler/common-operator-reducer.h",
"src/compiler/common-operator.cc",
"src/compiler/common-operator.h",
+ "src/compiler/compiler-source-position-table.cc",
+ "src/compiler/compiler-source-position-table.h",
"src/compiler/control-builders.cc",
"src/compiler/control-builders.h",
"src/compiler/control-equivalence.cc",
@@ -1145,14 +1180,14 @@ v8_source_set("v8_base") {
"src/compiler/scheduler.h",
"src/compiler/select-lowering.cc",
"src/compiler/select-lowering.h",
+ "src/compiler/simd-scalar-lowering.cc",
+ "src/compiler/simd-scalar-lowering.h",
"src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.cc",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.cc",
"src/compiler/simplified-operator.h",
- "src/compiler/source-position.cc",
- "src/compiler/source-position.h",
"src/compiler/state-values-utils.cc",
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
@@ -1177,8 +1212,8 @@ v8_source_set("v8_base") {
"src/compiler/wasm-compiler.cc",
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-linkage.cc",
- "src/compiler/zone-pool.cc",
- "src/compiler/zone-pool.h",
+ "src/compiler/zone-stats.cc",
+ "src/compiler/zone-stats.h",
"src/context-measure.cc",
"src/context-measure.h",
"src/contexts-inl.h",
@@ -1218,8 +1253,6 @@ v8_source_set("v8_base") {
"src/crankshaft/hydrogen-instructions.h",
"src/crankshaft/hydrogen-load-elimination.cc",
"src/crankshaft/hydrogen-load-elimination.h",
- "src/crankshaft/hydrogen-mark-deoptimize.cc",
- "src/crankshaft/hydrogen-mark-deoptimize.h",
"src/crankshaft/hydrogen-mark-unreachable.cc",
"src/crankshaft/hydrogen-mark-unreachable.h",
"src/crankshaft/hydrogen-osr.cc",
@@ -1262,6 +1295,7 @@ v8_source_set("v8_base") {
"src/debug/debug-evaluate.h",
"src/debug/debug-frames.cc",
"src/debug/debug-frames.h",
+ "src/debug/debug-interface.h",
"src/debug/debug-scopes.cc",
"src/debug/debug-scopes.h",
"src/debug/debug.cc",
@@ -1363,7 +1397,6 @@ v8_source_set("v8_base") {
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/page-parallel-job.h",
- "src/heap/remembered-set.cc",
"src/heap/remembered-set.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
@@ -1378,12 +1411,14 @@ v8_source_set("v8_base") {
"src/heap/store-buffer.h",
"src/i18n.cc",
"src/i18n.h",
+ "src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
+ "src/ic/handler-configuration-inl.h",
"src/ic/handler-configuration.h",
"src/ic/ic-compiler.cc",
"src/ic/ic-compiler.h",
@@ -1392,6 +1427,8 @@ v8_source_set("v8_base") {
"src/ic/ic-state.h",
"src/ic/ic.cc",
"src/ic/ic.h",
+ "src/ic/keyed-store-generic.cc",
+ "src/ic/keyed-store-generic.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
"src/icu_util.cc",
@@ -1541,6 +1578,8 @@ v8_source_set("v8_base") {
"src/profiler/tracing-cpu-profiler.h",
"src/profiler/unbound-queue-inl.h",
"src/profiler/unbound-queue.h",
+ "src/promise-utils.cc",
+ "src/promise-utils.h",
"src/property-descriptor.cc",
"src/property-descriptor.h",
"src/property-details.h",
@@ -1566,6 +1605,8 @@ v8_source_set("v8_base") {
"src/regexp/regexp-parser.h",
"src/regexp/regexp-stack.cc",
"src/regexp/regexp-stack.h",
+ "src/regexp/regexp-utils.cc",
+ "src/regexp/regexp-utils.h",
"src/register-configuration.cc",
"src/register-configuration.h",
"src/runtime-profiler.cc",
@@ -1588,9 +1629,11 @@ v8_source_set("v8_base") {
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-liveedit.cc",
"src/runtime/runtime-maths.cc",
+ "src/runtime/runtime-module.cc",
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-object.cc",
"src/runtime/runtime-operators.cc",
+ "src/runtime/runtime-promise.cc",
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
@@ -1628,6 +1671,7 @@ v8_source_set("v8_base") {
"src/snapshot/startup-serializer.h",
"src/source-position-table.cc",
"src/source-position-table.h",
+ "src/source-position.cc",
"src/source-position.h",
"src/splay-tree-inl.h",
"src/splay-tree.h",
@@ -1642,6 +1686,10 @@ v8_source_set("v8_base") {
"src/strtod.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
+ "src/tracing/traced-value.cc",
+ "src/tracing/traced-value.h",
+ "src/tracing/tracing-category-observer.cc",
+ "src/tracing/tracing-category-observer.h",
"src/transitions-inl.h",
"src/transitions.cc",
"src/transitions.h",
@@ -1680,16 +1728,14 @@ v8_source_set("v8_base") {
"src/wasm/ast-decoder.h",
"src/wasm/decoder.h",
"src/wasm/leb-helper.h",
+ "src/wasm/managed.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
- "src/wasm/switch-logic.cc",
- "src/wasm/switch-logic.h",
+ "src/wasm/signature-map.cc",
+ "src/wasm/signature-map.h",
"src/wasm/wasm-debug.cc",
- "src/wasm/wasm-debug.h",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
- "src/wasm/wasm-function-name-table.cc",
- "src/wasm/wasm-function-name-table.h",
"src/wasm/wasm-interpreter.cc",
"src/wasm/wasm-interpreter.h",
"src/wasm/wasm-js.cc",
@@ -1699,6 +1745,8 @@ v8_source_set("v8_base") {
"src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module.cc",
"src/wasm/wasm-module.h",
+ "src/wasm/wasm-objects.cc",
+ "src/wasm/wasm-objects.h",
"src/wasm/wasm-opcodes.cc",
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
@@ -1707,6 +1755,7 @@ v8_source_set("v8_base") {
"src/zone/accounting-allocator.h",
"src/zone/zone-allocator.h",
"src/zone/zone-allocator.h",
+ "src/zone/zone-chunk-list.h",
"src/zone/zone-containers.h",
"src/zone/zone-segment.cc",
"src/zone/zone-segment.h",
@@ -2152,24 +2201,15 @@ v8_source_set("v8_base") {
}
}
-v8_source_set("v8_libbase") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
+v8_component("v8_libbase") {
sources = [
"src/base/adapters.h",
"src/base/atomic-utils.h",
"src/base/atomicops.h",
- "src/base/atomicops_internals_arm64_gcc.h",
- "src/base/atomicops_internals_arm_gcc.h",
"src/base/atomicops_internals_atomicword_compat.h",
- "src/base/atomicops_internals_mac.h",
- "src/base/atomicops_internals_mips64_gcc.h",
- "src/base/atomicops_internals_mips_gcc.h",
- "src/base/atomicops_internals_s390_gcc.h",
- "src/base/atomicops_internals_tsan.h",
- "src/base/atomicops_internals_x86_gcc.cc",
- "src/base/atomicops_internals_x86_gcc.h",
+ "src/base/atomicops_internals_portable.h",
"src/base/atomicops_internals_x86_msvc.h",
+ "src/base/base-export.h",
"src/base/bits.cc",
"src/base/bits.h",
"src/base/build_config.h",
@@ -2208,6 +2248,7 @@ v8_source_set("v8_libbase") {
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
+ "src/base/ring-buffer.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
"src/base/safe_math.h",
@@ -2220,8 +2261,14 @@ v8_source_set("v8_libbase") {
configs = [ ":internal_config_base" ]
+ public_configs = [ ":libbase_config" ]
+
defines = []
+ if (is_component_build) {
+ defines = [ "BUILDING_V8_BASE_SHARED" ]
+ }
+
if (is_posix) {
sources += [ "src/base/platform/platform-posix.cc" ]
}
@@ -2285,9 +2332,10 @@ v8_source_set("v8_libbase") {
# TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
}
-v8_source_set("v8_libplatform") {
+v8_component("v8_libplatform") {
sources = [
"//base/trace_event/common/trace_event_common.h",
+ "include/libplatform/libplatform-export.h",
"include/libplatform/libplatform.h",
"include/libplatform/v8-tracing.h",
"src/libplatform/default-platform.cc",
@@ -2307,6 +2355,10 @@ v8_source_set("v8_libplatform") {
configs = [ ":internal_config_base" ]
+ if (is_component_build) {
+ defines = [ "BUILDING_V8_PLATFORM_SHARED" ]
+ }
+
public_configs = [ ":libplatform_config" ]
deps = [
@@ -2344,27 +2396,7 @@ v8_source_set("fuzzer_support") {
]
public_deps = [
- ":v8_libplatform",
- ]
-}
-
-# Used by fuzzers that would require exposing too many symbols for a proper
-# component build.
-v8_source_set("fuzzer_support_nocomponent") {
- visibility = [ ":*" ] # Only targets in this file can depend on this.
-
- sources = [
- "test/fuzzer/fuzzer-support.cc",
- "test/fuzzer/fuzzer-support.h",
- ]
-
- configs = [ ":internal_config_base" ]
-
- deps = [
- ":v8_maybe_snapshot",
- ]
-
- public_deps = [
+ ":v8_libbase",
":v8_libplatform",
]
}
@@ -2393,6 +2425,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
deps = [
":v8_base",
+ ":v8_libbase",
":v8_libplatform",
":v8_nosnapshot",
"//build/config/sanitizers:deps",
@@ -2515,6 +2548,7 @@ v8_executable("d8") {
deps = [
":d8_js2c",
":v8",
+ ":v8_libbase",
":v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
@@ -2531,6 +2565,11 @@ v8_executable("d8") {
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
}
+
+ defines = []
+ if (v8_enable_inspector_override) {
+ defines += [ "V8_INSPECTOR_ENABLED" ]
+ }
}
v8_isolate_run("d8") {
@@ -2555,6 +2594,7 @@ v8_executable("v8_hello_world") {
deps = [
":v8",
+ ":v8_libbase",
":v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
@@ -2579,6 +2619,7 @@ v8_executable("v8_sample_process") {
deps = [
":v8",
+ ":v8_libbase",
":v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
@@ -2601,32 +2642,16 @@ v8_executable("v8_parser_shell") {
]
deps = [
+ ":v8",
+ ":v8_libbase",
":v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
]
- if (is_component_build) {
- # v8_parser_shell can't be built against a shared library, so we
- # need to depend on the underlying static target in that case.
- deps += [ ":v8_maybe_snapshot" ]
- } else {
- deps += [ ":v8" ]
- }
-
if (v8_enable_i18n_support) {
deps += [ "//third_party/icu" ]
}
-
- if (is_win) {
- # Suppress warnings about importing locally defined symbols.
- if (is_component_build) {
- ldflags = [
- "/ignore:4049",
- "/ignore:4217",
- ]
- }
- }
}
if (want_v8_shell) {
@@ -2644,6 +2669,7 @@ if (want_v8_shell) {
deps = [
":v8",
+ ":v8_libbase",
":v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
@@ -2693,7 +2719,7 @@ v8_source_set("parser_fuzzer") {
]
deps = [
- ":fuzzer_support_nocomponent",
+ ":fuzzer_support",
]
configs = [
@@ -2804,6 +2830,26 @@ v8_source_set("wasm_code_fuzzer") {
v8_fuzzer("wasm_code_fuzzer") {
}
+v8_source_set("wasm_call_fuzzer") {
+ sources = [
+ "test/fuzzer/wasm-call.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ":wasm_module_runner",
+ ":wasm_test_signatures",
+ ]
+
+ configs = [
+ ":external_config",
+ ":internal_config_base",
+ ]
+}
+
+v8_fuzzer("wasm_call_fuzzer") {
+}
+
v8_source_set("lib_wasm_section_fuzzer") {
sources = [
"test/fuzzer/wasm-section-fuzzers.cc",
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 40c8537022..2dc77568d4 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1690 @@
+2016-11-15: Version 5.6.326
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.325
+
+ [wasm] Be more lenient on the names section (issue 5632).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.324
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.323
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.322
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.321
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.320
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.319
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.318
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-15: Version 5.6.317
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.316
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.315
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.314
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.313
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.312
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.311
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.310
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.309
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.308
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.307
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.306
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-14: Version 5.6.305
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-12: Version 5.6.304
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-12: Version 5.6.303
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-12: Version 5.6.302
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.301
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.300
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.299
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.298
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.297
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.296
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.295
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.294
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.293
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.292
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.291
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.290
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-11: Version 5.6.289
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.288
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.287
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.286
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.285
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.284
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.283
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.282
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.281
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.280
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.279
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.278
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.277
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.276
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.275
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.274
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.273
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.272
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.271
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.270
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.269
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-10: Version 5.6.268
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.267
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.266
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.265
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.264
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.263
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.262
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.261
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.260
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.259
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.258
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-09: Version 5.6.257
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.256
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.255
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.254
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.253
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.252
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.251
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.250
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.249
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.248
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.247
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.246
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.245
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-08: Version 5.6.244
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.243
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.242
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.241
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.240
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.239
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.238
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.237
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.236
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.235
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.234
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.233
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.232
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.231
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.230
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.229
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.228
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.227
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.226
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.225
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-07: Version 5.6.224
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-06: Version 5.6.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-05: Version 5.6.222
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.220
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.219
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.217
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.216
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.215
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.214
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.213
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.212
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.211
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.210
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.209
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.208
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-04: Version 5.6.207
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.206
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.205
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.204
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.203
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.202
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.201
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.200
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-03: Version 5.6.199
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.198
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.197
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.196
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.195
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.194
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.193
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.192
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.191
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.190
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.189
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.188
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.187
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.186
+
+ Performance and stability improvements on all platforms.
+
+
+2016-11-02: Version 5.6.185
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.184
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.183
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.182
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.181
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.180
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-31: Version 5.6.179
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-29: Version 5.6.178
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.177
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.176
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.175
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.174
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.173
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.172
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.171
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.170
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.169
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.168
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.167
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.166
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.165
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-28: Version 5.6.164
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.163
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.162
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.161
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.160
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.159
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.158
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.157
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.156
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.155
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.154
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.153
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.152
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.151
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-27: Version 5.6.150
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-26: Version 5.6.149
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.148
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.147
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.146
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.145
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.144
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-25: Version 5.6.143
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.142
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.141
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.140
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.139
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.138
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.137
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.136
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-24: Version 5.6.135
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.134
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.133
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.132
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.131
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.130
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.129
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.128
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-21: Version 5.6.127
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.126
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.125
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.124
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-20: Version 5.6.123
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.122
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.121
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.120
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.119
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.118
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.117
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.116
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.115
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.114
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.113
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.112
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.111
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.110
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.109
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.108
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.107
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-19: Version 5.6.106
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.105
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.103
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.102
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.101
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.100
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.99
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.98
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.95
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.93
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.88
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-18: Version 5.6.87
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.81
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.80
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.76
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.72
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-17: Version 5.6.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-16: Version 5.6.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-16: Version 5.6.68
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-15: Version 5.6.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-15: Version 5.6.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.59
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-14: Version 5.6.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.50
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.49
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.46
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.44
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.43
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.42
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-13: Version 5.6.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-12: Version 5.6.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-12: Version 5.6.39
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-12: Version 5.6.38
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.37
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.33
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.30
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.28
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.27
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.24
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-11: Version 5.6.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.16
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.15
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.14
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-10: Version 5.6.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-09: Version 5.6.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-08: Version 5.6.11
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-08: Version 5.6.10
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.9
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.7
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.6
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.3
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-07: Version 5.6.1
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.383
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.382
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.381
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.380
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.379
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.378
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.377
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-06: Version 5.5.376
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.375
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.374
+
+ Performance and stability improvements on all platforms.
+
+
+2016-10-05: Version 5.5.373
+
+ Performance and stability improvements on all platforms.
+
+
2016-10-05: Version 5.5.372
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 058cd8bea6..161015d661 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,19 +8,17 @@ vars = {
deps = {
"v8/build":
- Var("chromium_url") + "/chromium/src/build.git" + "@" + "475d5b37ded6589c9f8a0d19ced54ddf2e6d14a0",
+ Var("chromium_url") + "/chromium/src/build.git" + "@" + "a3b623a6eff6dc9d58a03251ae22bccf92f67cb2",
"v8/tools/gyp":
Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
"v8/third_party/icu":
- Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "b0bd3ee50bc2e768d7a17cbc60d87f517f024dbe",
+ Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c1a237113f525a1561d4b322d7653e1083f79aaa",
"v8/third_party/instrumented_libraries":
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a",
"v8/buildtools":
- Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5fd66957f08bb752dca714a591c84587c9d70762",
+ Var("chromium_url") + "/chromium/buildtools.git" + "@" + "39b1db2ab4aa4b2ccaa263c29bdf63e7c1ee28aa",
"v8/base/trace_event/common":
- Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "e0fa02a02f61430dae2bddfd89a334ea4389f495",
- "v8/third_party/WebKit/Source/platform/inspector_protocol":
- Var("chromium_url") + "/chromium/src/third_party/WebKit/Source/platform/inspector_protocol.git" + "@" + "3280c57c4c575ce82ccd13e4a403492fb4ca624b",
+ Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
"v8/third_party/jinja2":
Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "b61a2c009a579593a259c1b300e0ad02bf48fd78",
"v8/third_party/markupsafe":
@@ -37,17 +35,19 @@ deps = {
Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
"v8/test/test262/data":
- Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "29c23844494a7cc2fbebc6948d2cb0bcaddb24e7",
+ Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "fb61ab44eb1bbc2699d714fc00e33af2a19411ce",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
"v8/tools/clang":
- Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "1f92f999fc374a479e98a189ebdfe25c09484486",
+ Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "75350a858c51ad69e2aae051a8727534542da29f",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf",
+ "v8/third_party/catapult":
+ Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "6962f5c0344a79b152bf84460a93e1b2e11ea0f4",
},
"win": {
"v8/third_party/cygwin":
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index a6d4d135da..6eeac09a14 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -163,6 +163,14 @@ endif
ifeq ($(goma), on)
GYPFLAGS += -Duse_goma=1
endif
+# v8_os_page_size=0, when 0 or not specified use build OS page size
+ifdef v8_os_page_size
+ ifneq ($(v8_os_page_size), 0)
+ ifneq ($(snapshot), off)
+ GYPFLAGS += -Dv8_os_page_size=$(v8_os_page_size)
+ endif
+ endif
+endif
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 78e7482efb..ad218330b1 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -67,7 +67,6 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
- from presubmit import CheckExternalReferenceRegistration
from presubmit import CheckAuthorizedAuthor
from presubmit import CheckStatusFiles
@@ -78,9 +77,6 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
- if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
- results.append(output_api.PresubmitError(
- "External references registration check failed"))
if not CheckStatusFiles(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError("Status file check failed"))
results.extend(CheckAuthorizedAuthor(input_api, output_api))
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 0db92692a0..e87665b8cd 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -223,49 +223,6 @@
flow_flags, arg1_name, arg1_val, \
arg2_name, arg2_val)
-// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
-// included in official builds.
-
-#if OFFICIAL_BUILD
-#undef TRACING_IS_OFFICIAL_BUILD
-#define TRACING_IS_OFFICIAL_BUILD 1
-#elif !defined(TRACING_IS_OFFICIAL_BUILD)
-#define TRACING_IS_OFFICIAL_BUILD 0
-#endif
-
-#if TRACING_IS_OFFICIAL_BUILD
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- (void)0
-#else
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
- TRACE_EVENT0(category_group, name)
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
- TRACE_EVENT_INSTANT0(category_group, name, scope)
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#endif
-
// Records a single event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -301,16 +258,6 @@
TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE | scope)
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() \
- TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(category_and_name) \
- TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, category_and_name)
-
// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -1006,15 +953,15 @@
INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
// Macro to specify that two trace IDs are identical. For example,
-// TRACE_BIND_IDS(
+// TRACE_LINK_IDS(
// "category", "name",
// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
// the current process have the same ID as events with ID
// ("blink::ResourceFetcher::FetchRequest", 0x2000).
-#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
- INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
+#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
+ INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
@@ -1081,7 +1028,7 @@
#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
-#define TRACE_EVENT_PHASE_BIND_IDS ('=')
+#define TRACE_EVENT_PHASE_LINK_IDS ('=')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
diff --git a/deps/v8/build_overrides/v8.gni b/deps/v8/build_overrides/v8.gni
index 09ea4570b0..df8320d5d1 100644
--- a/deps/v8/build_overrides/v8.gni
+++ b/deps/v8/build_overrides/v8.gni
@@ -26,7 +26,7 @@ v8_experimental_extra_library_files =
declare_args() {
# Enable inspector. See include/v8-inspector.h.
- v8_enable_inspector = false
+ v8_enable_inspector = true
}
v8_enable_inspector_override = v8_enable_inspector
diff --git a/deps/v8/gni/isolate.gni b/deps/v8/gni/isolate.gni
index 93c828d2cd..1cc3a38770 100644
--- a/deps/v8/gni/isolate.gni
+++ b/deps/v8/gni/isolate.gni
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/config/sanitizers/sanitizers.gni")
+import("//build_overrides/v8.gni")
import("//third_party/icu/config.gni")
import("v8.gni")
@@ -12,11 +13,21 @@ declare_args() {
}
template("v8_isolate_run") {
+ forward_variables_from(invoker,
+ "*",
+ [
+ "deps",
+ "isolate",
+ ])
+
# Remember target name as within the action scope the target name will be
# different.
name = target_name
- if (name != "" && invoker.isolate != "" && invoker.deps != [] &&
- v8_test_isolation_mode != "noop") {
+
+ assert(defined(invoker.deps))
+ assert(defined(invoker.isolate))
+
+ if (name != "" && v8_test_isolation_mode != "noop") {
action(name + "_run") {
testonly = true
@@ -86,6 +97,11 @@ template("v8_isolate_run") {
} else {
icu_use_data_file_flag = "0"
}
+ if (v8_enable_inspector_override) {
+ enable_inspector = "1"
+ } else {
+ enable_inspector = "0"
+ }
if (v8_use_external_startup_data) {
use_external_startup_data = "1"
} else {
@@ -107,7 +123,6 @@ template("v8_isolate_run") {
gcmole = "0"
}
-
# Note, all paths will be rebased in isolate_driver.py to be relative to
# the isolate file.
args = [
@@ -142,6 +157,8 @@ template("v8_isolate_run") {
"--config-variable",
"icu_use_data_file_flag=$icu_use_data_file_flag",
"--config-variable",
+ "is_gn=1",
+ "--config-variable",
"msan=$msan",
"--config-variable",
"tsan=$tsan",
@@ -154,6 +171,8 @@ template("v8_isolate_run") {
"--config-variable",
"target_arch=$target_arch",
"--config-variable",
+ "v8_enable_inspector=$enable_inspector",
+ "--config-variable",
"v8_use_external_startup_data=$use_external_startup_data",
"--config-variable",
"v8_use_snapshot=$use_snapshot",
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index 7ff7f6fb89..3759572b93 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -26,6 +26,10 @@ declare_args() {
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
+
+ # Enable ECMAScript Internationalization API. Enabling this feature will
+ # add a dependency on the ICU library.
+ v8_enable_i18n_support = true
}
if (v8_use_external_startup_data == "") {
@@ -38,15 +42,17 @@ if (v8_enable_backtrace == "") {
v8_enable_backtrace = is_debug && !v8_optimized_debug
}
-###############################################################################
-# Templates
-#
-
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
v8_path_prefix = get_path_info("../", "abspath")
+v8_inspector_js_protocol = v8_path_prefix + "/src/inspector/js_protocol.json"
+
+###############################################################################
+# Templates
+#
+
# Common configs to remove or add in all v8 targets.
v8_remove_configs = [ "//build/config/compiler:chromium_code" ]
v8_add_configs = [
@@ -87,7 +93,15 @@ template("v8_source_set") {
template("v8_executable") {
executable(target_name) {
- forward_variables_from(invoker, "*", [ "configs" ])
+ forward_variables_from(invoker,
+ "*",
+ [
+ "configs",
+ "remove_configs",
+ ])
+ if (defined(invoker.remove_configs)) {
+ configs -= invoker.remove_configs
+ }
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
diff --git a/deps/v8/gypfiles/all.gyp b/deps/v8/gypfiles/all.gyp
index 6b4ef82d69..a3f2eedc77 100644
--- a/deps/v8/gypfiles/all.gyp
+++ b/deps/v8/gypfiles/all.gyp
@@ -25,6 +25,12 @@
'../test/unittests/unittests.gyp:*',
],
}],
+ ['v8_enable_inspector==1', {
+ 'dependencies': [
+ '../test/debugger/debugger.gyp:*',
+ '../test/inspector/inspector.gyp:*',
+ ],
+ }],
['test_isolation_mode != "noop"', {
'dependencies': [
'../test/bot_default.gyp:*',
diff --git a/deps/v8/gypfiles/get_landmines.py b/deps/v8/gypfiles/get_landmines.py
index 432dfd7ae5..e6b6da6c48 100755
--- a/deps/v8/gypfiles/get_landmines.py
+++ b/deps/v8/gypfiles/get_landmines.py
@@ -30,6 +30,7 @@ def main():
print 'Clobber after Android NDK update.'
print 'Clober to fix windows build problems.'
print 'Clober again to fix windows build problems.'
+ print 'Clobber to possibly resolve failure on win-32 bot.'
return 0
diff --git a/deps/v8/gypfiles/isolate.gypi b/deps/v8/gypfiles/isolate.gypi
index 149818c8d0..8f53a153c6 100644
--- a/deps/v8/gypfiles/isolate.gypi
+++ b/deps/v8/gypfiles/isolate.gypi
@@ -74,12 +74,14 @@
'--config-variable', 'gcmole=<(gcmole)',
'--config-variable', 'has_valgrind=<(has_valgrind)',
'--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
+ '--config-variable', 'is_gn=0',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
+ '--config-variable', 'v8_enable_inspector=<(v8_enable_inspector)',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],
diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi
index 7e41ce84ae..d438a5aeab 100644
--- a/deps/v8/gypfiles/standalone.gypi
+++ b/deps/v8/gypfiles/standalone.gypi
@@ -455,6 +455,7 @@
'variables': {
'v8_code%': '<(v8_code)',
'clang_warning_flags': [
+ '-Wsign-compare',
# TODO(thakis): https://crbug.com/604888
'-Wno-undefined-var-template',
# TODO(yangguo): issue 5258
@@ -503,7 +504,9 @@
},
'conditions':[
['clang==0', {
- 'cflags+': ['-Wno-sign-compare',],
+ 'cflags+': [
+ '-Wno-uninitialized',
+ ],
}],
['clang==1 or host_clang==1', {
# This is here so that all files get recompiled after a clang roll and
diff --git a/deps/v8/include/libplatform/DEPS b/deps/v8/include/libplatform/DEPS
index 15e75e6b4f..d8bcf99880 100644
--- a/deps/v8/include/libplatform/DEPS
+++ b/deps/v8/include/libplatform/DEPS
@@ -1,3 +1,7 @@
+include_rules = [
+ "+libplatform/libplatform-export.h",
+]
+
specific_include_rules = {
"libplatform\.h": [
"+libplatform/v8-tracing.h",
diff --git a/deps/v8/include/libplatform/libplatform-export.h b/deps/v8/include/libplatform/libplatform-export.h
new file mode 100644
index 0000000000..1561843497
--- /dev/null
+++ b/deps/v8/include/libplatform/libplatform-export.h
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_LIBPLATFORM_EXPORT_H_
+#define V8_LIBPLATFORM_LIBPLATFORM_EXPORT_H_
+
+#if defined(_WIN32)
+
+#ifdef BUILDING_V8_PLATFORM_SHARED
+#define V8_PLATFORM_EXPORT __declspec(dllexport)
+#elif USING_V8_PLATFORM_SHARED
+#define V8_PLATFORM_EXPORT __declspec(dllimport)
+#else
+#define V8_PLATFORM_EXPORT
+#endif // BUILDING_V8_PLATFORM_SHARED
+
+#else // defined(_WIN32)
+
+// Setup for Linux shared library export.
+#ifdef BUILDING_V8_PLATFORM_SHARED
+#define V8_PLATFORM_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_PLATFORM_EXPORT
+#endif
+
+#endif // defined(_WIN32)
+
+#endif // V8_LIBPLATFORM_LIBPLATFORM_EXPORT_H_
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index 5b5eee6513..40f3f66892 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -5,6 +5,7 @@
#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
#define V8_LIBPLATFORM_LIBPLATFORM_H_
+#include "libplatform/libplatform-export.h"
#include "libplatform/v8-tracing.h"
#include "v8-platform.h" // NOLINT(build/include)
@@ -19,8 +20,8 @@ namespace platform {
* of zero is passed, a suitable default based on the current number of
* processors online will be chosen.
*/
-v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
-
+V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
+ int thread_pool_size = 0);
/**
* Pumps the message loop for the given isolate.
@@ -30,14 +31,15 @@ v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
* not block if no task is pending. The |platform| has to be created using
* |CreateDefaultPlatform|.
*/
-bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate);
+V8_PLATFORM_EXPORT bool PumpMessageLoop(v8::Platform* platform,
+ v8::Isolate* isolate);
/**
* Attempts to set the tracing controller for the given platform.
*
* The |platform| has to be created using |CreateDefaultPlatform|.
*/
-void SetTracingController(
+V8_PLATFORM_EXPORT void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller);
diff --git a/deps/v8/include/libplatform/v8-tracing.h b/deps/v8/include/libplatform/v8-tracing.h
index e9f4941478..902f8ea93d 100644
--- a/deps/v8/include/libplatform/v8-tracing.h
+++ b/deps/v8/include/libplatform/v8-tracing.h
@@ -10,6 +10,7 @@
#include <unordered_set>
#include <vector>
+#include "libplatform/libplatform-export.h"
#include "v8-platform.h" // NOLINT(build/include)
namespace v8 {
@@ -23,7 +24,7 @@ namespace tracing {
const int kTraceMaxNumArgs = 2;
-class TraceObject {
+class V8_PLATFORM_EXPORT TraceObject {
public:
union ArgValue {
bool as_bool;
@@ -103,7 +104,7 @@ class TraceObject {
void operator=(const TraceObject&) = delete;
};
-class TraceWriter {
+class V8_PLATFORM_EXPORT TraceWriter {
public:
TraceWriter() {}
virtual ~TraceWriter() {}
@@ -118,7 +119,7 @@ class TraceWriter {
void operator=(const TraceWriter&) = delete;
};
-class TraceBufferChunk {
+class V8_PLATFORM_EXPORT TraceBufferChunk {
public:
explicit TraceBufferChunk(uint32_t seq);
@@ -142,7 +143,7 @@ class TraceBufferChunk {
void operator=(const TraceBufferChunk&) = delete;
};
-class TraceBuffer {
+class V8_PLATFORM_EXPORT TraceBuffer {
public:
TraceBuffer() {}
virtual ~TraceBuffer() {}
@@ -178,45 +179,37 @@ enum TraceRecordMode {
ECHO_TO_CONSOLE,
};
-class TraceConfig {
+class V8_PLATFORM_EXPORT TraceConfig {
public:
typedef std::vector<std::string> StringList;
static TraceConfig* CreateDefaultTraceConfig();
- TraceConfig()
- : enable_sampling_(false),
- enable_systrace_(false),
- enable_argument_filter_(false) {}
+ TraceConfig() : enable_systrace_(false), enable_argument_filter_(false) {}
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
- bool IsSamplingEnabled() const { return enable_sampling_; }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
- void EnableSampling() { enable_sampling_ = true; }
void EnableSystrace() { enable_systrace_ = true; }
void EnableArgumentFilter() { enable_argument_filter_ = true; }
void AddIncludedCategory(const char* included_category);
- void AddExcludedCategory(const char* excluded_category);
bool IsCategoryGroupEnabled(const char* category_group) const;
private:
TraceRecordMode record_mode_;
- bool enable_sampling_ : 1;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
StringList included_categories_;
- StringList excluded_categories_;
// Disallow copy and assign
TraceConfig(const TraceConfig&) = delete;
void operator=(const TraceConfig&) = delete;
};
-class TracingController {
+class V8_PLATFORM_EXPORT TracingController {
public:
enum Mode { DISABLED = 0, RECORDING_MODE };
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 6ee0340f3c..74c0613946 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -48,7 +48,7 @@ namespace v8 {
/**
* TracingCpuProfiler monitors tracing being enabled/disabled
- * and emits CpuProfile trace events once v8.cpu_profile2 tracing category
+ * and emits CpuProfile trace events once v8.cpu_profiler tracing category
* is enabled. It has no overhead unless the category is enabled.
*/
class V8_EXPORT TracingCpuProfiler {
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 99c59fe302..8133fdd49d 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -206,19 +206,14 @@ class PersistentValueMapBase {
}
/**
- * Deprecated. Call V8::RegisterExternallyReferencedObject with the map value
- * for given key.
- * TODO(hlopko) Remove once migration to reporter is finished.
+ * Call V8::RegisterExternallyReferencedObject with the map value for given
+ * key.
*/
- void RegisterExternallyReferencedObject(K& key) {}
-
- /**
- * Use EmbedderReachableReferenceReporter with the map value for given key.
- */
- void RegisterExternallyReferencedObject(
- EmbedderReachableReferenceReporter* reporter, K& key) {
+ void RegisterExternallyReferencedObject(K& key) {
DCHECK(Contains(key));
- reporter->ReportExternalReference(FromVal(Traits::Get(&impl_, key)));
+ V8::RegisterExternallyReferencedObject(
+ reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
+ reinterpret_cast<internal::Isolate*>(GetIsolate()));
}
/**
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index b216cf04c3..b694987633 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 5
-#define V8_BUILD_NUMBER 372
-#define V8_PATCH_LEVEL 40
+#define V8_MINOR_VERSION 6
+#define V8_BUILD_NUMBER 326
+#define V8_PATCH_LEVEL 55
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 36edf5334a..5348ba7e48 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -35,11 +35,6 @@
// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
// static library or building a program which uses the V8 static library neither
// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-#if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
-#error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
- build configuration to ensure that at most one of these is set
-#endif
-
#ifdef BUILDING_V8_SHARED
# define V8_EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
@@ -468,16 +463,6 @@ class WeakCallbackInfo {
enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
/**
- * A reporter class that embedder will use to report reachable references found
- * by EmbedderHeapTracer.
- */
-class V8_EXPORT EmbedderReachableReferenceReporter {
- public:
- virtual void ReportExternalReference(Value* object) = 0;
- virtual ~EmbedderReachableReferenceReporter() = default;
-};
-
-/**
* An object reference that is independent of any handle scope. Where
* a Local handle only lives as long as the HandleScope in which it was
* allocated, a PersistentBase handle remains valid until it is explicitly
@@ -574,18 +559,11 @@ template <class T> class PersistentBase {
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
- * Deprecated.
- * TODO(hlopko): remove once migration to reporter is finished.
- */
- V8_INLINE void RegisterExternalReference(Isolate* isolate) const {}
-
- /**
* Allows the embedder to tell the v8 garbage collector that a certain object
* is alive. Only allowed when the embedder is asked to trace its heap by
* EmbedderHeapTracer.
*/
- V8_INLINE void RegisterExternalReference(
- EmbedderReachableReferenceReporter* reporter) const;
+ V8_INLINE void RegisterExternalReference(Isolate* isolate) const;
/**
* Marks the reference to this object independent. Garbage collector is free
@@ -596,18 +574,6 @@ template <class T> class PersistentBase {
V8_INLINE void MarkIndependent();
/**
- * Marks the reference to this object partially dependent. Partially dependent
- * handles only depend on other partially dependent handles and these
- * dependencies are provided through object groups. It provides a way to build
- * smaller object groups for young objects that represent only a subset of all
- * external dependencies. This mark is automatically cleared after each
- * garbage collection.
- */
- V8_INLINE V8_DEPRECATED(
- "deprecated optimization, do not use partially dependent groups",
- void MarkPartiallyDependent());
-
- /**
* Marks the reference to this object as active. The scavenge garbage
* collection should not reclaim the objects marked as active.
* This bit is cleared after the each garbage collection pass.
@@ -1106,22 +1072,22 @@ class V8_EXPORT Module {
*/
Local<String> GetModuleRequest(int i) const;
- void SetEmbedderData(Local<Value> data);
- Local<Value> GetEmbedderData() const;
+ /**
+ * Returns the identity hash for this object.
+ */
+ int GetIdentityHash() const;
typedef MaybeLocal<Module> (*ResolveCallback)(Local<Context> context,
Local<String> specifier,
- Local<Module> referrer,
- Local<Value> data);
+ Local<Module> referrer);
/**
* ModuleDeclarationInstantiation
*
* Returns false if an exception occurred during instantiation.
*/
- V8_WARN_UNUSED_RESULT bool Instantiate(
- Local<Context> context, ResolveCallback callback,
- Local<Value> callback_data = Local<Value>());
+ V8_WARN_UNUSED_RESULT bool Instantiate(Local<Context> context,
+ ResolveCallback callback);
/**
* ModuleEvaluation
@@ -1745,6 +1711,19 @@ class V8_EXPORT ValueSerializer {
* Nothing<bool>() returned.
*/
virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
+
+ /*
+ * Allocates memory for the buffer of at least the size provided. The actual
+ * size (which may be greater or equal) is written to |actual_size|. If no
+ * buffer has been allocated yet, nullptr will be provided.
+ */
+ virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
+ size_t* actual_size);
+
+ /*
+ * Frees a buffer allocated with |ReallocateBufferMemory|.
+ */
+ virtual void FreeBufferMemory(void* buffer);
};
explicit ValueSerializer(Isolate* isolate);
@@ -1766,7 +1745,15 @@ class V8_EXPORT ValueSerializer {
* Returns the stored data. This serializer should not be used once the buffer
* is released. The contents are undefined if a previous write has failed.
*/
- std::vector<uint8_t> ReleaseBuffer();
+ V8_DEPRECATE_SOON("Use Release()", std::vector<uint8_t> ReleaseBuffer());
+
+ /*
+ * Returns the stored data (allocated using the delegate's
+ * AllocateBufferMemory) and its size. This serializer should not be used once
+ * the buffer is released. The contents are undefined if a previous write has
+ * failed.
+ */
+ V8_WARN_UNUSED_RESULT std::pair<uint8_t*, size_t> Release();
/*
* Marks an ArrayBuffer as havings its contents transferred out of band.
@@ -1832,7 +1819,6 @@ class V8_EXPORT ValueDeserializer {
* May, for example, reject an invalid or unsupported wire format.
*/
V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
- V8_DEPRECATE_SOON("Use Local<Context> version", Maybe<bool> ReadHeader());
/*
* Deserializes a JavaScript value from the buffer.
@@ -3542,7 +3528,7 @@ class PropertyCallbackInfo {
/**
* \return The receiver. In many cases, this is the object on which the
* property access was intercepted. When using
- * `Reflect.Get`, `Function.prototype.call`, or similar functions, it is the
+ * `Reflect.get`, `Function.prototype.call`, or similar functions, it is the
* object passed in as receiver or thisArg.
*
* \code
@@ -3607,7 +3593,7 @@ class PropertyCallbackInfo {
* \return True if the intercepted function should throw if an error occurs.
* Usually, `true` corresponds to `'use strict'`.
*
- * \note Always `false` when intercepting `Reflect.Set()`
+ * \note Always `false` when intercepting `Reflect.set()`
* independent of the language mode.
*/
V8_INLINE bool ShouldThrowOnError() const;
@@ -3902,13 +3888,29 @@ class V8_EXPORT Proxy : public Object {
class V8_EXPORT WasmCompiledModule : public Object {
public:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
+ // A buffer that is owned by the caller.
+ typedef std::pair<const uint8_t*, size_t> CallerOwnedBuffer;
+ // Get the wasm-encoded bytes that were used to compile this module.
+ Local<String> GetWasmWireBytes();
+ // Serialize the compiled module. The serialized data does not include the
+ // uncompiled bytes.
SerializedModule Serialize();
- static MaybeLocal<WasmCompiledModule> Deserialize(
- Isolate* isolate, const SerializedModule& serialized_data);
+
+ // If possible, deserialize the module, otherwise compile it from the provided
+ // uncompiled bytes.
+ static MaybeLocal<WasmCompiledModule> DeserializeOrCompile(
+ Isolate* isolate, const CallerOwnedBuffer& serialized_module,
+ const CallerOwnedBuffer& wire_bytes);
V8_INLINE static WasmCompiledModule* Cast(Value* obj);
private:
+ static MaybeLocal<WasmCompiledModule> Deserialize(
+ Isolate* isolate, const CallerOwnedBuffer& serialized_module,
+ const CallerOwnedBuffer& wire_bytes);
+ static MaybeLocal<WasmCompiledModule> Compile(Isolate* isolate,
+ const uint8_t* start,
+ size_t length);
WasmCompiledModule();
static void CheckCast(Value* obj);
};
@@ -4623,6 +4625,8 @@ class V8_EXPORT Template : public Data {
*/
void Set(Local<Name> name, Local<Data> value,
PropertyAttribute attributes = None);
+ void SetPrivate(Local<Private> name, Local<Data> value,
+ PropertyAttribute attributes = None);
V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value);
void SetAccessorProperty(
@@ -4675,6 +4679,14 @@ class V8_EXPORT Template : public Data {
AccessControl settings = DEFAULT);
/**
+ * Like SetNativeDataProperty, but V8 will replace the native data property
+ * with a real data property on first access.
+ */
+ void SetLazyDataProperty(Local<Name> name, AccessorNameGetterCallback getter,
+ Local<Value> data = Local<Value>(),
+ PropertyAttribute attribute = None);
+
+ /**
* During template instantiation, sets the value with the intrinsic property
* from the correct context.
*/
@@ -5095,6 +5107,14 @@ class V8_EXPORT FunctionTemplate : public Template {
Local<Value> data = Local<Value>(),
Local<Signature> signature = Local<Signature>(), int length = 0);
+ /**
+ * Creates a function template backed/cached by a private property.
+ */
+ static Local<FunctionTemplate> NewWithCache(
+ Isolate* isolate, FunctionCallback callback,
+ Local<Private> cache_property, Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0);
+
/** Returns the unique function instance in the current execution context.*/
V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
@@ -5676,6 +5696,10 @@ class V8_EXPORT ResourceConstraints {
void set_code_range_size(size_t limit_in_mb) {
code_range_size_ = limit_in_mb;
}
+ size_t max_zone_pool_size() const { return max_zone_pool_size_; }
+ void set_max_zone_pool_size(const size_t bytes) {
+ max_zone_pool_size_ = bytes;
+ }
private:
int max_semi_space_size_;
@@ -5683,6 +5707,7 @@ class V8_EXPORT ResourceConstraints {
int max_executable_size_;
uint32_t* stack_limit_;
size_t code_range_size_;
+ size_t max_zone_pool_size_;
};
@@ -6145,11 +6170,11 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT
enum class MemoryPressureLevel { kNone, kModerate, kCritical };
/**
- * Interface for tracing through the embedder heap. During the v8 garbage
+ * Interface for tracing through the embedder heap. During a v8 garbage
* collection, v8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
- * trace through its heap and use reporter to report each js object reachable
- * from any of the given wrappers.
+ * trace through its heap and use reporter to report each JavaScript object
+ * reachable from any of the given wrappers.
*
* Before the first call to the TraceWrappersFrom function TracePrologue will be
* called. When the garbage collection cycle is finished, TraceEpilogue will be
@@ -6167,30 +6192,26 @@ class V8_EXPORT EmbedderHeapTracer {
};
/**
- * V8 will call this method with internal fields of found wrappers. The
- * embedder is expected to store them in its marking deque and trace
- * reachable wrappers from them when called through |AdvanceTracing|.
+ * Called by v8 to register internal fields of found wrappers.
+ *
+ * The embedder is expected to store them somewhere and trace reachable
+ * wrappers from them when called through |AdvanceTracing|.
*/
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*> >& internal_fields) = 0;
/**
- * Deprecated.
- * TODO(hlopko) Remove once the migration to reporter is finished.
- */
- virtual void TracePrologue() {}
-
- /**
- * V8 will call this method at the beginning of a GC cycle. Embedder is
- * expected to use EmbedderReachableReferenceReporter for reporting all
- * reachable v8 objects.
+ * Called at the beginning of a GC cycle.
*/
- virtual void TracePrologue(EmbedderReachableReferenceReporter* reporter) {}
+ virtual void TracePrologue() = 0;
/**
- * Embedder is expected to trace its heap starting from wrappers reported by
- * RegisterV8References method, and use reporter for all reachable wrappers.
- * Embedder is expected to stop tracing by the given deadline.
+ * Called to to make a tracing step in the embedder.
+ *
+ * The embedder is expected to trace its heap starting from wrappers reported
+ * by RegisterV8References method, and report back all reachable wrappers.
+ * Furthermore, the embedder is expected to stop tracing by the given
+ * deadline.
*
* Returns true if there is still work to do.
*/
@@ -6198,22 +6219,25 @@ class V8_EXPORT EmbedderHeapTracer {
AdvanceTracingActions actions) = 0;
/**
- * V8 will call this method at the end of a GC cycle.
+ * Called at the end of a GC cycle.
*
* Note that allocation is *not* allowed within |TraceEpilogue|.
*/
virtual void TraceEpilogue() = 0;
/**
- * Let embedder know v8 entered final marking pause (no more incremental steps
- * will follow).
+ * Called upon entering the final marking pause. No more incremental marking
+ * steps will follow this call.
*/
- virtual void EnterFinalPause() {}
+ virtual void EnterFinalPause() = 0;
/**
- * Throw away all intermediate data and reset to the initial state.
+ * Called when tracing is aborted.
+ *
+ * The embedder is expected to throw away all intermediate data and reset to
+ * the initial state.
*/
- virtual void AbortTracing() {}
+ virtual void AbortTracing() = 0;
/**
* Returns the number of wrappers that are still to be traced by the embedder.
@@ -6225,6 +6249,19 @@ class V8_EXPORT EmbedderHeapTracer {
};
/**
+ * Callback to the embedder used in SnapshotCreator to handle internal fields.
+ */
+typedef StartupData (*SerializeInternalFieldsCallback)(Local<Object> holder,
+ int index);
+
+/**
+ * Callback to the embedder used to deserialize internal fields.
+ */
+typedef void (*DeserializeInternalFieldsCallback)(Local<Object> holder,
+ int index,
+ StartupData payload);
+
+/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
* completely separate states. Objects from one isolate must not be used in
* other isolates. The embedder can create multiple isolates and use them in
@@ -6246,7 +6283,8 @@ class V8_EXPORT Isolate {
create_histogram_callback(nullptr),
add_histogram_sample_callback(nullptr),
array_buffer_allocator(nullptr),
- external_references(nullptr) {}
+ external_references(nullptr),
+ deserialize_internal_fields_callback(nullptr) {}
/**
* The optional entry_hook allows the host application to provide the
@@ -6302,6 +6340,12 @@ class V8_EXPORT Isolate {
* entire lifetime of the isolate.
*/
intptr_t* external_references;
+
+ /**
+ * Specifies an optional callback to deserialize internal fields. It
+ * should match the SerializeInternalFieldCallback used to serialize.
+ */
+ DeserializeInternalFieldsCallback deserialize_internal_fields_callback;
};
@@ -7506,6 +7550,9 @@ class V8_EXPORT V8 {
int* index);
static Local<Value> GetEternal(Isolate* isolate, int index);
+ static void RegisterExternallyReferencedObject(internal::Object** object,
+ internal::Isolate* isolate);
+
template <class K, class V, class T>
friend class PersistentValueMapBase;
@@ -7528,7 +7575,7 @@ class V8_EXPORT V8 {
/**
* Helper class to create a snapshot data blob.
*/
-class SnapshotCreator {
+class V8_EXPORT SnapshotCreator {
public:
enum class FunctionCodeHandling { kClear, kKeep };
@@ -7567,10 +7614,12 @@ class SnapshotCreator {
* This must not be called from within a handle scope.
* \param function_code_handling whether to include compiled function code
* in the snapshot.
+ * \param callback to serialize embedder-set internal fields.
* \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
* caller acquires ownership of the data array in the return value.
*/
- StartupData CreateBlob(FunctionCodeHandling function_code_handling);
+ StartupData CreateBlob(FunctionCodeHandling function_code_handling,
+ SerializeInternalFieldsCallback callback = nullptr);
// Disallow copying and assigning.
SnapshotCreator(const SnapshotCreator&) = delete;
@@ -7824,7 +7873,6 @@ class V8_EXPORT ExtensionConfiguration {
const char** names_;
};
-
/**
* A sandboxed execution context with its own set of built-in objects
* and functions.
@@ -8286,11 +8334,10 @@ class Internals {
static const int kNodeStateIsPendingValue = 3;
static const int kNodeStateIsNearDeathValue = 4;
static const int kNodeIsIndependentShift = 3;
- static const int kNodeIsPartiallyDependentShift = 4;
static const int kNodeIsActiveShift = 4;
- static const int kJSObjectType = 0xb9;
- static const int kJSApiObjectType = 0xb8;
+ static const int kJSObjectType = 0xbc;
+ static const int kJSApiObjectType = 0xbb;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -8566,10 +8613,11 @@ P* PersistentBase<T>::ClearWeak() {
}
template <class T>
-void PersistentBase<T>::RegisterExternalReference(
- EmbedderReachableReferenceReporter* reporter) const {
+void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
if (IsEmpty()) return;
- reporter->ReportExternalReference(this->val_);
+ V8::RegisterExternallyReferencedObject(
+ reinterpret_cast<internal::Object**>(this->val_),
+ reinterpret_cast<internal::Isolate*>(isolate));
}
template <class T>
@@ -8581,17 +8629,6 @@ void PersistentBase<T>::MarkIndependent() {
I::kNodeIsIndependentShift);
}
-
-template <class T>
-void PersistentBase<T>::MarkPartiallyDependent() {
- typedef internal::Internals I;
- if (this->IsEmpty()) return;
- I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
- true,
- I::kNodeIsPartiallyDependentShift);
-}
-
-
template <class T>
void PersistentBase<T>::MarkActive() {
typedef internal::Internals I;
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index 3c645fd90b..e93895f382 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -104,7 +104,7 @@ verifiers {
}
builders {
name: "v8_linux64_sanitizer_coverage_rel"
- experiment_percentage: 100
+ experiment_percentage: 20
}
}
buckets {
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 2747be5909..d6a2a2dc4a 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -40,7 +40,7 @@
'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
# Linux64.
- 'V8 Linux64 - builder': 'gn_release_x64',
+ 'V8 Linux64 - builder': 'gn_release_x64_valgrind',
'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
@@ -156,7 +156,7 @@
'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
- 'v8_linux64_rel_ng': 'gn_release_x64_trybot',
+ 'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
@@ -296,6 +296,10 @@
'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
+ 'gn_release_x64_valgrind': [
+ 'gn', 'release_bot', 'x64', 'swarming', 'valgrind'],
+ 'gn_release_x64_valgrind_trybot': [
+ 'gn', 'release_trybot', 'x64', 'swarming', 'valgrind'],
# GN debug configs for x64.
'gn_debug_x64': [
@@ -317,7 +321,8 @@
'gn_debug_x86_minimal_symbols': [
'gn', 'debug_bot', 'x86', 'minimal_symbols', 'swarming'],
'gn_debug_x86_no_i18n': [
- 'gn', 'debug_bot', 'x86', 'v8_no_i18n'],
+ 'gn', 'debug_bot', 'x86', 'swarming', 'v8_disable_inspector',
+ 'v8_no_i18n'],
'gn_debug_x86_no_snap': [
'gn', 'debug_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_debug_x86_no_snap_trybot': [
@@ -339,7 +344,8 @@
'gn_release_x86_minimal_symbols': [
'gn', 'release_bot', 'x86', 'minimal_symbols', 'swarming'],
'gn_release_x86_no_i18n_trybot': [
- 'gn', 'release_trybot', 'x86', 'swarming', 'v8_no_i18n'],
+ 'gn', 'release_trybot', 'x86', 'swarming', 'v8_disable_inspector',
+ 'v8_no_i18n'],
'gn_release_x86_no_snap': [
'gn', 'release_bot', 'x86', 'swarming', 'v8_snapshot_none'],
'gn_release_x86_no_snap_shared_minimal_symbols': [
@@ -354,7 +360,8 @@
# Gyp debug configs for simulators.
'gyp_debug_simulate_x87_no_snap': [
- 'gyp', 'debug_bot', 'simulate_x87', 'swarming', 'v8_snapshot_none'],
+ 'gyp', 'debug_bot_static', 'simulate_x87', 'swarming',
+ 'v8_snapshot_none'],
# Gyp debug configs for x86.
'gyp_debug_x86': [
@@ -461,6 +468,12 @@
'v8_optimized_debug'],
},
+ 'debug_bot_static': {
+ 'mixins': [
+ 'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
+ 'v8_optimized_debug'],
+ },
+
'debug_trybot': {
'mixins': ['debug_bot', 'minimal_symbols'],
},
@@ -613,6 +626,11 @@
'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
},
+ 'v8_disable_inspector': {
+ 'gn_args': 'v8_enable_inspector=false',
+ 'gyp_defines': 'v8_enable_inspector=0 ',
+ },
+
'v8_enable_disassembler': {
'gn_args': 'v8_enable_disassembler=true',
'gyp_defines': 'v8_enable_disassembler=1',
diff --git a/deps/v8/samples/samples.gyp b/deps/v8/samples/samples.gyp
index e5e9ef0f8c..e7c26cf262 100644
--- a/deps/v8/samples/samples.gyp
+++ b/deps/v8/samples/samples.gyp
@@ -36,6 +36,7 @@
'type': 'executable',
'dependencies': [
'../src/v8.gyp:v8',
+ '../src/v8.gyp:v8_libbase',
'../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index b1c428d5d7..9114669a6d 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -8,6 +8,7 @@ include_rules = [
"-src/heap",
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
+ "-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-iterator.h",
"+src/interpreter/bytecode-decoder.h",
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index da44151b3e..9ec24b84c7 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -19,13 +19,9 @@
namespace v8 {
namespace internal {
-
Handle<AccessorInfo> Accessors::MakeAccessor(
- Isolate* isolate,
- Handle<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter,
- PropertyAttributes attributes) {
+ Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes) {
Factory* factory = isolate->factory();
Handle<AccessorInfo> info = factory->NewAccessorInfo();
info->set_property_attributes(attributes);
@@ -33,6 +29,7 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_all_can_write(false);
info->set_is_special_data_property(true);
info->set_is_sloppy(false);
+ info->set_replace_on_access(false);
name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
@@ -106,7 +103,7 @@ MUST_USE_RESULT MaybeHandle<Object> ReplaceAccessorWithDataProperty(
void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> receiver = Utils::OpenHandle(*info.This());
@@ -116,7 +113,11 @@ void Accessors::ReconfigureToDataProperty(
Handle<Object> value = Utils::OpenHandle(*val);
MaybeHandle<Object> result =
ReplaceAccessorWithDataProperty(isolate, receiver, holder, name, value);
- if (result.is_null()) isolate->OptionalRescheduleException(false);
+ if (result.is_null()) {
+ isolate->OptionalRescheduleException(false);
+ } else {
+ info.GetReturnValue().Set(true);
+ }
}
//
@@ -151,6 +152,8 @@ void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::AccessorNameGetterCallback_ArrayLength);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -158,11 +161,9 @@ void Accessors::ArrayLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
-
void Accessors::ArrayLengthSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
+ v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
@@ -178,17 +179,21 @@ void Accessors::ArrayLengthSetter(
JSArray::SetLength(array, length);
- if (info.ShouldThrowOnError()) {
- uint32_t actual_new_len = 0;
- CHECK(array->length()->ToArrayLength(&actual_new_len));
- // Throw TypeError if there were non-deletable elements.
- if (actual_new_len != length) {
+ uint32_t actual_new_len = 0;
+ CHECK(array->length()->ToArrayLength(&actual_new_len));
+ // Fail if there were non-deletable elements.
+ if (actual_new_len != length) {
+ if (info.ShouldThrowOnError()) {
Factory* factory = isolate->factory();
isolate->Throw(*factory->NewTypeError(
MessageTemplate::kStrictDeleteProperty,
factory->NewNumberFromUint(actual_new_len - 1), array));
isolate->OptionalRescheduleException(false);
+ } else {
+ info.GetReturnValue().Set(false);
}
+ } else {
+ info.GetReturnValue().Set(true);
}
}
@@ -202,6 +207,50 @@ Handle<AccessorInfo> Accessors::ArrayLengthInfo(
attributes);
}
+//
+// Accessors::ModuleNamespaceEntry
+//
+
+void Accessors::ModuleNamespaceEntryGetter(
+ v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ JSModuleNamespace* holder =
+ JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder()));
+ Handle<Object> result;
+ if (!holder->GetExport(Handle<String>::cast(Utils::OpenHandle(*name)))
+ .ToHandle(&result)) {
+ isolate->OptionalRescheduleException(false);
+ } else {
+ info.GetReturnValue().Set(Utils::ToLocal(result));
+ }
+}
+
+void Accessors::ModuleNamespaceEntrySetter(
+ v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+ Handle<JSModuleNamespace> holder =
+ Handle<JSModuleNamespace>::cast(Utils::OpenHandle(*info.Holder()));
+
+ if (info.ShouldThrowOnError()) {
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
+ i::Object::TypeOf(isolate, holder), holder));
+ isolate->OptionalRescheduleException(false);
+ } else {
+ info.GetReturnValue().Set(false);
+ }
+}
+
+Handle<AccessorInfo> Accessors::ModuleNamespaceEntryInfo(
+ Isolate* isolate, Handle<String> name, PropertyAttributes attributes) {
+ return MakeAccessor(isolate, name, &ModuleNamespaceEntryGetter,
+ &ModuleNamespaceEntrySetter, attributes);
+}
+
//
// Accessors::StringLength
@@ -211,6 +260,8 @@ void Accessors::StringLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::AccessorNameGetterCallback_StringLength);
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
@@ -418,40 +469,6 @@ Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
//
-// Accessors::ScriptGetLineEnds
-//
-
-
-void Accessors::ScriptLineEndsGetter(
- v8::Local<v8::Name> name,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<Object> object = Utils::OpenHandle(*info.Holder());
- Handle<Script> script(
- Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
- Script::InitLineEnds(script);
- DCHECK(script->line_ends()->IsFixedArray());
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
- // We do not want anyone to modify this array from JS.
- DCHECK(*line_ends == isolate->heap()->empty_fixed_array() ||
- line_ends->map() == isolate->heap()->fixed_cow_array_map());
- Handle<JSArray> js_array =
- isolate->factory()->NewJSArrayWithElements(line_ends);
- info.GetReturnValue().Set(Utils::ToLocal(js_array));
-}
-
-
-Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
- Isolate* isolate, PropertyAttributes attributes) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("line_ends")));
- return MakeAccessor(isolate, name, &ScriptLineEndsGetter, nullptr,
- attributes);
-}
-
-
-//
// Accessors::ScriptSourceUrl
//
@@ -691,6 +708,8 @@ void Accessors::FunctionPrototypeGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::AccessorNameGetterCallback_FunctionPrototype);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -698,11 +717,9 @@ void Accessors::FunctionPrototypeGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
void Accessors::FunctionPrototypeSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
+ v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
@@ -710,6 +727,8 @@ void Accessors::FunctionPrototypeSetter(
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
if (SetFunctionPrototype(isolate, object, value).is_null()) {
isolate->OptionalRescheduleException(false);
+ } else {
+ info.GetReturnValue().Set(true);
}
}
@@ -738,7 +757,7 @@ void Accessors::FunctionLengthGetter(
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
if (!JSFunction::GetLength(isolate, function).ToHandle(&result)) {
- result = handle(Smi::FromInt(0), isolate);
+ result = handle(Smi::kZero, isolate);
isolate->OptionalRescheduleException(false);
}
@@ -1064,6 +1083,9 @@ Handle<AccessorInfo> Accessors::FunctionCallerInfo(
void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(
+ isolate,
+ &RuntimeCallStats::AccessorNameGetterCallback_BoundFunctionLength);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1072,7 +1094,7 @@ void Accessors::BoundFunctionLengthGetter(
Handle<JSFunction> target(JSFunction::cast(function->bound_target_function()),
isolate);
if (!JSFunction::GetLength(isolate, target).ToHandle(&target_length)) {
- target_length = handle(Smi::FromInt(0), isolate);
+ target_length = handle(Smi::kZero, isolate);
isolate->OptionalRescheduleException(false);
return;
}
@@ -1098,6 +1120,8 @@ Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ RuntimeCallTimerScope timer(
+ isolate, &RuntimeCallStats::AccessorNameGetterCallback_BoundFunctionName);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1207,9 +1231,9 @@ void Accessors::ErrorStackGetter(
info.GetReturnValue().Set(value);
}
-void Accessors::ErrorStackSetter(v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
+void Accessors::ErrorStackSetter(
+ v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> obj =
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 2171a35c74..f53d30986c 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -37,7 +37,6 @@ class AccessorInfo;
V(ScriptEvalFromScriptPosition) \
V(ScriptEvalFromFunctionName) \
V(ScriptId) \
- V(ScriptLineEnds) \
V(ScriptLineOffset) \
V(ScriptName) \
V(ScriptSource) \
@@ -48,10 +47,11 @@ class AccessorInfo;
V(StringLength)
#define ACCESSOR_SETTER_LIST(V) \
- V(ReconfigureToDataProperty) \
V(ArrayLengthSetter) \
V(ErrorStackSetter) \
- V(FunctionPrototypeSetter)
+ V(FunctionPrototypeSetter) \
+ V(ModuleNamespaceEntrySetter) \
+ V(ReconfigureToDataProperty)
// Accessors contains all predefined proxy accessors.
@@ -70,10 +70,16 @@ class Accessors : public AllStatic {
#define ACCESSOR_SETTER_DECLARATION(name) \
static void name(v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
- const v8::PropertyCallbackInfo<void>& info);
+ const v8::PropertyCallbackInfo<v8::Boolean>& info);
ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_SETTER_DECLARATION
+ static void ModuleNamespaceEntryGetter(
+ v8::Local<v8::Name> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info);
+ static Handle<AccessorInfo> ModuleNamespaceEntryInfo(
+ Isolate* isolate, Handle<String> name, PropertyAttributes attributes);
+
enum DescriptorId {
#define ACCESSOR_INFO_DECLARATION(name) \
k##name##Getter, \
@@ -93,12 +99,21 @@ class Accessors : public AllStatic {
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset);
+ // Create an AccessorInfo. The setter is optional (can be nullptr).
+ //
+ // Note that the type of setter is AccessorNameBooleanSetterCallback instead
+ // of v8::AccessorNameSetterCallback. The difference is that the former can
+ // set a (boolean) return value. The setter should roughly follow the same
+ // conventions as many of the internal methods in objects.cc:
+ // - The return value is unset iff there was an exception.
+ // - If the ShouldThrow argument is true, the return value must not be false.
+ typedef void (*AccessorNameBooleanSetterCallback)(
+ Local<v8::Name> property, Local<v8::Value> value,
+ const PropertyCallbackInfo<v8::Boolean>& info);
+
static Handle<AccessorInfo> MakeAccessor(
- Isolate* isolate,
- Handle<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter,
- PropertyAttributes attributes);
+ Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes);
};
} // namespace internal
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 3122b33693..79f8e62d54 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -13,7 +13,7 @@ namespace internal {
RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != NULL) return;
- map_ = new base::HashMap();
+ map_ = new HeapObjectToIndexHashMap();
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
Object* root = isolate->heap()->root(root_index);
@@ -22,12 +22,12 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
// not be referenced through the root list in the snapshot.
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
- base::HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
- if (entry != NULL) {
+ Maybe<uint32_t> maybe_index = map_->Get(heap_object);
+ if (maybe_index.IsJust()) {
// Some are initialized to a previous value in the root list.
- DCHECK_LT(GetValue(entry), i);
+ DCHECK_LT(maybe_index.FromJust(), i);
} else {
- SetValue(LookupEntry(map_, heap_object, true), i);
+ map_->Set(heap_object, i);
}
} else {
// Immortal immovable root objects are constant and allocated on the first
diff --git a/deps/v8/src/address-map.h b/deps/v8/src/address-map.h
index 95e9cb064b..d50847fcd4 100644
--- a/deps/v8/src/address-map.h
+++ b/deps/v8/src/address-map.h
@@ -5,6 +5,7 @@
#ifndef V8_ADDRESS_MAP_H_
#define V8_ADDRESS_MAP_H_
+#include "include/v8.h"
#include "src/assert-scope.h"
#include "src/base/hashmap.h"
#include "src/objects.h"
@@ -12,49 +13,50 @@
namespace v8 {
namespace internal {
-class AddressMapBase {
- protected:
- static void SetValue(base::HashMap::Entry* entry, uint32_t v) {
- entry->value = reinterpret_cast<void*>(v);
- }
+template <typename Type>
+class PointerToIndexHashMap
+ : public base::TemplateHashMapImpl<uintptr_t, uint32_t,
+ base::KeyEqualityMatcher<intptr_t>,
+ base::DefaultAllocationPolicy> {
+ public:
+ typedef base::TemplateHashMapEntry<uintptr_t, uint32_t> Entry;
- static uint32_t GetValue(base::HashMap::Entry* entry) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+ inline void Set(Type value, uint32_t index) {
+ uintptr_t key = Key(value);
+ LookupOrInsert(key, Hash(key))->value = index;
}
- inline static base::HashMap::Entry* LookupEntry(base::HashMap* map,
- HeapObject* obj,
- bool insert) {
- if (insert) {
- map->LookupOrInsert(Key(obj), Hash(obj));
- }
- return map->Lookup(Key(obj), Hash(obj));
+ inline Maybe<uint32_t> Get(Type value) const {
+ uintptr_t key = Key(value);
+ Entry* entry = Lookup(key, Hash(key));
+ if (entry == nullptr) return Nothing<uint32_t>();
+ return Just(entry->value);
}
private:
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ static uintptr_t Key(Type value) {
+ return reinterpret_cast<uintptr_t>(value);
}
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
+ static uint32_t Hash(uintptr_t key) { return static_cast<uint32_t>(key); }
};
-class RootIndexMap : public AddressMapBase {
+class AddressToIndexHashMap : public PointerToIndexHashMap<Address> {};
+class HeapObjectToIndexHashMap : public PointerToIndexHashMap<HeapObject*> {};
+
+class RootIndexMap {
public:
explicit RootIndexMap(Isolate* isolate);
static const int kInvalidRootIndex = -1;
int Lookup(HeapObject* obj) {
- base::HashMap::Entry* entry = LookupEntry(map_, obj, false);
- if (entry) return GetValue(entry);
- return kInvalidRootIndex;
+ Maybe<uint32_t> maybe_index = map_->Get(obj);
+ return maybe_index.IsJust() ? maybe_index.FromJust() : kInvalidRootIndex;
}
private:
- base::HashMap* map_;
+ HeapObjectToIndexHashMap* map_;
DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};
@@ -186,21 +188,21 @@ class SerializerReference {
// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
-class SerializerReferenceMap : public AddressMapBase {
+class SerializerReferenceMap {
public:
SerializerReferenceMap()
: no_allocation_(), map_(), attached_reference_index_(0) {}
SerializerReference Lookup(HeapObject* obj) {
- base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
- return entry ? SerializerReference(GetValue(entry)) : SerializerReference();
+ Maybe<uint32_t> maybe_index = map_.Get(obj);
+ return maybe_index.IsJust() ? SerializerReference(maybe_index.FromJust())
+ : SerializerReference();
}
void Add(HeapObject* obj, SerializerReference b) {
DCHECK(b.is_valid());
- DCHECK_NULL(LookupEntry(&map_, obj, false));
- base::HashMap::Entry* entry = LookupEntry(&map_, obj, true);
- SetValue(entry, b.bitfield_);
+ DCHECK(map_.Get(obj).IsNothing());
+ map_.Set(obj, b.bitfield_);
}
SerializerReference AddAttachedReference(HeapObject* attached_reference) {
@@ -212,7 +214,7 @@ class SerializerReferenceMap : public AddressMapBase {
private:
DisallowHeapAllocation no_allocation_;
- base::HashMap map_;
+ HeapObjectToIndexHashMap map_;
int attached_reference_index_;
DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index a92b71f08e..e87a3f1b1c 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -5,6 +5,7 @@
#ifndef V8_ALLOCATION_H_
#define V8_ALLOCATION_H_
+#include "src/base/compiler-specific.h"
#include "src/globals.h"
namespace v8 {
@@ -39,7 +40,7 @@ class Embedded {
void* operator new(size_t size);
void operator delete(void* p);
};
-#define BASE_EMBEDDED : public Embedded
+#define BASE_EMBEDDED : public NON_EXPORTED_BASE(Embedded)
#else
#define BASE_EMBEDDED
#endif
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 9e01f3ae7d..d6d1b951af 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -88,7 +88,7 @@ class PropertyCallbackArguments
Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
// Here the hole is set as default value.
- // It cannot escape into js as it's remove in Call below.
+ // It cannot escape into js as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index ea2cce5c88..3fe59e293d 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -437,9 +437,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
JSObject::GetProperty(parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
- MAYBE_RETURN(JSObject::SetPrototype(prototype, parent_prototype, false,
- Object::THROW_ON_ERROR),
- MaybeHandle<JSFunction>());
+ JSObject::ForceSetPrototype(prototype, parent_prototype);
}
}
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
@@ -533,24 +531,22 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- const int kSize = 3;
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
- Handle<Object> data[kSize] = {name, details_handle, value};
- AddPropertyToPropertyList(isolate, info, kSize, data);
+ Handle<Object> data[] = {name, details_handle, value};
+ AddPropertyToPropertyList(isolate, info, arraysize(data), data);
}
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, v8::Intrinsic intrinsic,
PropertyAttributes attributes) {
- const int kSize = 4;
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
- Handle<Object> data[kSize] = {name, intrinsic_marker, details_handle, value};
- AddPropertyToPropertyList(isolate, info, kSize, data);
+ Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
+ AddPropertyToPropertyList(isolate, info, arraysize(data), data);
}
@@ -560,11 +556,10 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
- const int kSize = 4;
PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
- Handle<Object> data[kSize] = {name, details_handle, getter, setter};
- AddPropertyToPropertyList(isolate, info, kSize, data);
+ Handle<Object> data[] = {name, details_handle, getter, setter};
+ AddPropertyToPropertyList(isolate, info, arraysize(data), data);
}
@@ -618,10 +613,12 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
int internal_field_count = 0;
+ bool immutable_proto = false;
if (!obj->instance_template()->IsUndefined(isolate)) {
Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->instance_template()));
internal_field_count = instance_template->internal_field_count();
+ immutable_proto = instance_template->immutable_proto();
}
// TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
@@ -681,6 +678,8 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
map->set_is_constructor(true);
}
+ if (immutable_proto) map->set_immutable_proto(true);
+
return result;
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 44933b965b..da7f2ef414 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -73,6 +73,8 @@
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -197,7 +199,6 @@ class CallDepthScope {
: isolate_(isolate), context_(context), escaped_(false) {
// TODO(dcarney): remove this when blink stops crashing.
DCHECK(!isolate_->external_caught_exception());
- isolate_->IncrementJsCallsFromApiCounter();
isolate_->handle_scope_implementer()->IncrementCallDepth();
if (!context.IsEmpty()) {
i::Handle<i::Context> env = Utils::OpenHandle(*context);
@@ -273,10 +274,23 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
i::Isolate* isolate = i::Isolate::Current();
char last_few_messages[Heap::kTraceRingBufferSize + 1];
char js_stacktrace[Heap::kStacktraceBufferSize + 1];
+ i::HeapStats heap_stats;
+
+ if (isolate == nullptr) {
+ // On a background thread -> we cannot retrieve memory information from the
+ // Isolate. Write easy-to-recognize values on the stack.
+ memset(last_few_messages, 0x0badc0de, Heap::kTraceRingBufferSize + 1);
+ memset(js_stacktrace, 0x0badc0de, Heap::kStacktraceBufferSize + 1);
+ memset(&heap_stats, 0xbadc0de, sizeof(heap_stats));
+ // Note that the embedder's oom handler won't be called in this case. We
+ // just crash.
+ FATAL("API fatal error handler returned after process out of memory");
+ return;
+ }
+
memset(last_few_messages, 0, Heap::kTraceRingBufferSize + 1);
memset(js_stacktrace, 0, Heap::kStacktraceBufferSize + 1);
- i::HeapStats heap_stats;
intptr_t start_marker;
heap_stats.start_marker = &start_marker;
size_t new_space_size;
@@ -509,7 +523,8 @@ size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
}
StartupData SnapshotCreator::CreateBlob(
- SnapshotCreator::FunctionCodeHandling function_code_handling) {
+ SnapshotCreator::FunctionCodeHandling function_code_handling,
+ SerializeInternalFieldsCallback callback) {
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
DCHECK(!data->created_);
@@ -544,18 +559,30 @@ StartupData SnapshotCreator::CreateBlob(
}
data->contexts_.Clear();
+#ifdef DEBUG
+ i::ExternalReferenceTable::instance(isolate)->ResetCount();
+#endif // DEBUG
+
i::StartupSerializer startup_serializer(isolate, function_code_handling);
startup_serializer.SerializeStrongReferences();
// Serialize each context with a new partial serializer.
i::List<i::SnapshotData*> context_snapshots(num_contexts);
for (int i = 0; i < num_contexts; i++) {
- i::PartialSerializer partial_serializer(isolate, &startup_serializer);
+ i::PartialSerializer partial_serializer(isolate, &startup_serializer,
+ callback);
partial_serializer.Serialize(&contexts[i]);
context_snapshots.Add(new i::SnapshotData(&partial_serializer));
}
startup_serializer.SerializeWeakReferencesAndDeferred();
+
+#ifdef DEBUG
+ if (i::FLAG_external_reference_stats) {
+ i::ExternalReferenceTable::instance(isolate)->PrintCount();
+ }
+#endif // DEBUG
+
i::SnapshotData startup_snapshot(&startup_serializer);
StartupData result =
i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &context_snapshots);
@@ -642,6 +669,7 @@ StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
void V8::SetFlagsFromString(const char* str, int length) {
i::FlagList::SetFlagsFromString(str, length);
+ i::FlagList::EnforceFlagImplications();
}
@@ -696,13 +724,13 @@ Extension::Extension(const char* name,
CHECK(source != NULL || source_length_ == 0);
}
-
ResourceConstraints::ResourceConstraints()
: max_semi_space_size_(0),
max_old_space_size_(0),
max_executable_size_(0),
stack_limit_(NULL),
- code_range_size_(0) { }
+ code_range_size_(0),
+ max_zone_pool_size_(0) {}
void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit) {
@@ -722,18 +750,25 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice);
set_max_executable_size(i::Heap::kMaxExecutableSizeLowMemoryDevice);
+ set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSizeLowMemoryDevice);
} else if (physical_memory <= medium_limit) {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice);
set_max_executable_size(i::Heap::kMaxExecutableSizeMediumMemoryDevice);
+ set_max_zone_pool_size(
+ i::AccountingAllocator::kMaxPoolSizeMediumMemoryDevice);
} else if (physical_memory <= high_limit) {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice);
set_max_executable_size(i::Heap::kMaxExecutableSizeHighMemoryDevice);
+ set_max_zone_pool_size(
+ i::AccountingAllocator::kMaxPoolSizeHighMemoryDevice);
} else {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice);
set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice);
+ set_max_zone_pool_size(
+ i::AccountingAllocator::kMaxPoolSizeHugeMemoryDevice);
}
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
@@ -752,11 +787,14 @@ void SetResourceConstraints(i::Isolate* isolate,
int old_space_size = constraints.max_old_space_size();
int max_executable_size = constraints.max_executable_size();
size_t code_range_size = constraints.code_range_size();
+ size_t max_pool_size = constraints.max_zone_pool_size();
if (semi_space_size != 0 || old_space_size != 0 ||
max_executable_size != 0 || code_range_size != 0) {
isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
max_executable_size, code_range_size);
}
+ isolate->allocator()->ConfigureSegmentPool(max_pool_size);
+
if (constraints.stack_limit() != NULL) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
isolate->stack_guard()->SetStackLimit(limit);
@@ -786,6 +824,11 @@ i::Object** V8::CopyPersistent(i::Object** obj) {
return result.location();
}
+void V8::RegisterExternallyReferencedObject(i::Object** object,
+ i::Isolate* isolate) {
+ isolate->heap()->RegisterExternallyReferencedObject(object);
+}
+
void V8::MakeWeak(i::Object** location, void* parameter,
int internal_field_index1, int internal_field_index2,
WeakCallbackInfo<void>::Callback weak_callback) {
@@ -1061,7 +1104,7 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
auto value_obj = Utils::OpenHandle(*value);
CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
if (value_obj->IsObjectTemplateInfo()) {
- templ->set_serial_number(i::Smi::FromInt(0));
+ templ->set_serial_number(i::Smi::kZero);
if (templ->IsFunctionTemplateInfo()) {
i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
}
@@ -1071,6 +1114,11 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
static_cast<i::PropertyAttributes>(attribute));
}
+void Template::SetPrivate(v8::Local<Private> name, v8::Local<Data> value,
+ v8::PropertyAttribute attribute) {
+ Set(Utils::ToLocal(Utils::OpenHandle(reinterpret_cast<Name*>(*name))), value,
+ attribute);
+}
void Template::SetAccessorProperty(
v8::Local<v8::Name> name,
@@ -1134,11 +1182,11 @@ void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
info->set_parent_template(*Utils::OpenHandle(*value));
}
-
static Local<FunctionTemplate> FunctionTemplateNew(
i::Isolate* isolate, FunctionCallback callback,
experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
- v8::Local<Signature> signature, int length, bool do_not_cache) {
+ v8::Local<Signature> signature, int length, bool do_not_cache,
+ v8::Local<Private> cached_property_name = v8::Local<Private>()) {
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::FUNCTION_TEMPLATE_INFO_TYPE);
i::Handle<i::FunctionTemplateInfo> obj =
@@ -1162,6 +1210,10 @@ static Local<FunctionTemplate> FunctionTemplateNew(
obj->set_accept_any_receiver(true);
if (!signature.IsEmpty())
obj->set_signature(*Utils::OpenHandle(*signature));
+ obj->set_cached_property_name(
+ cached_property_name.IsEmpty()
+ ? isolate->heap()->the_hole_value()
+ : *Utils::OpenHandle(*cached_property_name));
return Utils::ToLocal(obj);
}
@@ -1199,13 +1251,21 @@ Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
experimental::FastAccessorBuilder* fast_handler, v8::Local<Value> data,
v8::Local<Signature> signature, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- DCHECK(!i_isolate->serializer_enabled());
LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
ENTER_V8(i_isolate);
return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
length, false);
}
+Local<FunctionTemplate> FunctionTemplate::NewWithCache(
+ Isolate* isolate, FunctionCallback callback, Local<Private> cache_property,
+ Local<Value> data, Local<Signature> signature, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
+ ENTER_V8(i_isolate);
+ return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
+ length, false, cache_property);
+}
Local<Signature> Signature::New(Isolate* isolate,
Local<FunctionTemplate> receiver) {
@@ -1271,10 +1331,13 @@ template <typename Getter, typename Setter>
i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
v8::AccessControl settings, v8::PropertyAttribute attributes,
- v8::Local<AccessorSignature> signature, bool is_special_data_property) {
+ v8::Local<AccessorSignature> signature, bool is_special_data_property,
+ bool replace_on_access) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
i::Handle<i::AccessorInfo> obj = isolate->factory()->NewAccessorInfo();
SET_FIELD_WRAPPED(obj, set_getter, getter);
+ DCHECK_IMPLIES(replace_on_access,
+ is_special_data_property && setter == nullptr);
if (is_special_data_property && setter == nullptr) {
setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
}
@@ -1286,6 +1349,7 @@ i::Handle<i::AccessorInfo> MakeAccessorInfo(
}
obj->set_data(*Utils::OpenHandle(*data));
obj->set_is_special_data_property(is_special_data_property);
+ obj->set_replace_on_access(replace_on_access);
return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
}
@@ -1395,7 +1459,7 @@ static Local<ObjectTemplate> ObjectTemplateNew(
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (!constructor.IsEmpty())
obj->set_constructor(*Utils::OpenHandle(*constructor));
- obj->set_data(i::Smi::FromInt(0));
+ obj->set_data(i::Smi::kZero);
return Utils::ToLocal(obj);
}
@@ -1437,20 +1501,21 @@ static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
return constructor;
}
-
template <typename Getter, typename Setter, typename Data, typename Template>
static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
Getter getter, Setter setter, Data data,
AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
- bool is_special_data_property) {
+ bool is_special_data_property,
+ bool replace_on_access) {
auto info = Utils::OpenHandle(template_obj);
auto isolate = info->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- auto obj = MakeAccessorInfo(name, getter, setter, data, settings, attribute,
- signature, is_special_data_property);
+ auto obj =
+ MakeAccessorInfo(name, getter, setter, data, settings, attribute,
+ signature, is_special_data_property, replace_on_access);
if (obj.is_null()) return false;
i::ApiNatives::AddNativeDataProperty(isolate, info, obj);
return true;
@@ -1465,7 +1530,7 @@ void Template::SetNativeDataProperty(v8::Local<String> name,
v8::Local<AccessorSignature> signature,
AccessControl settings) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
- signature, true);
+ signature, true, false);
}
@@ -1477,9 +1542,17 @@ void Template::SetNativeDataProperty(v8::Local<Name> name,
v8::Local<AccessorSignature> signature,
AccessControl settings) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
- signature, true);
+ signature, true, false);
}
+void Template::SetLazyDataProperty(v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ v8::Local<Value> data,
+ PropertyAttribute attribute) {
+ TemplateSetAccessor(
+ this, name, getter, static_cast<AccessorNameSetterCallback>(nullptr),
+ data, DEFAULT, attribute, Local<AccessorSignature>(), true, true);
+}
void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
PropertyAttribute attribute) {
@@ -1500,7 +1573,7 @@ void ObjectTemplate::SetAccessor(v8::Local<String> name,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
- signature, i::FLAG_disable_old_api_accessors);
+ signature, i::FLAG_disable_old_api_accessors, false);
}
@@ -1511,7 +1584,7 @@ void ObjectTemplate::SetAccessor(v8::Local<Name> name,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
- signature, i::FLAG_disable_old_api_accessors);
+ signature, i::FLAG_disable_old_api_accessors, false);
}
template <typename Getter, typename Setter, typename Query, typename Descriptor,
@@ -1900,22 +1973,13 @@ Local<String> Module::GetModuleRequest(int i) const {
return ToApiHandle<String>(i::handle(module_requests->get(i), isolate));
}
-void Module::SetEmbedderData(Local<Value> data) {
- Utils::OpenHandle(this)->set_embedder_data(*Utils::OpenHandle(*data));
-}
-
-Local<Value> Module::GetEmbedderData() const {
- auto self = Utils::OpenHandle(this);
- return ToApiHandle<Value>(
- i::handle(self->embedder_data(), self->GetIsolate()));
-}
+int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); }
bool Module::Instantiate(Local<Context> context,
- Module::ResolveCallback callback,
- Local<Value> callback_data) {
+ Module::ResolveCallback callback) {
PREPARE_FOR_EXECUTION_BOOL(context, Module, Instantiate);
- has_pending_exception = !i::Module::Instantiate(
- Utils::OpenHandle(this), context, callback, callback_data);
+ has_pending_exception =
+ !i::Module::Instantiate(Utils::OpenHandle(this), context, callback);
RETURN_ON_FAILED_EXECUTION_BOOL();
return true;
}
@@ -1930,7 +1994,7 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
i::Handle<i::Module> self = Utils::OpenHandle(this);
// It's an API error to call Evaluate before Instantiate.
- CHECK(self->code()->IsJSFunction());
+ CHECK(self->instantiated());
Local<Value> result;
has_pending_exception = !ToLocal(i::Module::Evaluate(self), &result);
@@ -2252,18 +2316,10 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
source->info->set_script(script);
- {
- // Create a canonical handle scope if compiling ignition bytecode. This is
- // required by the constant array builder to de-duplicate objects without
- // dereferencing handles.
- std::unique_ptr<i::CanonicalHandleScope> canonical;
- if (i::FLAG_ignition) canonical.reset(new i::CanonicalHandleScope(isolate));
-
- // Do the parsing tasks which need to be done on the main thread. This will
- // also handle parse errors.
- source->parser->Internalize(isolate, script,
- source->info->literal() == nullptr);
- }
+ // Do the parsing tasks which need to be done on the main thread. This will
+ // also handle parse errors.
+ source->parser->Internalize(isolate, script,
+ source->info->literal() == nullptr);
source->parser->HandleSourceURLComments(isolate, script);
i::Handle<i::SharedFunctionInfo> result;
@@ -2926,6 +2982,17 @@ Maybe<bool> ValueSerializer::Delegate::WriteHostObject(Isolate* v8_isolate,
return Nothing<bool>();
}
+void* ValueSerializer::Delegate::ReallocateBufferMemory(void* old_buffer,
+ size_t size,
+ size_t* actual_size) {
+ *actual_size = size;
+ return realloc(old_buffer, size);
+}
+
+void ValueSerializer::Delegate::FreeBufferMemory(void* buffer) {
+ return free(buffer);
+}
+
struct ValueSerializer::PrivateData {
explicit PrivateData(i::Isolate* i, ValueSerializer::Delegate* delegate)
: isolate(i), serializer(i, delegate) {}
@@ -2958,6 +3025,10 @@ std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
return private_->serializer.ReleaseBuffer();
}
+std::pair<uint8_t*, size_t> ValueSerializer::Release() {
+ return private_->serializer.Release();
+}
+
void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
Local<ArrayBuffer> array_buffer) {
private_->serializer.TransferArrayBuffer(transfer_id,
@@ -3053,11 +3124,6 @@ Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
return Just(true);
}
-Maybe<bool> ValueDeserializer::ReadHeader() {
- Isolate* isolate = reinterpret_cast<Isolate*>(private_->isolate);
- return ReadHeader(isolate->GetEnteredContext());
-}
-
void ValueDeserializer::SetSupportsLegacyWireFormat(
bool supports_legacy_wire_format) {
private_->supports_legacy_wire_format = supports_legacy_wire_format;
@@ -4479,8 +4545,9 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
v8::Local<AccessorSignature> signature;
- auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
- signature, i::FLAG_disable_old_api_accessors);
+ auto info =
+ MakeAccessorInfo(name, getter, setter, data, settings, attributes,
+ signature, i::FLAG_disable_old_api_accessors, false);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -6052,6 +6119,9 @@ static i::Handle<ObjectType> CreateEnvironment(
proxy_constructor->set_prototype_template(
*Utils::OpenHandle(*global_template));
+ proxy_template->SetInternalFieldCount(
+ global_template->InternalFieldCount());
+
// Migrate security handlers from global_template to
// proxy_template. Temporarily removing access check
// information from the global template.
@@ -6316,7 +6386,16 @@ MaybeLocal<v8::Object> FunctionTemplate::NewRemoteInstance() {
bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
auto self = Utils::OpenHandle(this);
auto obj = Utils::OpenHandle(*value);
- return obj->IsJSObject() && self->IsTemplateFor(i::JSObject::cast(*obj));
+ if (obj->IsJSObject() && self->IsTemplateFor(i::JSObject::cast(*obj))) {
+ return true;
+ }
+ if (obj->IsJSGlobalProxy()) {
+ // If it's a global proxy object, then test with the global object.
+ i::PrototypeIterator iter(i::JSObject::cast(*obj)->map());
+ if (iter.IsAtEnd()) return false;
+ return self->IsTemplateFor(iter.GetCurrent<i::JSGlobalObject>());
+ }
+ return false;
}
@@ -7184,34 +7263,77 @@ MaybeLocal<Proxy> Proxy::New(Local<Context> context, Local<Object> local_target,
RETURN_ESCAPED(result);
}
+Local<String> WasmCompiledModule::GetWasmWireBytes() {
+ i::Handle<i::JSObject> obj =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
+ i::Handle<i::WasmCompiledModule> compiled_part =
+ i::handle(i::WasmCompiledModule::cast(obj->GetInternalField(0)));
+ i::Handle<i::String> wire_bytes = compiled_part->module_bytes();
+ return Local<String>::Cast(Utils::ToLocal(wire_bytes));
+}
+
WasmCompiledModule::SerializedModule WasmCompiledModule::Serialize() {
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::FixedArray> compiled_part =
- i::handle(i::FixedArray::cast(obj->GetInternalField(0)));
+ i::Handle<i::WasmCompiledModule> compiled_part =
+ i::handle(i::WasmCompiledModule::cast(obj->GetInternalField(0)));
+
std::unique_ptr<i::ScriptData> script_data =
i::WasmCompiledModuleSerializer::SerializeWasmModule(obj->GetIsolate(),
compiled_part);
script_data->ReleaseDataOwnership();
+
size_t size = static_cast<size_t>(script_data->length());
return {std::unique_ptr<const uint8_t[]>(script_data->data()), size};
}
MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
Isolate* isolate,
- const WasmCompiledModule::SerializedModule& serialized_data) {
- int size = static_cast<int>(serialized_data.second);
- i::ScriptData sc(serialized_data.first.get(), size);
+ const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
+ const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
+ int size = static_cast<int>(serialized_module.second);
+ i::ScriptData sc(serialized_module.first, size);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::MaybeHandle<i::FixedArray> maybe_compiled_part =
- i::WasmCompiledModuleSerializer::DeserializeWasmModule(i_isolate, &sc);
+ i::WasmCompiledModuleSerializer::DeserializeWasmModule(
+ i_isolate, &sc,
+ {wire_bytes.first, static_cast<int>(wire_bytes.second)});
i::Handle<i::FixedArray> compiled_part;
if (!maybe_compiled_part.ToHandle(&compiled_part)) {
return MaybeLocal<WasmCompiledModule>();
}
+ i::Handle<i::WasmCompiledModule> compiled_module =
+ handle(i::WasmCompiledModule::cast(*compiled_part));
+ return Local<WasmCompiledModule>::Cast(
+ Utils::ToLocal(i::Handle<i::JSObject>::cast(
+ i::WasmModuleObject::New(i_isolate, compiled_module))));
+}
+
+MaybeLocal<WasmCompiledModule> WasmCompiledModule::DeserializeOrCompile(
+ Isolate* isolate,
+ const WasmCompiledModule::CallerOwnedBuffer& serialized_module,
+ const WasmCompiledModule::CallerOwnedBuffer& wire_bytes) {
+ MaybeLocal<WasmCompiledModule> ret =
+ Deserialize(isolate, serialized_module, wire_bytes);
+ if (!ret.IsEmpty()) {
+ return ret;
+ }
+ return Compile(isolate, wire_bytes.first, wire_bytes.second);
+}
+
+MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
+ const uint8_t* start,
+ size_t length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Deserialize()");
+ i::MaybeHandle<i::JSObject> maybe_compiled =
+ i::wasm::CreateModuleObjectFromBytes(
+ i_isolate, start, start + length, &thrower,
+ i::wasm::ModuleOrigin::kWasmOrigin, i::Handle<i::Script>::null(),
+ nullptr, nullptr);
+ if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
- Utils::ToLocal(i::wasm::CreateCompiledModuleObject(
- i_isolate, compiled_part, i::wasm::ModuleOrigin::kWasmOrigin)));
+ Utils::ToLocal(maybe_compiled.ToHandleChecked()));
}
// static
@@ -7277,7 +7399,11 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
- i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length);
+ // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
+ // version that throws an exception or otherwise does not crash.
+ if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) {
+ i::FatalProcessOutOfMemory("v8::ArrayBuffer::New");
+ }
return Utils::ToLocal(obj);
}
@@ -7467,8 +7593,12 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
- i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
- i::SharedFlag::kShared);
+ // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
+ // version that throws an exception or otherwise does not crash.
+ if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
+ i::SharedFlag::kShared)) {
+ i::FatalProcessOutOfMemory("v8::SharedArrayBuffer::New");
+ }
return Utils::ToLocalShared(obj);
}
@@ -7839,6 +7969,8 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
}
isolate->set_api_external_references(params.external_references);
+ isolate->set_deserialize_internal_fields_callback(
+ params.deserialize_internal_fields_callback);
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
@@ -7992,7 +8124,7 @@ size_t Isolate::NumberOfTrackedHeapObjectTypes() {
bool Isolate::GetHeapObjectStatisticsAtLastGC(
HeapObjectStatistics* object_statistics, size_t type_index) {
if (!object_statistics) return false;
- if (!i::FLAG_track_gc_object_stats) return false;
+ if (V8_LIKELY(!i::FLAG_gc_stats)) return false;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Heap* heap = isolate->heap();
@@ -8245,6 +8377,7 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
+ isolate->allocator()->MemoryPressureNotification(level);
}
void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8693,6 +8826,299 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
return Utils::ToLocal(result);
}
+bool DebugInterface::SetDebugEventListener(Isolate* isolate,
+ DebugInterface::EventCallback that,
+ Local<Value> data) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i::HandleScope scope(i_isolate);
+ i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
+ if (that != NULL) {
+ foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+ }
+ i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
+ return true;
+}
+
+Local<Context> DebugInterface::GetDebugContext(Isolate* isolate) {
+ return Debug::GetDebugContext(isolate);
+}
+
+MaybeLocal<Value> DebugInterface::Call(Local<Context> context,
+ v8::Local<v8::Function> fun,
+ v8::Local<v8::Value> data) {
+ return Debug::Call(context, fun, data);
+}
+
+void DebugInterface::SetLiveEditEnabled(Isolate* isolate, bool enable) {
+ Debug::SetLiveEditEnabled(isolate, enable);
+}
+
+void DebugInterface::DebugBreak(Isolate* isolate) {
+ Debug::DebugBreak(isolate);
+}
+
+void DebugInterface::CancelDebugBreak(Isolate* isolate) {
+ Debug::CancelDebugBreak(isolate);
+}
+
+MaybeLocal<Array> DebugInterface::GetInternalProperties(Isolate* isolate,
+ Local<Value> value) {
+ return Debug::GetInternalProperties(isolate, value);
+}
+
+void DebugInterface::ChangeBreakOnException(Isolate* isolate,
+ ExceptionBreakState type) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->debug()->ChangeBreakOnException(
+ i::BreakException, type == BreakOnAnyException);
+ internal_isolate->debug()->ChangeBreakOnException(i::BreakUncaughtException,
+ type != NoBreakOnException);
+}
+
+void DebugInterface::PrepareStep(Isolate* v8_isolate, StepAction action) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ CHECK(isolate->debug()->CheckExecutionState());
+ // Clear all current stepping setup.
+ isolate->debug()->ClearStepping();
+ // Prepare step.
+ isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
+}
+
+void DebugInterface::ClearStepping(Isolate* v8_isolate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ // Clear all current stepping setup.
+ isolate->debug()->ClearStepping();
+}
+
+v8::Isolate* DebugInterface::Script::GetIsolate() const {
+ return reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate());
+}
+
+ScriptOriginOptions DebugInterface::Script::OriginOptions() const {
+ return Utils::OpenHandle(this)->origin_options();
+}
+
+bool DebugInterface::Script::WasCompiled() const {
+ return Utils::OpenHandle(this)->compilation_state() ==
+ i::Script::COMPILATION_STATE_COMPILED;
+}
+
+int DebugInterface::Script::Id() const { return Utils::OpenHandle(this)->id(); }
+
+int DebugInterface::Script::LineOffset() const {
+ return Utils::OpenHandle(this)->line_offset();
+}
+
+int DebugInterface::Script::ColumnOffset() const {
+ return Utils::OpenHandle(this)->column_offset();
+}
+
+std::vector<int> DebugInterface::Script::LineEnds() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Isolate* isolate = script->GetIsolate();
+ i::HandleScope scope(isolate);
+ i::Script::InitLineEnds(script);
+ CHECK(script->line_ends()->IsFixedArray());
+ i::Handle<i::FixedArray> line_ends(i::FixedArray::cast(script->line_ends()));
+ std::vector<int> result(line_ends->length());
+ for (int i = 0; i < line_ends->length(); ++i) {
+ i::Smi* line_end = i::Smi::cast(line_ends->get(i));
+ result[i] = line_end->value();
+ }
+ return result;
+}
+
+MaybeLocal<String> DebugInterface::Script::Name() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(script->name(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::SourceURL() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(script->source_url(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::SourceMappingURL() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(script->source_mapping_url(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::ContextData() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(script->context_data(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+MaybeLocal<String> DebugInterface::Script::Source() const {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ i::Handle<i::Object> value(script->source(), isolate);
+ if (!value->IsString()) return MaybeLocal<String>();
+ return Utils::ToLocal(
+ handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+}
+
+namespace {
+int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
+ return i::Smi::cast(array->get(index))->value();
+}
+} // namespace
+
+bool DebugInterface::Script::GetPossibleBreakpoints(
+ const Location& start, const Location& end,
+ std::vector<Location>* locations) const {
+ CHECK(!start.IsEmpty());
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+
+ i::Script::InitLineEnds(script);
+ CHECK(script->line_ends()->IsFixedArray());
+ i::Isolate* isolate = script->GetIsolate();
+ i::Handle<i::FixedArray> line_ends =
+ i::Handle<i::FixedArray>::cast(i::handle(script->line_ends(), isolate));
+ CHECK(line_ends->length());
+
+ int start_offset = GetSourcePosition(start);
+ int end_offset;
+ if (end.IsEmpty()) {
+ end_offset = GetSmiValue(line_ends, line_ends->length() - 1) + 1;
+ } else {
+ end_offset = GetSourcePosition(end);
+ }
+ if (start_offset >= end_offset) return true;
+
+ std::set<int> offsets;
+ if (!isolate->debug()->GetPossibleBreakpoints(script, start_offset,
+ end_offset, &offsets)) {
+ return false;
+ }
+
+ int current_line_end_index = 0;
+ for (const auto& it : offsets) {
+ int offset = it;
+ while (offset > GetSmiValue(line_ends, current_line_end_index)) {
+ ++current_line_end_index;
+ CHECK(current_line_end_index < line_ends->length());
+ }
+ int line_offset = 0;
+
+ if (current_line_end_index > 0) {
+ line_offset = GetSmiValue(line_ends, current_line_end_index - 1) + 1;
+ }
+ locations->push_back(Location(
+ current_line_end_index + script->line_offset(),
+ offset - line_offset +
+ (current_line_end_index == 0 ? script->column_offset() : 0)));
+ }
+ return true;
+}
+
+int DebugInterface::Script::GetSourcePosition(const Location& location) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+
+ int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
+ int column = location.GetColumnNumber();
+ if (line == 0) {
+ column = std::max(0, column - script->column_offset());
+ }
+
+ i::Script::InitLineEnds(script);
+ CHECK(script->line_ends()->IsFixedArray());
+ i::Handle<i::FixedArray> line_ends = i::Handle<i::FixedArray>::cast(
+ i::handle(script->line_ends(), script->GetIsolate()));
+ CHECK(line_ends->length());
+ if (line >= line_ends->length())
+ return GetSmiValue(line_ends, line_ends->length() - 1);
+ int line_offset = GetSmiValue(line_ends, line);
+ if (line == 0) return std::min(column, line_offset);
+ int prev_line_offset = GetSmiValue(line_ends, line - 1);
+ return std::min(prev_line_offset + column + 1, line_offset);
+}
+
+MaybeLocal<DebugInterface::Script> DebugInterface::Script::Wrap(
+ v8::Isolate* v8_isolate, v8::Local<v8::Object> script) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::JSReceiver> script_receiver(Utils::OpenHandle(*script));
+ if (!script_receiver->IsJSValue()) return MaybeLocal<Script>();
+ i::Handle<i::Object> script_value(
+ i::Handle<i::JSValue>::cast(script_receiver)->value(), isolate);
+ if (!script_value->IsScript()) {
+ return MaybeLocal<Script>();
+ }
+ i::Handle<i::Script> script_obj = i::Handle<i::Script>::cast(script_value);
+ if (script_obj->type() != i::Script::TYPE_NORMAL) return MaybeLocal<Script>();
+ return ToApiHandle<DebugInterface::Script>(
+ handle_scope.CloseAndEscape(script_obj));
+}
+
+DebugInterface::Location::Location(int lineNumber, int columnNumber)
+ : lineNumber_(lineNumber), columnNumber_(columnNumber) {
+ CHECK(lineNumber >= 0);
+ CHECK(columnNumber >= 0);
+}
+
+DebugInterface::Location::Location() : lineNumber_(-1), columnNumber_(-1) {}
+
+int DebugInterface::Location::GetLineNumber() const {
+ CHECK(lineNumber_ >= 0);
+ return lineNumber_;
+}
+
+int DebugInterface::Location::GetColumnNumber() const {
+ CHECK(columnNumber_ >= 0);
+ return columnNumber_;
+}
+
+bool DebugInterface::Location::IsEmpty() const {
+ return lineNumber_ == -1 && columnNumber_ == -1;
+}
+
+void DebugInterface::GetLoadedScripts(
+ v8::Isolate* v8_isolate,
+ PersistentValueVector<DebugInterface::Script>& scripts) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ // TODO(kozyatinskiy): remove this GC once tests are dealt with.
+ isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kDebugger);
+ {
+ i::DisallowHeapAllocation no_gc;
+ i::Script::Iterator iterator(isolate);
+ i::Script* script;
+ while ((script = iterator.Next())) {
+ if (script->type() != i::Script::TYPE_NORMAL) continue;
+ if (script->HasValidSource()) {
+ i::HandleScope handle_scope(isolate);
+ i::Handle<i::Script> script_handle(script, isolate);
+ scripts.Append(ToApiHandle<Script>(script_handle));
+ }
+ }
+ }
+}
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 22c10dda14..6fcaa90a5e 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -7,6 +7,7 @@
#include "include/v8-testing.h"
#include "src/contexts.h"
+#include "src/debug/debug-interface.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list.h"
@@ -28,7 +29,7 @@ class Consts {
template <typename T> inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- if (obj == v8::internal::Smi::FromInt(0)) return nullptr;
+ if (obj == v8::internal::Smi::kZero) return nullptr;
return reinterpret_cast<T>(
reinterpret_cast<intptr_t>(
v8::internal::Foreign::cast(obj)->foreign_address()));
@@ -39,7 +40,7 @@ template <typename T>
inline v8::internal::Handle<v8::internal::Object> FromCData(
v8::internal::Isolate* isolate, T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
- if (obj == nullptr) return handle(v8::internal::Smi::FromInt(0), isolate);
+ if (obj == nullptr) return handle(v8::internal::Smi::kZero, isolate);
return isolate->factory()->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
@@ -108,7 +109,8 @@ class RegisteredExtension {
V(StackTrace, JSArray) \
V(StackFrame, JSObject) \
V(Proxy, JSProxy) \
- V(NativeWeakMap, JSWeakMap)
+ V(NativeWeakMap, JSWeakMap) \
+ V(DebugInterface::Script, Script)
class Utils {
public:
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index 92c7075ee5..d5d2c02bf0 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -96,8 +96,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
CLOBBER_DOUBLE_REGISTERS(); \
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
- FLAG_runtime_call_stats)) { \
+ if (V8_UNLIKELY(FLAG_runtime_stats)) { \
return Stats_##Name(args_length, args_object, isolate); \
} \
Arguments args(args_length, args_object); \
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index ee02027610..d90dc76252 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -4280,10 +4280,10 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Deduplicate constants.
int size_after_marker = estimated_size_after_marker;
- for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
+ for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
DCHECK(!entry.is_merged());
- for (int j = 0; j < i; j++) {
+ for (size_t j = 0; j < i; j++) {
if (entry.value64() == pending_64_bit_constants_[j].value64()) {
DCHECK(!pending_64_bit_constants_[j].is_merged());
entry.set_merged_index(j);
@@ -4293,11 +4293,11 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
- for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
+ for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
DCHECK(!entry.is_merged());
if (!entry.sharing_ok()) continue;
- for (int j = 0; j < i; j++) {
+ for (size_t j = 0; j < i; j++) {
if (entry.value() == pending_32_bit_constants_[j].value()) {
DCHECK(!pending_32_bit_constants_[j].is_merged());
entry.set_merged_index(j);
@@ -4338,7 +4338,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Emit 64-bit constant pool entries first: their range is smaller than
// 32-bit entries.
- for (int i = 0; i < pending_64_bit_constants_.size(); i++) {
+ for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_64_bit_constants_[i];
Instr instr = instr_at(entry.position());
@@ -4367,7 +4367,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// Emit 32-bit constant pool entries.
- for (int i = 0; i < pending_32_bit_constants_.size(); i++) {
+ for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
Instr instr = instr_at(entry.position());
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index e5448f79ae..1283c3984a 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1421,7 +1421,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
// Record the emission of a constant pool.
//
@@ -1585,7 +1586,8 @@ class Assembler : public AssemblerBase {
// Check the constant pool hasn't been blocked for too long.
DCHECK(pending_32_bit_constants_.empty() ||
(start + pending_64_bit_constants_.size() * kDoubleSize <
- (first_const_pool_32_use_ + kMaxDistToIntPool)));
+ static_cast<size_t>(first_const_pool_32_use_ +
+ kMaxDistToIntPool)));
DCHECK(pending_64_bit_constants_.empty() ||
(start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index de6803fa6f..59f304d51d 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -544,7 +544,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1494,13 +1494,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
- __ b(ne, &runtime);
+ // Check that the last match info is a FixedArray.
+ __ ldr(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &runtime);
@@ -1508,7 +1505,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ add(r2, r1, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ cmp(r2, Operand::SmiUntag(r0));
__ b(gt, &runtime);
@@ -1517,28 +1514,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the capture count.
__ SmiTag(r2, r1);
__ str(r2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
+ RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
+ __ str(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset));
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- subject,
- r3,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastSubjectOffset, subject, r3,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mov(subject, r2);
- __ str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
+ __ str(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- r3,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastInputOffset, subject, r3,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1549,10 +1538,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r2: offsets vector
Label next_capture, done;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ add(r0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ // counts down until wrapping after zero.
+ __ add(r0, last_match_info_elements,
+ Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ sub(r1, r1, Operand(1), SetCC);
__ b(mi, &done);
@@ -1565,7 +1553,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ mov(r0, last_match_info_elements);
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -1784,6 +1772,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // r0 - number of arguments
// r1 - function
// r3 - slot id
// r2 - vector
@@ -1792,25 +1781,22 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ cmp(r1, r5);
__ b(ne, miss);
- __ mov(r0, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r2, r3);
__ mov(r2, r4);
__ mov(r3, r1);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
+ // r0 - number of arguments
// r1 - function
// r3 - slot id (Smi)
// r2 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does r1 match the recorded monomorphic target?
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
@@ -1843,7 +1829,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r2, r3);
- __ mov(r0, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1888,7 +1873,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
IncrementCallCount(masm, r2, r3);
__ bind(&call_count_incremented);
- __ mov(r0, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1921,13 +1905,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r2);
- __ Push(r3);
- __ Push(cp, r1);
+ __ SmiTag(r0);
+ __ Push(r0, r2, r3, cp, r1);
__ CallStub(&create_stub);
- __ Pop(cp, r1);
- __ Pop(r3);
- __ Pop(r2);
+ __ Pop(r2, r3, cp, r1);
+ __ Pop(r0);
+ __ SmiUntag(r0);
}
__ jmp(&call_function);
@@ -1944,14 +1927,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Preserve the number of arguments as Smi.
+ __ SmiTag(r0);
+
// Push the receiver and the function and feedback info.
- __ Push(r1, r2, r3);
+ __ Push(r0, r1, r2, r3);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ mov(r1, r0);
+
+ // Restore number of arguments.
+ __ Pop(r0);
+ __ SmiUntag(r0);
}
@@ -3061,21 +3051,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r2);
CallICStub stub(isolate(), state());
@@ -3083,14 +3058,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3178,183 +3145,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
- Register name = LoadWithVectorDescriptor::NameRegister(); // r2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
- Register feedback = r4;
- Register receiver_map = r5;
- Register scratch1 = r6;
-
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &not_array);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ b(ne, &miss);
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, r9);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r1
- Register key = LoadWithVectorDescriptor::NameRegister(); // r2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r0
- Register feedback = r4;
- Register receiver_map = r5;
- Register scratch1 = r6;
-
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ b(ne, &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ b(ne, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
- Register key = StoreWithVectorDescriptor::NameRegister(); // r2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
- Register feedback = r5;
- Register receiver_map = r6;
- Register scratch1 = r9;
-
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &not_array);
-
- // We are using register r8, which is used for the embedded constant pool
- // when FLAG_enable_embedded_constant_pool is true.
- DCHECK(!FLAG_enable_embedded_constant_pool);
- Register scratch2 = r8;
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ b(ne, &miss);
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3706,30 +3502,19 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ tst(r0, r0);
- __ b(ne, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmp(r0, Operand(1));
- __ b(gt, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -3781,21 +3566,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ add(r0, r0, Operand(3));
- break;
- case NONE:
- __ str(r1, MemOperand(sp, 0 * kPointerSize));
- __ mov(r0, Operand(3));
- break;
- case ONE:
- __ str(r1, MemOperand(sp, 1 * kPointerSize));
- __ mov(r0, Operand(4));
- break;
- }
+ __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ add(r0, r0, Operand(3));
__ Push(r3, r2);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4216,7 +3988,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ cmp(r6, Operand(Smi::kZero));
__ mov(r9, Operand::Zero(), LeaveCC, eq);
__ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
__ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
@@ -4273,7 +4045,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// r6 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ cmp(r6, Operand(Smi::kZero));
// Move backing store address to r1, because it is
// expected there when filling in the unmapped arguments.
__ mov(r1, r4, LeaveCC, eq);
@@ -4321,7 +4093,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ str(ip, MemOperand(r1, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ cmp(r5, Operand(Smi::FromInt(0)));
+ __ cmp(r5, Operand(Smi::kZero));
__ b(ne, &parameters_loop);
// Restore r0 = new object (tagged) and r5 = argument count (tagged).
@@ -4481,130 +4253,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register value = r0;
- Register slot = r2;
-
- Register cell = r1;
- Register cell_details = r4;
- Register cell_value = r5;
- Register cell_value_map = r6;
- Register scratch = r9;
-
- Register context = cp;
- Register context_temp = cell;
-
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue);
- }
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); i++) {
- __ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = context_temp;
- }
-
- // Load the PropertyCell at the specified slot.
- __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
- __ ldr(cell, ContextMemOperand(cell));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details);
- __ and_(cell_details, cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ b(ne, &not_mutable_data);
- __ JumpIfSmi(value, &fast_smi_case);
-
- __ bind(&fast_heapobject_case);
- __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- // RecordWriteField clobbers the value register, so we copy it before the
- // call.
- __ mov(r4, Operand(value));
- __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&not_mutable_data);
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ cmp(cell_value, value);
- __ b(ne, &not_same_value);
-
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
- __ b(ne, &slow_case);
-
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ b(eq, &done);
- __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ b(eq, &done);
- __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(eq, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ b(ne, &slow_case);
-
- // Now either both old and new values must be smis or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value, &slow_case);
- // Old and new values are smis, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ Ret();
-
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value, &slow_case);
-
- __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
- __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ cmp(cell_value_map, scratch);
- __ b(eq, &fast_heapobject_case);
-
- // Fallback to runtime.
- __ bind(&slow_case);
- __ SmiTag(slot);
- __ Push(slot, value);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -4881,7 +4529,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch, holder);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(Smi::kZero); // should_throw_on_error -> false
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
// v8::PropertyCallbackInfo::args_ array and name handle.
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index e49fed97c8..1231355dc0 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -329,7 +329,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// in a separate table if necessary.
Label high_fixes[256];
int high_fix_max = (count() - 1) >> 8;
- DCHECK_GT(arraysize(high_fixes), high_fix_max);
+ DCHECK_GT(arraysize(high_fixes), static_cast<size_t>(high_fix_max));
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index a002b8d44a..75161afcd7 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -31,9 +31,9 @@ const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
const Register LoadDescriptor::SlotRegister() { return r0; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r4; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
@@ -46,10 +46,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r5; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
@@ -162,7 +158,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r3, r2};
+ Register registers[] = {r1, r0, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -211,13 +207,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r2, r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0, r1};
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 00f8ab5cf5..c67fad8e1d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -663,7 +663,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
// Save caller-saved registers, which includes js_function.
DCHECK((kCallerSaved & js_function.bit()) != 0);
- DCHECK_EQ(kCallerSaved & code_entry.bit(), 0);
+ DCHECK_EQ(kCallerSaved & code_entry.bit(), 0u);
stm(db_w, sp, (kCallerSaved | lr.bit()));
int argument_count = 3;
@@ -1051,6 +1051,69 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
}
+void MacroAssembler::VmovExtended(Register dst, int src_code) {
+ DCHECK_LE(32, src_code);
+ DCHECK_GT(64, src_code);
+ if (src_code & 0x1) {
+ VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
+ } else {
+ VmovLow(dst, DwVfpRegister::from_code(src_code / 2));
+ }
+}
+
+void MacroAssembler::VmovExtended(int dst_code, Register src) {
+ DCHECK_LE(32, dst_code);
+ DCHECK_GT(64, dst_code);
+ if (dst_code & 0x1) {
+ VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
+ } else {
+ VmovLow(DwVfpRegister::from_code(dst_code / 2), src);
+ }
+}
+
+void MacroAssembler::VmovExtended(int dst_code, int src_code,
+ Register scratch) {
+ if (src_code < 32 && dst_code < 32) {
+ // src and dst are both s-registers.
+ vmov(SwVfpRegister::from_code(dst_code),
+ SwVfpRegister::from_code(src_code));
+ } else if (src_code < 32) {
+ // src is an s-register.
+ vmov(scratch, SwVfpRegister::from_code(src_code));
+ VmovExtended(dst_code, scratch);
+ } else if (dst_code < 32) {
+ // dst is an s-register.
+ VmovExtended(scratch, src_code);
+ vmov(SwVfpRegister::from_code(dst_code), scratch);
+ } else {
+ // Neither src or dst are s-registers.
+ DCHECK_GT(64, src_code);
+ DCHECK_GT(64, dst_code);
+ VmovExtended(scratch, src_code);
+ VmovExtended(dst_code, scratch);
+ }
+}
+
+void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
+ Register scratch) {
+ if (dst_code >= 32) {
+ ldr(scratch, src);
+ VmovExtended(dst_code, scratch);
+ } else {
+ vldr(SwVfpRegister::from_code(dst_code), src);
+ }
+}
+
+void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
+ Register scratch) {
+ if (src_code >= 32) {
+ VmovExtended(scratch, src_code);
+ str(scratch, dst);
+ } else {
+ vstr(SwVfpRegister::from_code(src_code), dst);
+ }
+}
+
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
@@ -1767,90 +1830,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!holder_reg.is(ip));
- DCHECK(!scratch.is(ip));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- DCHECK(!ip.is(scratch));
- mov(ip, fp);
- bind(&load_context);
- ldr(scratch, MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch, &has_context);
- ldr(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
- b(&load_context);
- bind(&has_context);
-
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- cmp(scratch, Operand::Zero());
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
- // Load the native context of the current context.
- ldr(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- cmp(scratch, Operand(ip));
- b(eq, &same_contexts);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, ip); // Move ip to its holding place.
- LoadRoot(ip, Heap::kNullValueRootIndex);
- cmp(holder_reg, ip);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
- ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- // Restore ip is not needed. ip is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- ldr(scratch, FieldMemOperand(scratch, token_offset));
- ldr(ip, FieldMemOperand(ip, token_offset));
- cmp(scratch, Operand(ip));
- b(ne, miss);
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -1883,85 +1862,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
bic(t0, t0, Operand(0xc0000000u));
}
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- GetNumberHash(t0, t1);
-
- // Compute the capacity mask.
- ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- SmiUntag(t1);
- sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
- cmp(key, Operand(ip));
- if (i != kNumberDictionaryProbes - 1) {
- b(eq, &done);
- } else {
- b(ne, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- DCHECK_EQ(DATA, 0);
- tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
void MacroAssembler::Allocate(int object_size,
Register result,
Register scratch1,
@@ -2414,20 +2314,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, ip);
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- b(hi, fail);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
@@ -2630,18 +2516,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
if (CpuFeatures::IsSupported(VFPv3)) {
CpuFeatureScope scope(this, VFPv3);
@@ -3346,50 +3220,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- cmp(length, Operand(kPointerSize));
- b(le, &byte_loop);
-
- bind(&align_loop_1);
- tst(src, Operand(kPointerSize - 1));
- b(eq, &word_loop);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(&align_loop_1);
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- tst(src, Operand(kPointerSize - 1));
- Assert(eq, kExpectingAlignmentForCopyBytes);
- }
- cmp(length, Operand(kPointerSize));
- b(lt, &byte_loop);
- ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
- str(scratch, MemOperand(dst, kPointerSize, PostIndex));
- sub(length, length, Operand(kPointerSize));
- b(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- cmp(length, Operand::Zero());
- b(eq, &done);
- bind(&byte_loop_1);
- ldrb(scratch, MemOperand(src, 1, PostIndex));
- strb(scratch, MemOperand(dst, 1, PostIndex));
- sub(length, length, Operand(1), SetCC);
- b(ne, &byte_loop_1);
- bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -3649,7 +3479,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
cmp(index, ip);
Check(lt, kIndexIsTooLarge);
- cmp(index, Operand(Smi::FromInt(0)));
+ cmp(index, Operand(Smi::kZero));
Check(ge, kIndexIsNegative);
SmiUntag(index, index);
@@ -3939,7 +3769,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(r3, r1);
- cmp(r3, Operand(Smi::FromInt(0)));
+ cmp(r3, Operand(Smi::kZero));
b(ne, call_runtime);
bind(&start);
@@ -3970,13 +3800,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
mov(ip, Operand(new_space_allocation_top_adr));
ldr(ip, MemOperand(ip));
eor(scratch_reg, scratch_reg, Operand(ip));
@@ -3985,7 +3816,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
eor(scratch_reg, scratch_reg, Operand(receiver_reg));
tst(scratch_reg, Operand(~Page::kPageAlignmentMask));
b(ne, no_memento_found);
@@ -3994,11 +3825,11 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- add(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ add(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
mov(ip, Operand(new_space_allocation_top_adr));
ldr(ip, MemOperand(ip));
cmp(scratch_reg, ip);
- b(gt, no_memento_found);
+ b(ge, no_memento_found);
// Memento map check.
bind(&map_check);
ldr(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index d524d84674..4f0ee82c00 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -549,6 +549,14 @@ class MacroAssembler: public Assembler {
void VmovLow(Register dst, DwVfpRegister src);
void VmovLow(DwVfpRegister dst, Register src);
+ // Simulate s-register moves for imaginary s32 - s63 registers.
+ void VmovExtended(Register dst, int src_code);
+ void VmovExtended(int dst_code, Register src);
+ // Move between s-registers and imaginary s-registers.
+ void VmovExtended(int dst_code, int src_code, Register scratch);
+ void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
+ void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
+
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
@@ -720,24 +728,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
void GetNumberHash(Register t0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2);
-
-
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -850,14 +842,6 @@ class MacroAssembler: public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
@@ -900,13 +884,6 @@ class MacroAssembler: public Assembler {
Register type_reg,
InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
@@ -1016,12 +993,6 @@ class MacroAssembler: public Assembler {
}
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 9ee20d45e2..37fdb2618f 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -306,8 +306,9 @@ void ConstPool::RecordEntry(intptr_t data,
DCHECK(mode != RelocInfo::COMMENT && mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
mode != RelocInfo::CODE_AGE_SEQUENCE &&
- mode != RelocInfo::DEOPT_POSITION && mode != RelocInfo::DEOPT_REASON &&
- mode != RelocInfo::DEOPT_ID);
+ mode != RelocInfo::DEOPT_SCRIPT_OFFSET &&
+ mode != RelocInfo::DEOPT_INLINING_ID &&
+ mode != RelocInfo::DEOPT_REASON && mode != RelocInfo::DEOPT_ID);
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
@@ -2947,7 +2948,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL)) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
- (rmode == RelocInfo::DEOPT_POSITION) ||
+ (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
+ (rmode == RelocInfo::DEOPT_INLINING_ID) ||
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
(rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 16b7eae03f..d5c2936dc2 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -933,7 +933,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
int buffer_space() const;
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index ca5ea8035a..c0d700ce0d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -1649,15 +1649,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Add(x10, x10, x10);
__ Add(number_of_capture_registers, x10, 2);
- // Check that the fourth object is a JSObject.
+ // Check that the last match info is a FixedArray.
DCHECK(jssp.Is(__ StackPointer()));
- __ Peek(x10, kLastMatchInfoOffset);
- __ JumpIfSmi(x10, &runtime);
- __ JumpIfNotObjectType(x10, x11, x11, JS_OBJECT_TYPE, &runtime);
+ __ Peek(last_match_info_elements, kLastMatchInfoOffset);
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ Ldr(last_match_info_elements,
- FieldMemOperand(x10, JSObject::kElementsOffset));
__ Ldr(x10,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
@@ -1670,38 +1667,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Ldrsw(x10,
UntagSmiFieldMemOperand(last_match_info_elements,
FixedArray::kLengthOffset));
- __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead);
+ __ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
__ Cmp(x11, x10);
__ B(gt, &runtime);
// Store the capture count.
__ SmiTag(x10, number_of_capture_registers);
- __ Str(x10,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
+ __ Str(x10, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
- __ Str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
+ __ Str(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset));
// Use x10 as the subject string in order to only need
// one RecordWriteStub.
__ Mov(x10, subject);
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- x10,
- x11,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
- __ Str(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
+ RegExpMatchInfo::kLastSubjectOffset, x10, x11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ Str(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset));
__ Mov(x10, subject);
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- x10,
- x11,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastInputOffset, x10, x11,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
Register last_match_offsets = x13;
Register offsets_vector_index = x14;
@@ -1716,9 +1704,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// iterates down to zero (inclusive).
- __ Add(last_match_offsets,
- last_match_info_elements,
- RegExpImpl::kFirstCaptureOffset - kHeapObjectTag);
+ __ Add(last_match_offsets, last_match_info_elements,
+ RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
__ Bind(&next_capture);
__ Subs(number_of_capture_registers, number_of_capture_registers, 2);
__ B(mi, &done);
@@ -1738,7 +1725,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Bind(&done);
// Return last match info.
- __ Peek(x0, kLastMatchInfoOffset);
+ __ Mov(x0, last_match_info_elements);
// Drop the 4 arguments of the stub from the stack.
__ Drop(4);
__ Ret();
@@ -1997,6 +1984,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // x0 - number of arguments
// x1 - function
// x3 - slot id
// x2 - vector
@@ -2011,8 +1999,6 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ Cmp(function, scratch);
__ B(ne, miss);
- __ Mov(x0, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, feedback_vector, index);
@@ -2021,7 +2007,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
Register new_target_arg = index;
__ Mov(allocation_site_arg, allocation_site);
__ Mov(new_target_arg, function);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -2029,12 +2015,11 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
void CallICStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("CallICStub");
+ // x0 - number of arguments
// x1 - function
// x3 - slot id (Smi)
// x2 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
Register function = x1;
Register feedback_vector = x2;
@@ -2072,7 +2057,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, feedback_vector, index);
- __ Mov(x0, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2113,7 +2097,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
IncrementCallCount(masm, feedback_vector, index);
__ Bind(&call_count_incremented);
- __ Mov(x0, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2142,9 +2125,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
// x2 - vector
// x3 - slot
// x1 - function
+ // x0 - number of arguments
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ SmiTag(x0);
+ __ Push(x0);
__ Push(feedback_vector, index);
__ Push(cp, function);
@@ -2152,6 +2138,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(cp, function);
__ Pop(feedback_vector, index);
+ __ Pop(x0);
+ __ SmiUntag(x0);
}
__ B(&call_function);
@@ -2171,14 +2159,21 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve the number of arguments as Smi.
+ __ SmiTag(x0);
+
// Push the receiver and the function and feedback info.
- __ Push(x1, x2, x3);
+ __ Push(x0, x1, x2, x3);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to edi and exit the internal frame.
__ Mov(x1, x0);
+
+ // Restore number of arguments.
+ __ Pop(x0);
+ __ SmiUntag(x0);
}
@@ -2979,21 +2974,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(x2);
CallICStub stub(isolate(), state());
@@ -3001,14 +2981,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3099,172 +3071,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(handler);
}
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
- Register name = LoadWithVectorDescriptor::NameRegister(); // x2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
- Register feedback = x4;
- Register receiver_map = x5;
- Register scratch1 = x6;
-
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ Bind(&try_array);
- __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
-
- __ Bind(&not_array);
- __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, x7);
-
- __ Bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ Bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // x1
- Register key = LoadWithVectorDescriptor::NameRegister(); // x2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // x3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // x0
- Register feedback = x4;
- Register receiver_map = x5;
- Register scratch1 = x6;
-
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ Bind(&try_array);
- // Is it a fixed array?
- __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ Bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
-
- __ Bind(&not_array);
- // Is it generic?
- __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
- &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ Bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Cmp(key, feedback);
- __ B(ne, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
-
- __ Bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ Bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
- Register key = StoreWithVectorDescriptor::NameRegister(); // x2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
- Register feedback = x5;
- Register receiver_map = x6;
- Register scratch1 = x7;
-
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ Bind(&try_array);
- __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);
-
- __ Bind(&not_array);
- __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, x8);
-
- __ Bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ Bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3895,33 +3707,22 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Register argc = x0;
- if (argument_count() == ANY) {
- Label zero_case, n_case;
- __ Cbz(argc, &zero_case);
- __ Cmp(argc, 1);
- __ B(ne, &n_case);
-
- // One argument.
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ Bind(&zero_case);
- // No arguments.
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ Bind(&n_case);
- // N arguments.
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ Label zero_case, n_case;
+ __ Cbz(argc, &zero_case);
+ __ Cmp(argc, 1);
+ __ B(ne, &n_case);
+
+ // One argument.
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ Bind(&zero_case);
+ // No arguments.
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ Bind(&n_case);
+ // N arguments.
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -3981,21 +3782,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing support.
__ Bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
- __ Add(x0, x0, Operand(3));
- break;
- case NONE:
- __ Poke(constructor, 0 * kPointerSize);
- __ Mov(x0, Operand(3));
- break;
- case ONE:
- __ Poke(constructor, 1 * kPointerSize);
- __ Mov(x0, Operand(4));
- break;
- }
+ __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x0, x0, Operand(3));
__ Push(new_target, allocation_site);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4271,7 +4059,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
__ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
__ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
- __ Mov(x1, Smi::FromInt(0));
+ __ Mov(x1, Smi::kZero);
__ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ Ret();
@@ -4783,126 +4571,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context = cp;
- Register value = x0;
- Register slot = x2;
- Register context_temp = x10;
- Register cell = x10;
- Register cell_details = x11;
- Register cell_value = x12;
- Register cell_value_map = x13;
- Register value_map = x14;
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue);
- }
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); i++) {
- __ Ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = context_temp;
- }
-
- // Load the PropertyCell at the specified slot.
- __ Add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
- __ Ldr(cell, ContextMemOperand(cell));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ Ldr(cell_details,
- UntagSmiFieldMemOperand(cell, PropertyCell::kDetailsOffset));
- __ And(cell_details, cell_details,
- PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask);
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData));
- __ B(ne, &not_mutable_data);
- __ JumpIfSmi(value, &fast_smi_case);
- __ Bind(&fast_heapobject_case);
- __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- // RecordWriteField clobbers the value register, so we copy it before the
- // call.
- __ Mov(x11, value);
- __ RecordWriteField(cell, PropertyCell::kValueOffset, x11, x12,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ Bind(&not_mutable_data);
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ Ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ Cmp(cell_value, value);
- __ B(ne, &not_same_value);
-
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ Tst(cell_details, PropertyDetails::kAttributesReadOnlyMask);
- __ B(ne, &slow_case);
-
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData));
- __ B(eq, &done);
- __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData));
- __ B(eq, &done);
- __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData));
- __ Check(eq, kUnexpectedValue);
- __ Bind(&done);
- }
- __ Ret();
- __ Bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ Cmp(cell_details, PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData));
- __ B(ne, &slow_case);
-
- // Now either both old and new values must be smis or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value, &slow_case);
- // Old and new values are smis, no need for a write barrier here.
- __ Bind(&fast_smi_case);
- __ Str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ Ret();
-
- __ Bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value, &slow_case);
-
- __ Ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
- __ Ldr(value_map, FieldMemOperand(value, HeapObject::kMapOffset));
- __ Cmp(cell_value_map, value_map);
- __ B(eq, &fast_heapobject_case);
-
- // Fall back to the runtime.
- __ Bind(&slow_case);
- __ SmiTag(slot);
- __ Push(slot, value);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
// The number of register that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers need to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
@@ -5202,7 +4870,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
__ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ Push(scratch3, scratch, scratch, scratch2, holder);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(Smi::kZero); // should_throw_on_error -> false
__ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ Push(scratch);
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index d7bc3de01a..13ecc2b982 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -31,9 +31,9 @@ const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
const Register LoadDescriptor::SlotRegister() { return x0; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return x4; }
const Register StoreDescriptor::ReceiverRegister() { return x1; }
const Register StoreDescriptor::NameRegister() { return x2; }
@@ -46,10 +46,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x5; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
@@ -181,7 +177,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {x1, x3, x2};
+ Register registers[] = {x1, x0, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -230,16 +226,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x2: length
- // x1: index (of last match)
- // x0: string
- Register registers[] = {x2, x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value (js_array)
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 87ea1eb9d5..bc7a2817fa 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1532,11 +1532,12 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver, no_memento_found);
- Add(scratch1, receiver, kMementoEndOffset);
+ Add(scratch1, receiver, kMementoLastWordOffset);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
Mov(scratch2, new_space_allocation_top_adr);
@@ -1558,7 +1559,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
Mov(scratch2, new_space_allocation_top_adr);
Ldr(scratch2, MemOperand(scratch2));
Cmp(scratch1, scratch2);
- B(gt, no_memento_found);
+ B(ge, no_memento_found);
// Memento map check.
bind(&map_check);
Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
@@ -2169,62 +2170,6 @@ void MacroAssembler::ClampDoubleToUint8(Register output,
Fcvtnu(output, dbl_scratch);
}
-
-void MacroAssembler::CopyBytes(Register dst,
- Register src,
- Register length,
- Register scratch,
- CopyHint hint) {
- UseScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireX();
- Register tmp2 = temps.AcquireX();
- DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
- DCHECK(!AreAliased(src, dst, csp));
-
- if (emit_debug_code()) {
- // Check copy length.
- Cmp(length, 0);
- Assert(ge, kUnexpectedNegativeValue);
-
- // Check src and dst buffers don't overlap.
- Add(scratch, src, length); // Calculate end of src buffer.
- Cmp(scratch, dst);
- Add(scratch, dst, length); // Calculate end of dst buffer.
- Ccmp(scratch, src, ZFlag, gt);
- Assert(le, kCopyBuffersOverlap);
- }
-
- Label short_copy, short_loop, bulk_loop, done;
-
- if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) {
- Register bulk_length = scratch;
- int pair_size = 2 * kXRegSize;
- int pair_mask = pair_size - 1;
-
- Bic(bulk_length, length, pair_mask);
- Cbz(bulk_length, &short_copy);
- Bind(&bulk_loop);
- Sub(bulk_length, bulk_length, pair_size);
- Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex));
- Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex));
- Cbnz(bulk_length, &bulk_loop);
-
- And(length, length, pair_mask);
- }
-
- Bind(&short_copy);
- Cbz(length, &done);
- Bind(&short_loop);
- Sub(length, length, 1);
- Ldrb(tmp1, MemOperand(src, 1, PostIndex));
- Strb(tmp1, MemOperand(dst, 1, PostIndex));
- Cbnz(length, &short_loop);
-
-
- Bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -3719,20 +3664,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
- B(hi, fail);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
@@ -3790,19 +3721,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeField<String::ArrayIndexValueBits>(index, hash);
- SmiTag(index, index);
-}
-
-
void MacroAssembler::EmitSeqStringSetCharCheck(
Register string,
Register index,
@@ -3830,85 +3748,12 @@ void MacroAssembler::EmitSeqStringSetCharCheck(
Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
Check(lt, kIndexIsTooLarge);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
Cmp(index, 0);
Check(ge, kIndexIsNegative);
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
- Label same_contexts;
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- Mov(scratch2, fp);
- bind(&load_context);
- Ldr(scratch1,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch1, &has_context);
- Ldr(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
- B(&load_context);
- bind(&has_context);
-
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- Cmp(scratch1, 0);
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
- // Load the native context of the current context.
- Ldr(scratch1, ContextMemOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Read the first word and compare to the native_context_map.
- Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
- CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
- Check(eq, kExpectedNativeContext);
- }
-
- // Check if both contexts are the same.
- Ldr(scratch2, FieldMemOperand(holder_reg,
- JSGlobalProxy::kNativeContextOffset));
- Cmp(scratch1, scratch2);
- B(&same_contexts, eq);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // We're short on scratch registers here, so use holder_reg as a scratch.
- Push(holder_reg);
- Register scratch3 = holder_reg;
-
- CompareRoot(scratch2, Heap::kNullValueRootIndex);
- Check(ne, kExpectedNonNullContext);
-
- Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
- Check(eq, kExpectedNativeContext);
- Pop(holder_reg);
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
- Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
- Cmp(scratch1, scratch2);
- B(miss, ne);
-
- Bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -3944,69 +3789,6 @@ void MacroAssembler::GetNumberHash(Register key, Register scratch) {
Bic(key, key, Operand(0xc0000000u));
}
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3) {
- DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
-
- Label done;
-
- SmiUntag(scratch0, key);
- GetNumberHash(scratch0, scratch1);
-
- // Compute the capacity mask.
- Ldrsw(scratch1,
- UntagSmiFieldMemOperand(elements,
- SeededNumberDictionary::kCapacityOffset));
- Sub(scratch1, scratch1, 1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
- } else {
- Mov(scratch2, scratch0);
- }
- And(scratch2, scratch2, scratch1);
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
- Ldr(scratch3,
- FieldMemOperand(scratch2,
- SeededNumberDictionary::kElementsStartOffset));
- Cmp(key, scratch3);
- if (i != (kNumberDictionaryProbes - 1)) {
- B(eq, &done);
- } else {
- B(ne, miss);
- }
- }
-
- Bind(&done);
- // Check that the value is a field property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- DCHECK_EQ(DATA, 0);
- TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- Ldr(result, FieldMemOperand(scratch2, kValueOffset));
-}
-
void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
Register code_entry,
Register scratch) {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 37e9926e9b..a89c106b63 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -1099,16 +1099,6 @@ class MacroAssembler : public Assembler {
void InitializeFieldsWithFiller(Register current_address,
Register end_address, Register filler);
- // Copies a number of bytes from src to dst. All passed registers are
- // clobbered. On exit src and dst will point to the place just after where the
- // last byte was read or written and length will be zero. Hint may be used to
- // determine which is the most efficient algorithm to use for copying.
- void CopyBytes(Register dst,
- Register src,
- Register length,
- Register scratch,
- CopyHint hint = kCopyUnknown);
-
// ---- String Utilities ----
@@ -1576,10 +1566,6 @@ class MacroAssembler : public Assembler {
Label* if_any_set,
Label* fall_through);
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map, Register scratch, Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -1595,12 +1581,6 @@ class MacroAssembler : public Assembler {
Label* fail,
int elements_offset = 0);
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// ---------------------------------------------------------------------------
// Inline caching support.
@@ -1610,39 +1590,10 @@ class MacroAssembler : public Assembler {
Register scratch,
uint32_t encoding_mask);
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss);
-
// Hash the interger value in 'key' register.
// It uses the same algorithm as ComputeIntegerHash in utils.h.
void GetNumberHash(Register key, Register scratch);
- // Load value from the dictionary.
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
// ---------------------------------------------------------------------------
// Frames.
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index a1af1af368..13f936d0b5 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -20,6 +20,7 @@
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
typedef uint8_t byte;
@@ -162,11 +163,14 @@ MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
info->literal(), &typer);
i::Handle<i::FixedArray> foreign_globals;
- auto module = builder.Run(&foreign_globals);
+ auto asm_wasm_result = builder.Run(&foreign_globals);
+ wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
+ wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
info->isolate(), module->begin(), module->end(), &thrower,
- internal::wasm::kAsmJsOrigin);
+ internal::wasm::kAsmJsOrigin, info->script(), asm_offsets->begin(),
+ asm_offsets->end());
DCHECK(!compiled.is_null());
wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
diff --git a/deps/v8/src/asmjs/asm-typer.cc b/deps/v8/src/asmjs/asm-typer.cc
index 94cc4dbfd1..55b5fc70d8 100644
--- a/deps/v8/src/asmjs/asm-typer.cc
+++ b/deps/v8/src/asmjs/asm-typer.cc
@@ -567,7 +567,7 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
module_name_ = fun->name();
// Allowed parameters: Stdlib, FFI, Mem
- static const uint32_t MaxModuleParameters = 3;
+ static const int MaxModuleParameters = 3;
if (scope->num_parameters() > MaxModuleParameters) {
FAIL(fun, "asm.js modules may not have more than three parameters.");
}
@@ -647,11 +647,8 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
FAIL(current, "Invalid top-level statement in asm.js module.");
}
- ZoneList<Declaration*>* decls = scope->declarations();
-
- for (int ii = 0; ii < decls->length(); ++ii) {
- Declaration* decl = decls->at(ii);
-
+ Declaration::List* decls = scope->declarations();
+ for (Declaration* decl : *decls) {
if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
RECURSE(ValidateFunction(fun_decl));
source_layout.AddFunction(*fun_decl);
@@ -664,9 +661,7 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
source_layout.AddTable(*function_table);
}
- for (int ii = 0; ii < decls->length(); ++ii) {
- Declaration* decl = decls->at(ii);
-
+ for (Declaration* decl : *decls) {
if (decl->IsFunctionDeclaration()) {
continue;
}
@@ -762,9 +757,15 @@ AsmType* AsmTyper::ValidateGlobalDeclaration(Assignment* assign) {
bool global_variable = false;
if (value->IsLiteral() || value->IsCall()) {
AsmType* type = nullptr;
- RECURSE(type = VariableTypeAnnotations(value, true));
+ VariableInfo::Mutability mutability;
+ if (target_variable->mode() == CONST) {
+ mutability = VariableInfo::kConstGlobal;
+ } else {
+ mutability = VariableInfo::kMutableGlobal;
+ }
+ RECURSE(type = VariableTypeAnnotations(value, mutability));
target_info = new (zone_) VariableInfo(type);
- target_info->set_mutability(VariableInfo::kMutableGlobal);
+ target_info->set_mutability(mutability);
global_variable = true;
} else if (value->IsProperty()) {
target_info = ImportLookup(value->AsProperty());
@@ -828,6 +829,23 @@ AsmType* AsmTyper::ValidateGlobalDeclaration(Assignment* assign) {
RECURSE(type = NewHeapView(value->AsCallNew()));
target_info = new (zone_) VariableInfo(type);
target_info->set_mutability(VariableInfo::kImmutableGlobal);
+ } else if (auto* proxy = value->AsVariableProxy()) {
+ auto* var_info = Lookup(proxy->var());
+
+ if (var_info == nullptr) {
+ FAIL(value, "Undeclared identifier in global initializer");
+ }
+
+ if (var_info->mutability() != VariableInfo::kConstGlobal) {
+ FAIL(value, "Identifier used to initialize a global must be a const");
+ }
+
+ target_info = new (zone_) VariableInfo(var_info->type());
+ if (target_variable->mode() == CONST) {
+ target_info->set_mutability(VariableInfo::kConstGlobal);
+ } else {
+ target_info->set_mutability(VariableInfo::kMutableGlobal);
+ }
}
if (target_info == nullptr) {
@@ -999,7 +1017,7 @@ AsmType* AsmTyper::ValidateFunctionTable(Assignment* assign) {
FAIL(assign, "Identifier redefined (function table name).");
}
- if (target_info_table->length() != pointers->length()) {
+ if (static_cast<int>(target_info_table->length()) != pointers->length()) {
FAIL(assign, "Function table size mismatch.");
}
@@ -1053,7 +1071,7 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
}
auto* param = proxy->var();
if (param->location() != VariableLocation::PARAMETER ||
- param->index() != annotated_parameters) {
+ param->index() != static_cast<int>(annotated_parameters)) {
// Done with parameters.
break;
}
@@ -1075,7 +1093,7 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
SetTypeOf(expr, type);
}
- if (annotated_parameters != fun->parameter_count()) {
+ if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
FAIL(fun_decl, "Incorrect parameter type annotations.");
}
@@ -1138,7 +1156,7 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
DCHECK(return_type_->IsReturnType());
- for (auto* decl : *fun->scope()->declarations()) {
+ for (Declaration* decl : *fun->scope()->declarations()) {
auto* var_decl = decl->AsVariableDeclaration();
if (var_decl == nullptr) {
FAIL(decl, "Functions may only define inner variables.");
@@ -1642,7 +1660,15 @@ AsmType* AsmTyper::ValidateCommaExpression(BinaryOperation* comma) {
auto* right = comma->right();
AsmType* right_type = nullptr;
if (auto* right_as_call = right->AsCall()) {
- RECURSE(right_type = ValidateCall(AsmType::Void(), right_as_call));
+ RECURSE(right_type = ValidateFloatCoercion(right_as_call));
+ if (right_type != AsmType::Float()) {
+ // right_type == nullptr <-> right_as_call is not a call to fround.
+ DCHECK(right_type == nullptr);
+ RECURSE(right_type = ValidateCall(AsmType::Void(), right_as_call));
+ // Unnanotated function call to something that's not fround must be a call
+ // to a void function.
+ DCHECK_EQ(right_type, AsmType::Void());
+ }
} else {
RECURSE(right_type = ValidateExpression(right));
}
@@ -1674,7 +1700,7 @@ AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
if (!literal->value()->ToInt32(&value)) {
FAIL(literal, "Integer literal is out of range.");
}
- // *VIOLATION* Not really a violation, but rather a different in the
+ // *VIOLATION* Not really a violation, but rather a difference in
// validation. The spec handles -NumericLiteral in ValidateUnaryExpression,
// but V8's AST represents the negative literals as Literals.
return AsmType::Signed();
@@ -2676,13 +2702,31 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
FAIL(statement, "Invalid literal in return statement.");
}
+ if (auto* proxy = ret_expr->AsVariableProxy()) {
+ auto* var_info = Lookup(proxy->var());
+
+ if (var_info == nullptr) {
+ FAIL(statement, "Undeclared identifier in return statement.");
+ }
+
+ if (var_info->mutability() != VariableInfo::kConstGlobal) {
+ FAIL(statement, "Identifier in return statement is not const.");
+ }
+
+ if (!var_info->type()->IsReturnType()) {
+ FAIL(statement, "Constant in return must be signed, float, or double.");
+ }
+
+ return var_info->type();
+ }
+
FAIL(statement, "Invalid return type expression.");
}
// 5.4 VariableTypeAnnotations
// Also used for 5.5 GlobalVariableTypeAnnotations
-AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer,
- bool global) {
+AsmType* AsmTyper::VariableTypeAnnotations(
+ Expression* initializer, VariableInfo::Mutability mutability_type) {
if (auto* literal = initializer->AsLiteral()) {
if (literal->raw_value()->ContainsDot()) {
SetTypeOf(initializer, AsmType::Double());
@@ -2690,24 +2734,50 @@ AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer,
}
int32_t i32;
uint32_t u32;
+
+ AsmType* initializer_type = nullptr;
if (literal->value()->ToUint32(&u32)) {
if (u32 > LargestFixNum) {
- SetTypeOf(initializer, AsmType::Unsigned());
+ initializer_type = AsmType::Unsigned();
+ SetTypeOf(initializer, initializer_type);
} else {
- SetTypeOf(initializer, AsmType::FixNum());
+ initializer_type = AsmType::FixNum();
+ SetTypeOf(initializer, initializer_type);
+ initializer_type = AsmType::Signed();
}
} else if (literal->value()->ToInt32(&i32)) {
- SetTypeOf(initializer, AsmType::Signed());
+ initializer_type = AsmType::Signed();
+ SetTypeOf(initializer, initializer_type);
} else {
FAIL(initializer, "Invalid type annotation - forbidden literal.");
}
- return AsmType::Int();
+ if (mutability_type != VariableInfo::kConstGlobal) {
+ return AsmType::Int();
+ }
+ return initializer_type;
+ }
+
+ if (auto* proxy = initializer->AsVariableProxy()) {
+ auto* var_info = Lookup(proxy->var());
+
+ if (var_info == nullptr) {
+ FAIL(initializer,
+ "Undeclared identifier in variable declaration initializer.");
+ }
+
+ if (var_info->mutability() != VariableInfo::kConstGlobal) {
+ FAIL(initializer,
+ "Identifier in variable declaration initializer must be const.");
+ }
+
+ SetTypeOf(initializer, var_info->type());
+ return var_info->type();
}
auto* call = initializer->AsCall();
if (call == nullptr) {
FAIL(initializer,
- "Invalid variable initialization - it should be a literal, or "
+ "Invalid variable initialization - it should be a literal, const, or "
"fround(literal).");
}
@@ -2724,7 +2794,7 @@ AsmType* AsmTyper::VariableTypeAnnotations(Expression* initializer,
}
// Float constants must contain dots in local, but not in globals.
- if (!global) {
+ if (mutability_type == VariableInfo::kLocal) {
if (!src_expr->raw_value()->ContainsDot()) {
FAIL(initializer,
"Invalid float type annotation - expected literal argument to be a "
diff --git a/deps/v8/src/asmjs/asm-typer.h b/deps/v8/src/asmjs/asm-typer.h
index 942ca21370..2c66948d56 100644
--- a/deps/v8/src/asmjs/asm-typer.h
+++ b/deps/v8/src/asmjs/asm-typer.h
@@ -102,6 +102,13 @@ class AsmTyper final {
kInvalidMutability,
kLocal,
kMutableGlobal,
+ // *VIOLATION* We support const variables in asm.js, as per the
+ //
+ // https://discourse.wicg.io/t/allow-const-global-variables/684
+ //
+ // Global const variables are treated as if they were numeric literals,
+ // and can be used anywhere a literal can be used.
+ kConstGlobal,
kImmutableGlobal,
};
@@ -114,7 +121,8 @@ class AsmTyper final {
}
bool IsGlobal() const {
- return mutability_ == kImmutableGlobal || mutability_ == kMutableGlobal;
+ return mutability_ == kImmutableGlobal || mutability_ == kConstGlobal ||
+ mutability_ == kMutableGlobal;
}
bool IsStdlib() const { return standard_member_ == kStdlib; }
@@ -307,8 +315,9 @@ class AsmTyper final {
AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
// 5.4 VariableTypeAnnotations
// 5.5 GlobalVariableTypeAnnotations
- AsmType* VariableTypeAnnotations(Expression* initializer,
- bool global = false);
+ AsmType* VariableTypeAnnotations(
+ Expression* initializer,
+ VariableInfo::Mutability global = VariableInfo::kLocal);
AsmType* ImportExpression(Property* import);
AsmType* NewHeapView(CallNew* new_heap_view);
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index 6fe42013c0..882e32828a 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -7,7 +7,9 @@
#include <string>
+#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -92,7 +94,7 @@ class AsmValueType {
}
static AsmType* New(bitset_t bits) {
- DCHECK_EQ((bits & kAsmValueTypeTag), 0);
+ DCHECK_EQ((bits & kAsmValueTypeTag), 0u);
return reinterpret_cast<AsmType*>(
static_cast<uintptr_t>(bits | kAsmValueTypeTag));
}
@@ -101,7 +103,7 @@ class AsmValueType {
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueType);
};
-class AsmCallableType : public ZoneObject {
+class V8_EXPORT_PRIVATE AsmCallableType : public NON_EXPORTED_BASE(ZoneObject) {
public:
virtual std::string Name() = 0;
@@ -124,7 +126,7 @@ class AsmCallableType : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(AsmCallableType);
};
-class AsmFunctionType final : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmFunctionType final : public AsmCallableType {
public:
AsmFunctionType* AsFunctionType() final { return this; }
@@ -151,7 +153,8 @@ class AsmFunctionType final : public AsmCallableType {
DISALLOW_COPY_AND_ASSIGN(AsmFunctionType);
};
-class AsmOverloadedFunctionType final : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmOverloadedFunctionType final
+ : public AsmCallableType {
public:
AsmOverloadedFunctionType* AsOverloadedFunctionType() override {
return this;
@@ -173,7 +176,7 @@ class AsmOverloadedFunctionType final : public AsmCallableType {
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmOverloadedFunctionType);
};
-class AsmFFIType final : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmFFIType final : public AsmCallableType {
public:
AsmFFIType* AsFFIType() override { return this; }
@@ -189,7 +192,7 @@ class AsmFFIType final : public AsmCallableType {
DISALLOW_COPY_AND_ASSIGN(AsmFFIType);
};
-class AsmFunctionTableType : public AsmCallableType {
+class V8_EXPORT_PRIVATE AsmFunctionTableType : public AsmCallableType {
public:
AsmFunctionTableType* AsFunctionTableType() override { return this; }
@@ -212,7 +215,7 @@ class AsmFunctionTableType : public AsmCallableType {
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmFunctionTableType);
};
-class AsmType {
+class V8_EXPORT_PRIVATE AsmType {
public:
#define DEFINE_CONSTRUCTOR(CamelName, string_name, number, parent_types) \
static AsmType* CamelName() { \
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.cc b/deps/v8/src/asmjs/asm-wasm-builder.cc
index 091f7935f6..cac6fbd8b3 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.cc
+++ b/deps/v8/src/asmjs/asm-wasm-builder.cc
@@ -12,13 +12,13 @@
#include "src/asmjs/asm-types.h"
#include "src/asmjs/asm-wasm-builder.h"
-#include "src/wasm/switch-logic.h"
+#include "src/asmjs/switch-logic.h"
+
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/codegen.h"
namespace v8 {
namespace internal {
@@ -81,14 +81,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
++i) {
b.AddParam(i->type);
}
- foreign_init_function_->SetExported();
- std::string raw_name = "__foreign_init__";
- foreign_init_function_->SetName(
- AsmWasmBuilder::foreign_init_name,
- static_cast<int>(strlen(AsmWasmBuilder::foreign_init_name)));
-
- foreign_init_function_->SetName(raw_name.data(),
- static_cast<int>(raw_name.size()));
+ foreign_init_function_->ExportAs(
+ CStrVector(AsmWasmBuilder::foreign_init_name));
foreign_init_function_->SetSignature(b.Build());
for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
@@ -563,10 +557,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Variable* var = expr->var();
DCHECK(var->is_function());
WasmFunctionBuilder* function = LookupOrInsertFunction(var);
- function->SetExported();
- function->SetName(
- AsmWasmBuilder::single_function_name,
- static_cast<int>(strlen(AsmWasmBuilder::single_function_name)));
+ function->ExportAs(CStrVector(AsmWasmBuilder::single_function_name));
}
}
@@ -650,9 +641,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
const AstRawString* raw_name = name->AsRawPropertyName();
if (var->is_function()) {
WasmFunctionBuilder* function = LookupOrInsertFunction(var);
- function->SetExported();
- function->SetName(reinterpret_cast<const char*>(raw_name->raw_data()),
- raw_name->length());
+ function->Export();
+ function->SetName({reinterpret_cast<const char*>(raw_name->raw_data()),
+ raw_name->length()});
}
}
}
@@ -763,7 +754,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
};
- void EmitAssignmentLhs(Expression* target, MachineType* mtype) {
+ void EmitAssignmentLhs(Expression* target, AsmType** atype) {
// Match the left hand side of the assignment.
VariableProxy* target_var = target->AsVariableProxy();
if (target_var != nullptr) {
@@ -774,7 +765,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Property* target_prop = target->AsProperty();
if (target_prop != nullptr) {
// Left hand side is a property access, i.e. the asm.js heap.
- VisitPropertyAndEmitIndex(target_prop, mtype);
+ VisitPropertyAndEmitIndex(target_prop, atype);
return;
}
@@ -822,7 +813,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(value));
}
- void EmitAssignment(Assignment* expr, MachineType type, ValueFate fate) {
+ void EmitAssignment(Assignment* expr, AsmType* type, ValueFate fate) {
// Match the left hand side of the assignment.
VariableProxy* target_var = expr->target()->AsVariableProxy();
if (target_var != nullptr) {
@@ -857,21 +848,21 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
// Note that unlike StoreMem, AsmjsStoreMem ignores out-of-bounds writes.
WasmOpcode opcode;
- if (type == MachineType::Int8()) {
+ if (type == AsmType::Int8Array()) {
opcode = kExprI32AsmjsStoreMem8;
- } else if (type == MachineType::Uint8()) {
+ } else if (type == AsmType::Uint8Array()) {
opcode = kExprI32AsmjsStoreMem8;
- } else if (type == MachineType::Int16()) {
+ } else if (type == AsmType::Int16Array()) {
opcode = kExprI32AsmjsStoreMem16;
- } else if (type == MachineType::Uint16()) {
+ } else if (type == AsmType::Uint16Array()) {
opcode = kExprI32AsmjsStoreMem16;
- } else if (type == MachineType::Int32()) {
+ } else if (type == AsmType::Int32Array()) {
opcode = kExprI32AsmjsStoreMem;
- } else if (type == MachineType::Uint32()) {
+ } else if (type == AsmType::Uint32Array()) {
opcode = kExprI32AsmjsStoreMem;
- } else if (type == MachineType::Float32()) {
+ } else if (type == AsmType::Float32Array()) {
opcode = kExprF32AsmjsStoreMem;
- } else if (type == MachineType::Float64()) {
+ } else if (type == AsmType::Float64Array()) {
opcode = kExprF64AsmjsStoreMem;
} else {
UNREACHABLE();
@@ -938,12 +929,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
if (as_init) LoadInitFunction();
- MachineType mtype = MachineType::None();
+ AsmType* atype = AsmType::None();
bool is_nop = false;
- EmitAssignmentLhs(expr->target(), &mtype);
+ EmitAssignmentLhs(expr->target(), &atype);
EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
if (!is_nop) {
- EmitAssignment(expr, mtype, fate);
+ EmitAssignment(expr, atype, fate);
}
if (as_init) UnLoadInitFunction();
}
@@ -967,40 +958,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
}
- void VisitPropertyAndEmitIndex(Property* expr, MachineType* mtype) {
+ void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
Expression* obj = expr->obj();
- AsmType* type = typer_->TypeOf(obj);
- int size;
- if (type->IsA(AsmType::Uint8Array())) {
- *mtype = MachineType::Uint8();
- size = 1;
- } else if (type->IsA(AsmType::Int8Array())) {
- *mtype = MachineType::Int8();
- size = 1;
- } else if (type->IsA(AsmType::Uint16Array())) {
- *mtype = MachineType::Uint16();
- size = 2;
- } else if (type->IsA(AsmType::Int16Array())) {
- *mtype = MachineType::Int16();
- size = 2;
- } else if (type->IsA(AsmType::Uint32Array())) {
- *mtype = MachineType::Uint32();
- size = 4;
- } else if (type->IsA(AsmType::Int32Array())) {
- *mtype = MachineType::Int32();
- size = 4;
- } else if (type->IsA(AsmType::Uint32Array())) {
- *mtype = MachineType::Uint32();
- size = 4;
- } else if (type->IsA(AsmType::Float32Array())) {
- *mtype = MachineType::Float32();
- size = 4;
- } else if (type->IsA(AsmType::Float64Array())) {
- *mtype = MachineType::Float64();
- size = 8;
- } else {
- UNREACHABLE();
- }
+ *atype = typer_->TypeOf(obj);
+ int size = (*atype)->ElementSizeInBytes();
if (size == 1) {
// Allow more general expression in byte arrays than the spec
// strictly permits.
@@ -1038,24 +999,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
void VisitProperty(Property* expr) {
- MachineType type;
+ AsmType* type = AsmType::None();
VisitPropertyAndEmitIndex(expr, &type);
WasmOpcode opcode;
- if (type == MachineType::Int8()) {
+ if (type == AsmType::Int8Array()) {
opcode = kExprI32AsmjsLoadMem8S;
- } else if (type == MachineType::Uint8()) {
+ } else if (type == AsmType::Uint8Array()) {
opcode = kExprI32AsmjsLoadMem8U;
- } else if (type == MachineType::Int16()) {
+ } else if (type == AsmType::Int16Array()) {
opcode = kExprI32AsmjsLoadMem16S;
- } else if (type == MachineType::Uint16()) {
+ } else if (type == AsmType::Uint16Array()) {
opcode = kExprI32AsmjsLoadMem16U;
- } else if (type == MachineType::Int32()) {
+ } else if (type == AsmType::Int32Array()) {
opcode = kExprI32AsmjsLoadMem;
- } else if (type == MachineType::Uint32()) {
+ } else if (type == AsmType::Uint32Array()) {
opcode = kExprI32AsmjsLoadMem;
- } else if (type == MachineType::Float32()) {
+ } else if (type == AsmType::Float32Array()) {
opcode = kExprF32AsmjsLoadMem;
- } else if (type == MachineType::Float64()) {
+ } else if (type == AsmType::Float64Array()) {
opcode = kExprF64AsmjsLoadMem;
} else {
UNREACHABLE();
@@ -1367,11 +1328,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t index = imported_function_table_.LookupOrInsertImport(
vp->var(), sig.Build());
VisitCallArgs(expr);
+ current_function_builder_->AddAsmWasmOffset(expr->position());
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitVarInt(index);
} else {
WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr);
+ current_function_builder_->AddAsmWasmOffset(expr->position());
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitDirectCallIndex(
function->func_index());
@@ -1397,8 +1360,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VisitCallArgs(expr);
current_function_builder_->EmitGetLocal(tmp.index());
+ current_function_builder_->AddAsmWasmOffset(expr->position());
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitVarInt(indices->signature_index);
+ current_function_builder_->EmitVarInt(0); // table index
returns_value =
builder_->GetSignature(indices->signature_index)->return_count() >
0;
@@ -1726,9 +1691,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
- void VisitDeclarations(ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); ++i) {
- Declaration* decl = decls->at(i);
+ void VisitDeclarations(Declaration::List* decls) {
+ for (Declaration* decl : *decls) {
RECURSE(Visit(decl));
}
}
@@ -1821,8 +1785,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
ZoneAllocationPolicy(zone()));
function->SetName(
- reinterpret_cast<const char*>(v->raw_name()->raw_data()),
- v->raw_name()->length());
+ {reinterpret_cast<const char*>(v->raw_name()->raw_data()),
+ v->raw_name()->length()});
entry->value = function;
}
return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
@@ -1878,13 +1842,16 @@ AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
// that zone in constructor may be thrown away once wasm module is written.
-ZoneBuffer* AsmWasmBuilder::Run(i::Handle<i::FixedArray>* foreign_args) {
+AsmWasmBuilder::Result AsmWasmBuilder::Run(
+ i::Handle<i::FixedArray>* foreign_args) {
AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
impl.Build();
*foreign_args = impl.GetForeignArgs();
- ZoneBuffer* buffer = new (zone_) ZoneBuffer(zone_);
- impl.builder_->WriteTo(*buffer);
- return buffer;
+ ZoneBuffer* module_buffer = new (zone_) ZoneBuffer(zone_);
+ impl.builder_->WriteTo(*module_buffer);
+ ZoneBuffer* asm_offsets_buffer = new (zone_) ZoneBuffer(zone_);
+ impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
+ return {module_buffer, asm_offsets_buffer};
}
const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.h b/deps/v8/src/asmjs/asm-wasm-builder.h
index 9f85dfaf07..f234abde8a 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.h
+++ b/deps/v8/src/asmjs/asm-wasm-builder.h
@@ -20,9 +20,14 @@ namespace wasm {
class AsmWasmBuilder {
public:
+ struct Result {
+ ZoneBuffer* module_bytes;
+ ZoneBuffer* asm_offset_table;
+ };
+
explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
AsmTyper* typer);
- ZoneBuffer* Run(Handle<FixedArray>* foreign_args);
+ Result Run(Handle<FixedArray>* foreign_args);
static const char* foreign_init_name;
static const char* single_function_name;
diff --git a/deps/v8/src/wasm/switch-logic.cc b/deps/v8/src/asmjs/switch-logic.cc
index 9ebc0b3452..93544da9b4 100644
--- a/deps/v8/src/wasm/switch-logic.cc
+++ b/deps/v8/src/asmjs/switch-logic.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/wasm/switch-logic.h"
+#include "src/asmjs/switch-logic.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/wasm/switch-logic.h b/deps/v8/src/asmjs/switch-logic.h
index 160e0d69b2..4e967ae35f 100644
--- a/deps/v8/src/wasm/switch-logic.h
+++ b/deps/v8/src/asmjs/switch-logic.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_SWITCH_LOGIC_H
#define V8_WASM_SWITCH_LOGIC_H
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -23,7 +24,7 @@ struct CaseNode : public ZoneObject {
}
};
-CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
+V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index b44bc06ba3..a2c0ebebaf 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -351,10 +351,8 @@ void RelocInfo::update_wasm_memory_reference(
icache_flush_mode);
} else if (IsWasmMemorySizeReference(rmode_)) {
uint32_t current_size_reference = wasm_memory_size_reference();
- DCHECK(old_size == 0 || current_size_reference <= old_size);
- uint32_t offset = old_size - current_size_reference;
- DCHECK_GE(new_size, offset);
- uint32_t updated_size_reference = new_size - offset;
+ uint32_t updated_size_reference =
+ new_size + (current_size_reference - old_size);
unchecked_update_wasm_memory_size(updated_size_reference,
icache_flush_mode);
} else {
@@ -762,8 +760,10 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "internal reference";
case INTERNAL_REFERENCE_ENCODED:
return "encoded internal reference";
- case DEOPT_POSITION:
- return "deopt position";
+ case DEOPT_SCRIPT_OFFSET:
+ return "deopt script offset";
+ case DEOPT_INLINING_ID:
+ return "deopt inlining id";
case DEOPT_REASON:
return "deopt reason";
case DEOPT_ID:
@@ -803,7 +803,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << static_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (IsComment(rmode_)) {
os << " (" << reinterpret_cast<char*>(data_) << ")";
- } else if (rmode_ == DEOPT_POSITION) {
+ } else if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
os << " (" << data() << ")";
} else if (rmode_ == DEOPT_REASON) {
os << " ("
@@ -874,7 +874,8 @@ void RelocInfo::Verify(Isolate* isolate) {
case RUNTIME_ENTRY:
case COMMENT:
case EXTERNAL_REFERENCE:
- case DEOPT_POSITION:
+ case DEOPT_SCRIPT_OFFSET:
+ case DEOPT_INLINING_ID:
case DEOPT_REASON:
case DEOPT_ID:
case CONST_POOL:
@@ -1216,19 +1217,6 @@ ExternalReference ExternalReference::log_leave_external_function(
Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}
-
-ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
- return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
-}
-
-
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
- Isolate* isolate) {
- return ExternalReference(
- isolate->keyed_lookup_cache()->field_offsets_address());
-}
-
-
ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_array_start());
}
@@ -1906,11 +1894,12 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
// Platform specific but identical code for all the platforms.
-void Assembler::RecordDeoptReason(DeoptimizeReason reason, int raw_position,
- int id) {
+void Assembler::RecordDeoptReason(DeoptimizeReason reason,
+ SourcePosition position, int id) {
if (FLAG_trace_deopt || isolate()->is_profiling()) {
EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::DEOPT_POSITION, raw_position);
+ RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
+ RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index a925032e2d..2169b15c1d 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -38,6 +38,7 @@
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/deoptimize-reason.h"
+#include "src/globals.h"
#include "src/isolate.h"
#include "src/log.h"
#include "src/register-configuration.h"
@@ -51,6 +52,7 @@ class ApiFunction;
namespace internal {
// Forward declarations.
+class SourcePosition;
class StatsCounter;
// -----------------------------------------------------------------------------
@@ -419,9 +421,10 @@ class RelocInfo {
CONST_POOL,
VENEER_POOL,
- DEOPT_POSITION, // Deoptimization source position.
- DEOPT_REASON, // Deoptimization reason index.
- DEOPT_ID, // Deoptimization inlining id.
+ DEOPT_SCRIPT_OFFSET,
+ DEOPT_INLINING_ID, // Deoptimization source position.
+ DEOPT_REASON, // Deoptimization reason index.
+ DEOPT_ID, // Deoptimization inlining id.
// This is not an actual reloc mode, but used to encode a long pc jump that
// cannot be encoded as part of another record.
@@ -479,7 +482,7 @@ class RelocInfo {
return mode == VENEER_POOL;
}
static inline bool IsDeoptPosition(Mode mode) {
- return mode == DEOPT_POSITION;
+ return mode == DEOPT_SCRIPT_OFFSET || mode == DEOPT_INLINING_ID;
}
static inline bool IsDeoptReason(Mode mode) {
return mode == DEOPT_REASON;
@@ -950,10 +953,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
- // Static data in the keyed lookup cache.
- static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
- static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
-
// Static variable Heap::roots_array_start()
static ExternalReference roots_array_start(Isolate* isolate);
@@ -961,7 +960,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference allocation_sites_list_address(Isolate* isolate);
// Static variable StackGuard::address_of_jslimit()
- static ExternalReference address_of_stack_limit(Isolate* isolate);
+ V8_EXPORT_PRIVATE static ExternalReference address_of_stack_limit(
+ Isolate* isolate);
// Static variable StackGuard::address_of_real_jslimit()
static ExternalReference address_of_real_stack_limit(Isolate* isolate);
@@ -1047,7 +1047,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
- static ExternalReference runtime_function_table_address(Isolate* isolate);
+ V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
+ Isolate* isolate);
Address address() const { return reinterpret_cast<Address>(address_); }
@@ -1107,12 +1108,12 @@ class ExternalReference BASE_EMBEDDED {
void* address_;
};
-bool operator==(ExternalReference, ExternalReference);
+V8_EXPORT_PRIVATE bool operator==(ExternalReference, ExternalReference);
bool operator!=(ExternalReference, ExternalReference);
size_t hash_value(ExternalReference);
-std::ostream& operator<<(std::ostream&, ExternalReference);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
// -----------------------------------------------------------------------------
// Utility functions
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index c4fa71be0a..d0db9eab66 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -19,11 +19,10 @@ namespace internal {
} while (false)
#define NOTHING() DCHECK_NULL(replacement_)
-
-void AstExpressionRewriter::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- AST_REWRITE_LIST_ELEMENT(Declaration, declarations, i);
+void AstExpressionRewriter::VisitDeclarations(Declaration::List* declarations) {
+ for (Declaration::List::Iterator it = declarations->begin();
+ it != declarations->end(); ++it) {
+ AST_REWRITE(Declaration, *it, it = replacement);
}
}
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
index dfed3e195d..26eef24c1d 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.h
+++ b/deps/v8/src/ast/ast-expression-rewriter.h
@@ -29,7 +29,7 @@ class AstExpressionRewriter : public AstVisitor<AstExpressionRewriter> {
}
virtual ~AstExpressionRewriter() {}
- virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ virtual void VisitDeclarations(Declaration::List* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index e1b11f655a..82f9767281 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -37,7 +37,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitReference(Expression* expr);
void VisitStatements(ZoneList<Statement*>* statements);
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
@@ -147,8 +147,15 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
- if (node->var()->IsLookupSlot()) {
- DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+ switch (node->var()->location()) {
+ case VariableLocation::LOOKUP:
+ DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+ break;
+ case VariableLocation::MODULE:
+ DisableCrankshaft(kReferenceToModuleVariable);
+ break;
+ default:
+ break;
}
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
@@ -547,12 +554,8 @@ void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
}
}
-
-void AstNumberingVisitor::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
+void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
+ for (Declaration* decl : *decls) Visit(decl);
}
@@ -592,12 +595,11 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
}
if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
- // Generators can be optimized if --turbo-from-bytecode is set.
- if (FLAG_turbo_from_bytecode) {
- DisableCrankshaft(kGenerator);
- } else {
- DisableOptimization(kGenerator);
- }
+ DisableCrankshaft(kGenerator);
+ }
+
+ if (IsClassConstructor(node->kind())) {
+ DisableCrankshaft(kClassConstructorFunction);
}
VisitDeclarations(scope->declarations());
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index e0f88e19a9..d93e02ffe0 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -40,7 +40,7 @@ class AstTraversalVisitor : public AstVisitor<Subclass> {
bool VisitExpression(Expression* node) { return true; }
// Iteration left-to-right.
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(ZoneList<Statement*>* statements);
// Individual nodes
@@ -104,9 +104,8 @@ AstTraversalVisitor<Subclass>::AstTraversalVisitor(uintptr_t stack_limit,
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitDeclarations(
- ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); ++i) {
- Declaration* decl = decls->at(i);
+ Declaration::List* decls) {
+ for (Declaration* decl : *decls) {
RECURSE(Visit(decl));
}
}
@@ -288,6 +287,8 @@ void AstTraversalVisitor<Subclass>::VisitFunctionLiteral(
PROCESS_EXPRESSION(expr);
DeclarationScope* scope = expr->scope();
RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
+ // A lazily parsed function literal won't have a body.
+ if (expr->scope()->is_lazily_parsed()) return;
RECURSE_EXPRESSION(VisitStatements(expr->body()));
}
diff --git a/deps/v8/src/ast/ast-types.cc b/deps/v8/src/ast/ast-types.cc
index a075e8e787..49551dd7fa 100644
--- a/deps/v8/src/ast/ast-types.cc
+++ b/deps/v8/src/ast/ast-types.cc
@@ -208,6 +208,8 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@@ -218,6 +220,43 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
@@ -245,6 +284,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
+ case MODULE_INFO_ENTRY_TYPE:
return kOtherInternal & kTaggedPointer;
// Remaining instance types are unsupported for now. If any of them do
@@ -260,7 +300,8 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
case CALL_HANDLER_INFO_TYPE:
- case PROMISE_CONTAINER_TYPE:
+ case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
+ case PROMISE_REACTION_JOB_INFO_TYPE:
case FUNCTION_TEMPLATE_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case SIGNATURE_INFO_TYPE:
@@ -274,6 +315,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
+ case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
UNREACHABLE();
return kNone;
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 33ccec7fa8..ed2976f52a 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -98,10 +98,10 @@ void AstString::Internalize(Isolate* isolate) {
void AstRawString::Internalize(Isolate* isolate) {
if (literal_bytes_.length() == 0) {
- string_ = isolate->factory()->empty_string();
+ set_string(isolate->factory()->empty_string());
} else {
AstRawStringInternalizationKey key(this);
- string_ = StringTable::LookupKey(isolate, &key);
+ set_string(StringTable::LookupKey(isolate, &key));
}
}
@@ -131,9 +131,9 @@ bool AstRawString::IsOneByteEqualTo(const char* data) const {
void AstConsString::Internalize(Isolate* isolate) {
// AstRawStrings are internalized before AstConsStrings so left and right are
// already internalized.
- string_ = isolate->factory()
- ->NewConsString(left_->string(), right_->string())
- .ToHandleChecked();
+ set_string(isolate->factory()
+ ->NewConsString(left_->string(), right_->string())
+ .ToHandleChecked());
}
bool AstValue::IsPropertyName() const {
@@ -177,44 +177,44 @@ bool AstValue::BooleanValue() const {
void AstValue::Internalize(Isolate* isolate) {
switch (type_) {
case STRING:
- DCHECK(string_ != NULL);
+ DCHECK_NOT_NULL(string_);
// Strings are already internalized.
DCHECK(!string_->string().is_null());
break;
case SYMBOL:
if (symbol_name_[0] == 'i') {
DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
- value_ = isolate->factory()->iterator_symbol();
+ set_value(isolate->factory()->iterator_symbol());
} else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
- value_ = isolate->factory()->has_instance_symbol();
+ set_value(isolate->factory()->has_instance_symbol());
} else {
DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
- value_ = isolate->factory()->home_object_symbol();
+ set_value(isolate->factory()->home_object_symbol());
}
break;
case NUMBER_WITH_DOT:
case NUMBER:
- value_ = isolate->factory()->NewNumber(number_, TENURED);
+ set_value(isolate->factory()->NewNumber(number_, TENURED));
break;
case SMI_WITH_DOT:
case SMI:
- value_ = handle(Smi::FromInt(smi_), isolate);
+ set_value(handle(Smi::FromInt(smi_), isolate));
break;
case BOOLEAN:
if (bool_) {
- value_ = isolate->factory()->true_value();
+ set_value(isolate->factory()->true_value());
} else {
- value_ = isolate->factory()->false_value();
+ set_value(isolate->factory()->false_value());
}
break;
case NULL_TYPE:
- value_ = isolate->factory()->null_value();
+ set_value(isolate->factory()->null_value());
break;
case THE_HOLE:
- value_ = isolate->factory()->the_hole_value();
+ set_value(isolate->factory()->the_hole_value());
break;
case UNDEFINED:
- value_ = isolate->factory()->undefined_value();
+ set_value(isolate->factory()->undefined_value());
break;
}
}
@@ -301,6 +301,7 @@ void AstValueFactory::Internalize(Isolate* isolate) {
current->Internalize(isolate);
current = next;
}
+
for (AstValue* current = values_; current != nullptr;) {
AstValue* next = current->next();
current->Internalize(isolate);
@@ -313,7 +314,7 @@ void AstValueFactory::Internalize(Isolate* isolate) {
const AstValue* AstValueFactory::NewString(const AstRawString* string) {
AstValue* value = new (zone_) AstValue(string);
- CHECK(string != nullptr);
+ CHECK_NOT_NULL(string);
return AddValue(value);
}
@@ -329,10 +330,12 @@ const AstValue* AstValueFactory::NewNumber(double number, bool with_dot) {
return AddValue(value);
}
+const AstValue* AstValueFactory::NewSmi(uint32_t number) {
+ bool cacheable_smi = number <= kMaxCachedSmi;
+ if (cacheable_smi && smis_[number] != nullptr) return smis_[number];
-const AstValue* AstValueFactory::NewSmi(int number) {
- AstValue* value =
- new (zone_) AstValue(AstValue::SMI, number);
+ AstValue* value = new (zone_) AstValue(AstValue::SMI, number);
+ if (cacheable_smi) smis_[number] = value;
return AddValue(value);
}
@@ -383,9 +386,9 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
memcpy(new_literal_bytes, literal_bytes.start(), length);
AstRawString* new_string = new (zone_) AstRawString(
is_one_byte, Vector<const byte>(new_literal_bytes, length), hash);
- CHECK(new_string != nullptr);
- entry->key = new_string;
+ CHECK_NOT_NULL(new_string);
AddString(new_string);
+ entry->key = new_string;
entry->value = reinterpret_cast<void*>(1);
}
return reinterpret_cast<AstRawString*>(entry->key);
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index bc3eca264e..4ce480fe57 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -30,6 +30,7 @@
#include "src/api.h"
#include "src/base/hashmap.h"
+#include "src/globals.h"
#include "src/utils.h"
// AstString, AstValue and AstValueFactory are for storing strings and values
@@ -53,17 +54,21 @@ class AstString : public ZoneObject {
// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
- DCHECK(!string_.is_null());
- return string_;
+ DCHECK_NOT_NULL(string_);
+ return Handle<String>(string_);
}
+ AstString* next() { return next_; }
AstString** next_location() { return &next_; }
- AstString* next() const { return next_; }
protected:
- // Handle<String>::null() until internalized.
- Handle<String> string_;
- AstString* next_;
+ void set_string(Handle<String> string) { string_ = string.location(); }
+ // {string_} is stored as String** instead of a Handle<String> so it can be
+ // stored in a union with {next_}.
+ union {
+ AstString* next_;
+ String** string_;
+ };
// Poor-man's virtual dispatch to AstRawString / AstConsString. Takes less
// memory.
class IsRawStringBits : public BitField<bool, 0, 1> {};
@@ -203,13 +208,14 @@ class AstValue : public ZoneObject {
if (type_ == STRING) {
return string_->string();
}
- DCHECK(!value_.is_null());
- return value_;
+ DCHECK_NOT_NULL(value_);
+ return Handle<Object>(value_);
}
AstValue* next() const { return next_; }
void set_next(AstValue* next) { next_ = next; }
private:
+ void set_value(Handle<Object> object) { value_ = object.location(); }
friend class AstValueFactory;
enum Type {
@@ -257,19 +263,21 @@ class AstValue : public ZoneObject {
Type type_;
+ // {value_} is stored as Object** instead of a Handle<Object> so it can be
+ // stored in a union with {next_}.
+ union {
+ Object** value_; // if internalized
+ AstValue* next_; // if !internalized
+ };
+
// Uninternalized value.
union {
const AstRawString* string_;
double number_;
int smi_;
bool bool_;
- const AstRawString* strings_;
const char* symbol_name_;
};
-
- // Handle<String>::null() until internalized.
- Handle<Object> value_;
- AstValue* next_;
};
@@ -324,16 +332,18 @@ class AstValueFactory {
AstValueFactory(Zone* zone, uint32_t hash_seed)
: string_table_(AstRawStringCompare),
values_(nullptr),
+ smis_(),
+ strings_(nullptr),
strings_end_(&strings_),
zone_(zone),
hash_seed_(hash_seed) {
- ResetStrings();
#define F(name, str) name##_string_ = NULL;
STRING_CONSTANTS(F)
#undef F
#define F(name) name##_ = NULL;
OTHER_CONSTANTS(F)
#undef F
+ std::fill(smis_, smis_ + arraysize(smis_), nullptr);
}
Zone* zone() const { return zone_; }
@@ -373,7 +383,7 @@ class AstValueFactory {
// A JavaScript symbol (ECMA-262 edition 6).
const AstValue* NewSymbol(const char* name);
const AstValue* NewNumber(double number, bool with_dot = false);
- const AstValue* NewSmi(int number);
+ const AstValue* NewSmi(uint32_t number);
const AstValue* NewBoolean(bool b);
const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
const AstValue* NewNull();
@@ -381,6 +391,10 @@ class AstValueFactory {
const AstValue* NewTheHole();
private:
+ static const uint32_t kMaxCachedSmi = 1 << 10;
+
+ STATIC_ASSERT(kMaxCachedSmi <= Smi::kMaxValue);
+
AstValue* AddValue(AstValue* value) {
value->set_next(values_);
values_ = value;
@@ -395,7 +409,8 @@ class AstValueFactory {
strings_ = nullptr;
strings_end_ = &strings_;
}
- AstRawString* GetOneByteStringInternal(Vector<const uint8_t> literal);
+ V8_EXPORT_PRIVATE AstRawString* GetOneByteStringInternal(
+ Vector<const uint8_t> literal);
AstRawString* GetTwoByteStringInternal(Vector<const uint16_t> literal);
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
@@ -407,8 +422,10 @@ class AstValueFactory {
// For keeping track of all AstValues and AstRawStrings we've created (so that
// they can be internalized later).
AstValue* values_;
- // We need to keep track of strings_ in order, since cons strings require
- // their members to be internalized first.
+
+ AstValue* smis_[kMaxCachedSmi + 1];
+ // We need to keep track of strings_ in order since cons strings require their
+ // members to be internalized first.
AstString* strings_;
AstString** strings_end_;
Zone* zone_;
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 97d1f9d770..fc8bd8a5bd 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -14,6 +14,7 @@
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
+#include "src/elements.h"
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
@@ -159,31 +160,30 @@ bool Statement::IsJump() const {
}
}
-VariableProxy::VariableProxy(Variable* var, int start_position,
- int end_position)
+VariableProxy::VariableProxy(Variable* var, int start_position)
: Expression(start_position, kVariableProxy),
- end_position_(end_position),
raw_name_(var->raw_name()),
next_unresolved_(nullptr) {
bit_field_ |= IsThisField::encode(var->is_this()) |
- IsAssignedField::encode(false) | IsResolvedField::encode(false);
+ IsAssignedField::encode(false) |
+ IsResolvedField::encode(false) |
+ HoleCheckModeField::encode(HoleCheckMode::kElided);
BindTo(var);
}
VariableProxy::VariableProxy(const AstRawString* name,
- VariableKind variable_kind, int start_position,
- int end_position)
+ VariableKind variable_kind, int start_position)
: Expression(start_position, kVariableProxy),
- end_position_(end_position),
raw_name_(name),
next_unresolved_(nullptr) {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
- IsAssignedField::encode(false) | IsResolvedField::encode(false);
+ IsAssignedField::encode(false) |
+ IsResolvedField::encode(false) |
+ HoleCheckModeField::encode(HoleCheckMode::kElided);
}
VariableProxy::VariableProxy(const VariableProxy* copy_from)
: Expression(copy_from->position(), kVariableProxy),
- end_position_(copy_from->end_position_),
next_unresolved_(nullptr) {
bit_field_ = copy_from->bit_field_;
DCHECK(!copy_from->is_resolved());
@@ -288,14 +288,16 @@ Token::Value Assignment::binary_op() const {
return Token::ILLEGAL;
}
-
-bool FunctionLiteral::AllowsLazyCompilation() {
- return scope()->AllowsLazyCompilation();
+bool FunctionLiteral::ShouldEagerCompile() const {
+ return scope()->ShouldEagerCompile();
}
+void FunctionLiteral::SetShouldEagerCompile() {
+ scope()->set_should_eager_compile();
+}
-bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
- return scope()->AllowsLazyCompilationWithoutContext();
+bool FunctionLiteral::AllowsLazyCompilation() {
+ return scope()->AllowsLazyCompilation();
}
@@ -510,7 +512,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
continue;
}
- if (position == boilerplate_properties_ * 2) {
+ if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
DCHECK(property->is_computed_name());
is_simple = false;
break;
@@ -579,11 +581,9 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (!constant_elements_.is_null()) return;
int constants_length = values()->length();
-
- // Allocate a fixed array to hold all the object literals.
- Handle<JSArray> array = isolate->factory()->NewJSArray(
- FAST_HOLEY_SMI_ELEMENTS, constants_length, constants_length,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ ElementsKind kind = FIRST_FAST_ELEMENTS_KIND;
+ Handle<FixedArray> fixed_array =
+ isolate->factory()->NewFixedArrayWithHoles(constants_length);
// Fill in the literals.
bool is_simple = true;
@@ -610,33 +610,38 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
}
if (boilerplate_value->IsUninitialized(isolate)) {
- boilerplate_value = handle(Smi::FromInt(0), isolate);
+ boilerplate_value = handle(Smi::kZero, isolate);
is_simple = false;
}
- JSObject::AddDataElement(array, array_index, boilerplate_value, NONE)
- .Assert();
+ kind = GetMoreGeneralElementsKind(kind,
+ boilerplate_value->OptimalElementsKind());
+ fixed_array->set(array_index, *boilerplate_value);
}
- JSObject::ValidateElements(array);
- Handle<FixedArrayBase> element_values(array->elements());
+ if (is_holey) kind = GetHoleyElementsKind(kind);
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth_acc == 1 && array_index > 0 &&
- array->HasFastSmiOrObjectElements()) {
- element_values->set_map(isolate->heap()->fixed_cow_array_map());
+ IsFastSmiOrObjectElementsKind(kind)) {
+ fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
+ }
+
+ Handle<FixedArrayBase> elements = fixed_array;
+ if (IsFastDoubleElementsKind(kind)) {
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ elements = isolate->factory()->NewFixedDoubleArray(constants_length);
+ // We are copying from non-fast-double to fast-double.
+ ElementsKind from_kind = TERMINAL_FAST_ELEMENTS_KIND;
+ accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
}
// Remember both the literal's constant values as well as the ElementsKind
// in a 2-element FixedArray.
Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
-
- ElementsKind kind = array->GetElementsKind();
- kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
-
literals->set(0, Smi::FromInt(kind));
- literals->set(1, *element_values);
+ literals->set(1, *elements);
constant_elements_ = literals;
set_is_simple(is_simple);
@@ -887,36 +892,20 @@ bool Expression::IsMonomorphic() const {
}
}
-bool Call::IsUsingCallFeedbackICSlot() const {
- return GetCallType() != POSSIBLY_EVAL_CALL;
-}
-
-bool Call::IsUsingCallFeedbackSlot() const {
- // SuperConstructorCall uses a CallConstructStub, which wants
- // a Slot, in addition to any IC slots requested elsewhere.
- return GetCallType() == SUPER_CALL;
-}
-
-
void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
- if (IsUsingCallFeedbackICSlot()) {
- ic_slot_ = spec->AddCallICSlot();
- }
- if (IsUsingCallFeedbackSlot()) {
- stub_slot_ = spec->AddGeneralSlot();
- }
+ ic_slot_ = spec->AddCallICSlot();
}
Call::CallType Call::GetCallType() const {
VariableProxy* proxy = expression()->AsVariableProxy();
if (proxy != NULL) {
- if (is_possibly_eval()) {
- return POSSIBLY_EVAL_CALL;
- } else if (proxy->var()->IsUnallocated()) {
+ if (proxy->var()->IsUnallocated()) {
return GLOBAL_CALL;
} else if (proxy->var()->IsLookupSlot()) {
- return LOOKUP_SLOT_CALL;
+ // Calls going through 'with' always use DYNAMIC rather than DYNAMIC_LOCAL
+ // or DYNAMIC_GLOBAL.
+ return proxy->var()->mode() == DYNAMIC ? WITH_CALL : OTHER_CALL;
}
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index a6661becf2..99e0672a4c 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -509,20 +509,25 @@ class DoExpression final : public Expression {
class Declaration : public AstNode {
public:
+ typedef ThreadedList<Declaration> List;
+
VariableProxy* proxy() const { return proxy_; }
Scope* scope() const { return scope_; }
protected:
Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
- : AstNode(pos, type), proxy_(proxy), scope_(scope) {}
+ : AstNode(pos, type), proxy_(proxy), scope_(scope), next_(nullptr) {}
static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
private:
VariableProxy* proxy_;
-
// Nested scope from which the declaration originated.
Scope* scope_;
+ // Declarations list threaded through the declarations.
+ Declaration** next() { return &next_; }
+ Declaration* next_;
+ friend List;
};
@@ -751,7 +756,6 @@ class ForInStatement final : public ForEachStatement {
BailoutId FilterId() const { return BailoutId(local_id(4)); }
BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
BailoutId IncrementId() const { return BailoutId(local_id(6)); }
- BailoutId ContinueId() const { return EntryId(); }
BailoutId StackCheckId() const { return BodyId(); }
private:
@@ -1671,7 +1675,13 @@ class VariableProxy final : public Expression {
bit_field_ = IsNewTargetField::update(bit_field_, true);
}
- int end_position() const { return end_position_; }
+ HoleCheckMode hole_check_mode() const {
+ return HoleCheckModeField::decode(bit_field_);
+ }
+ void set_needs_hole_check() {
+ bit_field_ =
+ HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
+ }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1693,9 +1703,9 @@ class VariableProxy final : public Expression {
private:
friend class AstNodeFactory;
- VariableProxy(Variable* var, int start_position, int end_position);
+ VariableProxy(Variable* var, int start_position);
VariableProxy(const AstRawString* name, VariableKind variable_kind,
- int start_position, int end_position);
+ int start_position);
explicit VariableProxy(const VariableProxy* copy_from);
static int parent_num_ids() { return Expression::num_ids(); }
@@ -1706,11 +1716,9 @@ class VariableProxy final : public Expression {
class IsAssignedField : public BitField<bool, IsThisField::kNext, 1> {};
class IsResolvedField : public BitField<bool, IsAssignedField::kNext, 1> {};
class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
+ class HoleCheckModeField
+ : public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
- // Position is stored in the AstNode superclass, but VariableProxy needs to
- // know its end position too (for error messages). It cannot be inferred from
- // the variable name length because it can contain escapes.
- int end_position_;
FeedbackVectorSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
@@ -1839,8 +1847,6 @@ class Call final : public Expression {
void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
- FeedbackVectorSlot CallFeedbackSlot() const { return stub_slot_; }
-
FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
SmallMapList* GetReceiverTypes() {
@@ -1894,9 +1900,8 @@ class Call final : public Expression {
void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
enum CallType {
- POSSIBLY_EVAL_CALL,
GLOBAL_CALL,
- LOOKUP_SLOT_CALL,
+ WITH_CALL,
NAMED_PROPERTY_CALL,
KEYED_PROPERTY_CALL,
NAMED_SUPER_PROPERTY_CALL,
@@ -1912,8 +1917,6 @@ class Call final : public Expression {
// Helpers to determine how to handle the call.
CallType GetCallType() const;
- bool IsUsingCallFeedbackSlot() const;
- bool IsUsingCallFeedbackICSlot() const;
#ifdef DEBUG
// Used to assert that the FullCodeGenerator records the return site.
@@ -1946,7 +1949,6 @@ class Call final : public Expression {
class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
FeedbackVectorSlot ic_slot_;
- FeedbackVectorSlot stub_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@@ -2597,9 +2599,9 @@ class FunctionLiteral final : public Expression {
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int parameter_count() { return parameter_count_; }
+ int function_length() { return function_length_; }
bool AllowsLazyCompilation();
- bool AllowsLazyCompilationWithoutContext();
Handle<String> debug_name() const {
if (raw_name_ != NULL && !raw_name_->IsEmpty()) {
@@ -2649,12 +2651,8 @@ class FunctionLiteral final : public Expression {
// function will be called immediately:
// - (function() { ... })();
// - var x = function() { ... }();
- bool should_eager_compile() const {
- return ShouldEagerCompile::decode(bit_field_);
- }
- void set_should_eager_compile() {
- bit_field_ = ShouldEagerCompile::update(bit_field_, true);
- }
+ bool ShouldEagerCompile() const;
+ void SetShouldEagerCompile();
// A hint that we expect this function to be called (exactly) once,
// i.e. we suspect it's an initialization function.
@@ -2708,6 +2706,10 @@ class FunctionLiteral final : public Expression {
IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
}
+ int return_position() {
+ return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
+ }
+
private:
friend class AstNodeFactory;
@@ -2715,16 +2717,18 @@ class FunctionLiteral final : public Expression {
AstValueFactory* ast_value_factory, DeclarationScope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
int expected_property_count, int parameter_count,
- FunctionType function_type,
+ int function_length, FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position,
- bool is_function)
+ bool is_function, bool has_braces)
: Expression(position, kFunctionLiteral),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
+ function_length_(function_length),
function_token_position_(kNoSourcePosition),
yield_count_(0),
+ has_braces_(has_braces),
raw_name_(name),
scope_(scope),
body_(body),
@@ -2735,11 +2739,11 @@ class FunctionLiteral final : public Expression {
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
IsFunction::encode(is_function) |
- ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
RequiresClassFieldInit::encode(false) |
ShouldNotBeUsedOnceHintField::encode(false) |
DontOptimizeReasonField::encode(kNoReason) |
IsClassFieldInitializer::encode(false);
+ if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
}
class FunctionTypeBits
@@ -2747,9 +2751,8 @@ class FunctionLiteral final : public Expression {
class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
- class ShouldEagerCompile : public BitField<bool, IsFunction::kNext, 1> {};
class ShouldNotBeUsedOnceHintField
- : public BitField<bool, ShouldEagerCompile::kNext, 1> {};
+ : public BitField<bool, IsFunction::kNext, 1> {};
class RequiresClassFieldInit
: public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
class IsClassFieldInitializer
@@ -2760,8 +2763,10 @@ class FunctionLiteral final : public Expression {
int materialized_literal_count_;
int expected_property_count_;
int parameter_count_;
+ int function_length_;
int function_token_position_;
int yield_count_;
+ bool has_braces_;
const AstString* raw_name_;
DeclarationScope* scope_;
@@ -2962,10 +2967,8 @@ class AstVisitor BASE_EMBEDDED {
public:
void Visit(AstNode* node) { impl()->Visit(node); }
- void VisitDeclarations(ZoneList<Declaration*>* declarations) {
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
+ void VisitDeclarations(Declaration::List* declarations) {
+ for (Declaration* decl : *declarations) Visit(decl);
}
void VisitStatements(ZoneList<Statement*>* statements) {
@@ -3279,7 +3282,7 @@ class AstNodeFactory final BASE_EMBEDDED {
Literal(ast_value_factory_->NewNumber(number, with_dot), pos);
}
- Literal* NewSmiLiteral(int number, int pos) {
+ Literal* NewSmiLiteral(uint32_t number, int pos) {
return new (zone_) Literal(ast_value_factory_->NewSmi(number), pos);
}
@@ -3339,18 +3342,15 @@ class AstNodeFactory final BASE_EMBEDDED {
}
VariableProxy* NewVariableProxy(Variable* var,
- int start_position = kNoSourcePosition,
- int end_position = kNoSourcePosition) {
- return new (zone_) VariableProxy(var, start_position, end_position);
+ int start_position = kNoSourcePosition) {
+ return new (zone_) VariableProxy(var, start_position);
}
VariableProxy* NewVariableProxy(const AstRawString* name,
VariableKind variable_kind,
- int start_position = kNoSourcePosition,
- int end_position = kNoSourcePosition) {
+ int start_position = kNoSourcePosition) {
DCHECK_NOT_NULL(name);
- return new (zone_)
- VariableProxy(name, variable_kind, start_position, end_position);
+ return new (zone_) VariableProxy(name, variable_kind, start_position);
}
// Recreates the VariableProxy in this Zone.
@@ -3459,15 +3459,16 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, DeclarationScope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
- int expected_property_count, int parameter_count,
+ int expected_property_count, int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
- return new (zone_) FunctionLiteral(zone_, name, ast_value_factory_, scope,
- body, materialized_literal_count,
- expected_property_count, parameter_count,
- function_type, has_duplicate_parameters,
- eager_compile_hint, position, true);
+ FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
+ bool has_braces) {
+ return new (zone_) FunctionLiteral(
+ zone_, name, ast_value_factory_, scope, body,
+ materialized_literal_count, expected_property_count, parameter_count,
+ function_length, function_type, has_duplicate_parameters,
+ eager_compile_hint, position, true, has_braces);
}
// Creates a FunctionLiteral representing a top-level script, the
@@ -3480,9 +3481,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) FunctionLiteral(
zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
body, materialized_literal_count, expected_property_count,
- parameter_count, FunctionLiteral::kAnonymousExpression,
+ parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kShouldLazyCompile, 0, false);
+ FunctionLiteral::kShouldLazyCompile, 0, false, true);
}
ClassLiteral::Property* NewClassLiteralProperty(
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 2d28d5564b..339d64c580 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -87,8 +87,8 @@ Handle<ModuleInfoEntry> ModuleDescriptor::Entry::Serialize(
return ModuleInfoEntry::New(
isolate, ToStringOrUndefined(isolate, export_name),
ToStringOrUndefined(isolate, local_name),
- ToStringOrUndefined(isolate, import_name),
- Handle<Object>(Smi::FromInt(module_request), isolate));
+ ToStringOrUndefined(isolate, import_name), module_request, cell_index,
+ location.beg_pos, location.end_pos);
}
ModuleDescriptor::Entry* ModuleDescriptor::Entry::Deserialize(
@@ -101,7 +101,8 @@ ModuleDescriptor::Entry* ModuleDescriptor::Entry::Deserialize(
isolate, avfactory, handle(entry->local_name(), isolate));
result->import_name = FromStringOrUndefined(
isolate, avfactory, handle(entry->import_name(), isolate));
- result->module_request = Smi::cast(entry->module_request())->value();
+ result->module_request = entry->module_request();
+ result->cell_index = entry->cell_index();
return result;
}
@@ -111,57 +112,68 @@ Handle<FixedArray> ModuleDescriptor::SerializeRegularExports(Isolate* isolate,
// local names and for each local name immediately access all its export
// names. (Regular exports have neither import name nor module request.)
- ZoneVector<Handle<Object>> data(zone);
- data.reserve(2 * regular_exports_.size());
+ ZoneVector<Handle<Object>> data(
+ ModuleInfo::kRegularExportLength * regular_exports_.size(), zone);
+ int index = 0;
for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
// Find out how many export names this local name has.
auto next = it;
- int size = 0;
+ int count = 0;
do {
+ DCHECK_EQ(it->second->local_name, next->second->local_name);
+ DCHECK_EQ(it->second->cell_index, next->second->cell_index);
++next;
- ++size;
+ ++count;
} while (next != regular_exports_.end() && next->first == it->first);
- Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(size);
- data.push_back(it->second->local_name->string());
- data.push_back(export_names);
+ Handle<FixedArray> export_names = isolate->factory()->NewFixedArray(count);
+ data[index + ModuleInfo::kRegularExportLocalNameOffset] =
+ it->second->local_name->string();
+ data[index + ModuleInfo::kRegularExportCellIndexOffset] =
+ handle(Smi::FromInt(it->second->cell_index), isolate);
+ data[index + ModuleInfo::kRegularExportExportNamesOffset] = export_names;
+ index += ModuleInfo::kRegularExportLength;
// Collect the export names.
int i = 0;
for (; it != next; ++it) {
export_names->set(i++, *it->second->export_name->string());
}
- DCHECK_EQ(i, size);
+ DCHECK_EQ(i, count);
// Continue with the next distinct key.
DCHECK(it == next);
}
+ DCHECK_LE(index, static_cast<int>(data.size()));
+ data.resize(index);
// We cannot create the FixedArray earlier because we only now know the
- // precise size (the number of unique keys in regular_exports).
- int size = static_cast<int>(data.size());
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
- for (int i = 0; i < size; ++i) {
+ // precise size.
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(index);
+ for (int i = 0; i < index; ++i) {
result->set(i, *data[i]);
}
return result;
}
-void ModuleDescriptor::DeserializeRegularExports(Isolate* isolate,
- AstValueFactory* avfactory,
- Handle<FixedArray> data) {
- for (int i = 0, length_i = data->length(); i < length_i;) {
- Handle<String> local_name(String::cast(data->get(i++)), isolate);
- Handle<FixedArray> export_names(FixedArray::cast(data->get(i++)), isolate);
-
- for (int j = 0, length_j = export_names->length(); j < length_j; ++j) {
+void ModuleDescriptor::DeserializeRegularExports(
+ Isolate* isolate, AstValueFactory* avfactory,
+ Handle<ModuleInfo> module_info) {
+ for (int i = 0, count = module_info->RegularExportCount(); i < count; ++i) {
+ Handle<String> local_name(module_info->RegularExportLocalName(i), isolate);
+ int cell_index = module_info->RegularExportCellIndex(i);
+ Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
+ isolate);
+
+ for (int j = 0, length = export_names->length(); j < length; ++j) {
Handle<String> export_name(String::cast(export_names->get(j)), isolate);
Entry* entry =
new (avfactory->zone()) Entry(Scanner::Location::invalid());
entry->local_name = avfactory->GetString(local_name);
entry->export_name = avfactory->GetString(export_name);
+ entry->cell_index = cell_index;
AddRegularExport(entry);
}
@@ -184,6 +196,13 @@ void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
static_cast<int>(module_requests_.size()));
entry->import_name = import->second->import_name;
entry->module_request = import->second->module_request;
+ // Hack: When the indirect export cannot be resolved, we want the error
+ // message to point at the import statement, not at the export statement.
+ // Therefore we overwrite [entry]'s location here. Note that Validate()
+ // has already checked for duplicate exports, so it's guaranteed that we
+ // won't need to report any error pointing at the (now lost) export
+ // location.
+ entry->location = import->second->location;
entry->local_name = nullptr;
AddSpecialExport(entry, zone);
it = regular_exports_.erase(it);
@@ -193,6 +212,43 @@ void ModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) {
}
}
+ModuleDescriptor::CellIndexKind ModuleDescriptor::GetCellIndexKind(
+ int cell_index) {
+ if (cell_index > 0) return kExport;
+ if (cell_index < 0) return kImport;
+ return kInvalid;
+}
+
+void ModuleDescriptor::AssignCellIndices() {
+ int export_index = 1;
+ for (auto it = regular_exports_.begin(); it != regular_exports_.end();) {
+ auto current_key = it->first;
+ // This local name may be exported under multiple export names. Assign the
+ // same index to each such entry.
+ do {
+ Entry* entry = it->second;
+ DCHECK_NOT_NULL(entry->local_name);
+ DCHECK_NULL(entry->import_name);
+ DCHECK_LT(entry->module_request, 0);
+ DCHECK_EQ(entry->cell_index, 0);
+ entry->cell_index = export_index;
+ it++;
+ } while (it != regular_exports_.end() && it->first == current_key);
+ export_index++;
+ }
+
+ int import_index = -1;
+ for (const auto& elem : regular_imports_) {
+ Entry* entry = elem.second;
+ DCHECK_NOT_NULL(entry->local_name);
+ DCHECK_NOT_NULL(entry->import_name);
+ DCHECK_LE(0, entry->module_request);
+ DCHECK_EQ(entry->cell_index, 0);
+ entry->cell_index = import_index;
+ import_index--;
+ }
+}
+
namespace {
const ModuleDescriptor::Entry* BetterDuplicate(
@@ -259,6 +315,7 @@ bool ModuleDescriptor::Validate(ModuleScope* module_scope,
}
MakeIndirectExportsExplicit(zone);
+ AssignCellIndices();
return true;
}
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index 4d36735fa8..94550fb5c9 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -14,6 +14,7 @@ namespace internal {
class AstRawString;
+class ModuleInfo;
class ModuleInfoEntry;
class ModuleDescriptor : public ZoneObject {
@@ -73,23 +74,34 @@ class ModuleDescriptor : public ZoneObject {
PendingCompilationErrorHandler* error_handler, Zone* zone);
struct Entry : public ZoneObject {
- const Scanner::Location location;
+ Scanner::Location location;
const AstRawString* export_name;
const AstRawString* local_name;
const AstRawString* import_name;
+
// The module_request value records the order in which modules are
// requested. It also functions as an index into the ModuleInfo's array of
// module specifiers and into the Module's array of requested modules. A
// negative value means no module request.
int module_request;
+ // Import/export entries that are associated with a MODULE-allocated
+ // variable (i.e. regular_imports and regular_exports after Validate) use
+ // the cell_index value to encode the location of their cell. During
+ // variable allocation, this will be be copied into the variable's index
+ // field.
+ // Entries that are not associated with a MODULE-allocated variable have
+ // GetCellIndexKind(cell_index) == kInvalid.
+ int cell_index;
+
// TODO(neis): Remove local_name component?
explicit Entry(Scanner::Location loc)
: location(loc),
export_name(nullptr),
local_name(nullptr),
import_name(nullptr),
- module_request(-1) {}
+ module_request(-1),
+ cell_index(0) {}
// (De-)serialization support.
// Note that the location value is not preserved as it's only needed by the
@@ -99,6 +111,9 @@ class ModuleDescriptor : public ZoneObject {
Handle<ModuleInfoEntry> entry);
};
+ enum CellIndexKind { kInvalid, kExport, kImport };
+ static CellIndexKind GetCellIndexKind(int cell_index);
+
// Module requests.
const ZoneMap<const AstRawString*, int>& module_requests() const {
return module_requests_;
@@ -110,7 +125,7 @@ class ModuleDescriptor : public ZoneObject {
}
// All the remaining imports, indexed by local name.
- const ZoneMap<const AstRawString*, const Entry*>& regular_imports() const {
+ const ZoneMap<const AstRawString*, Entry*>& regular_imports() const {
return regular_imports_;
}
@@ -139,7 +154,7 @@ class ModuleDescriptor : public ZoneObject {
special_exports_.Add(entry, zone);
}
- void AddRegularImport(const Entry* entry) {
+ void AddRegularImport(Entry* entry) {
DCHECK_NOT_NULL(entry->import_name);
DCHECK_NOT_NULL(entry->local_name);
DCHECK_NULL(entry->export_name);
@@ -160,7 +175,7 @@ class ModuleDescriptor : public ZoneObject {
Handle<FixedArray> SerializeRegularExports(Isolate* isolate,
Zone* zone) const;
void DeserializeRegularExports(Isolate* isolate, AstValueFactory* avfactory,
- Handle<FixedArray> data);
+ Handle<ModuleInfo> module_info);
private:
// TODO(neis): Use STL datastructure instead of ZoneList?
@@ -168,7 +183,7 @@ class ModuleDescriptor : public ZoneObject {
ZoneList<const Entry*> special_exports_;
ZoneList<const Entry*> namespace_imports_;
ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
- ZoneMap<const AstRawString*, const Entry*> regular_imports_;
+ ZoneMap<const AstRawString*, Entry*> regular_imports_;
// If there are multiple export entries with the same export name, return the
// last of them (in source order). Otherwise return nullptr.
@@ -192,6 +207,11 @@ class ModuleDescriptor : public ZoneObject {
// (The import entry is never deleted.)
void MakeIndirectExportsExplicit(Zone* zone);
+ // Assign a cell_index of -1,-2,... to regular imports.
+ // Assign a cell_index of +1,+2,... to regular (local) exports.
+ // Assign a cell_index of 0 to anything else.
+ void AssignCellIndices();
+
int AddModuleRequest(const AstRawString* specifier) {
DCHECK_NOT_NULL(specifier);
auto it = module_requests_
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 874c15991e..a3fc50ae57 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -529,6 +529,18 @@ void AstPrinter::PrintLiteral(Handle<Object> value, bool quote) {
}
} else if (object->IsFixedArray()) {
Print("FixedArray");
+ } else if (object->IsSymbol()) {
+ // Symbols can only occur as literals if they were inserted by the parser.
+ Symbol* symbol = Symbol::cast(object);
+ if (symbol->name()->IsString()) {
+ int length = 0;
+ String* string = String::cast(symbol->name());
+ std::unique_ptr<char[]> desc = string->ToCString(
+ ALLOW_NULLS, FAST_STRING_TRAVERSAL, 0, string->length(), &length);
+ Print("Symbol(%*s)", length, desc.get());
+ } else {
+ Print("Symbol()");
+ }
} else {
Print("<unknown literal %p>", static_cast<void*>(object));
}
@@ -650,13 +662,10 @@ void AstPrinter::PrintOut(Isolate* isolate, AstNode* node) {
PrintF("%s", printer.output_);
}
-
-void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
- if (declarations->length() > 0) {
+void AstPrinter::PrintDeclarations(Declaration::List* declarations) {
+ if (!declarations->is_empty()) {
IndentedScope indent(this, "DECLS");
- for (int i = 0; i < declarations->length(); i++) {
- Visit(declarations->at(i));
- }
+ for (Declaration* decl : *declarations) Visit(decl);
}
}
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 2d553babde..b56c834893 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -84,7 +84,7 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void PrintIndentedVisit(const char* s, AstNode* node);
void PrintStatements(ZoneList<Statement*>* statements);
- void PrintDeclarations(ZoneList<Declaration*>* declarations);
+ void PrintDeclarations(Declaration::List* declarations);
void PrintParameters(DeclarationScope* scope);
void PrintArguments(ZoneList<Expression*>* arguments);
void PrintCaseClause(CaseClause* clause);
diff --git a/deps/v8/src/ast/scopeinfo.cc b/deps/v8/src/ast/scopeinfo.cc
index 5354b8d737..3a3ea03189 100644
--- a/deps/v8/src/ast/scopeinfo.cc
+++ b/deps/v8/src/ast/scopeinfo.cc
@@ -58,7 +58,6 @@ bool ScopeInfo::Equals(ScopeInfo* other) const {
Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
MaybeHandle<ScopeInfo> outer_scope) {
// Collect variables.
- ZoneList<Variable*>* locals = scope->locals();
int stack_local_count = 0;
int context_local_count = 0;
int module_vars_count = 0;
@@ -67,8 +66,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
// slot index indicates at which offset a particular scope starts in the
// parent declaration scope.
int first_slot_index = 0;
- for (int i = 0; i < locals->length(); i++) {
- Variable* var = locals->at(i);
+ for (Variable* var : *scope->locals()) {
switch (var->location()) {
case VariableLocation::LOCAL:
if (stack_local_count == 0) first_slot_index = var->index();
@@ -198,8 +196,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
int context_local_info_base = context_local_base + context_local_count;
int module_var_entry = scope_info->ModuleVariablesIndex();
- for (int i = 0; i < locals->length(); ++i) {
- Variable* var = locals->at(i);
+ for (Variable* var : *scope->locals()) {
switch (var->location()) {
case VariableLocation::LOCAL: {
int local_index = var->index() - first_slot_index;
@@ -315,7 +312,7 @@ Handle<ScopeInfo> ScopeInfo::CreateForWithScope(
int index = kVariablePartIndex;
DCHECK_EQ(index, scope_info->ParameterNamesIndex());
DCHECK_EQ(index, scope_info->StackLocalFirstSlotIndex());
- scope_info->set(index++, Smi::FromInt(0));
+ scope_info->set(index++, Smi::kZero);
DCHECK_EQ(index, scope_info->StackLocalNamesIndex());
DCHECK_EQ(index, scope_info->ReceiverInfoIndex());
DCHECK_EQ(index, scope_info->FunctionNameInfoIndex());
@@ -373,7 +370,7 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
// Here we add info for context-allocated "this".
DCHECK_EQ(index, scope_info->ContextLocalNamesIndex());
- scope_info->set(index++, *isolate->factory()->this_string());
+ scope_info->set(index++, isolate->heap()->this_string());
DCHECK_EQ(index, scope_info->ContextLocalInfosIndex());
const uint32_t value = VariableModeField::encode(CONST) |
InitFlagField::encode(kCreatedInitialized) |
@@ -647,18 +644,14 @@ int ScopeInfo::ModuleIndex(Handle<String> name, VariableMode* mode,
int entry = ModuleVariablesIndex();
for (int i = 0; i < module_vars_count; ++i) {
if (*name == get(entry + kModuleVariableNameOffset)) {
- int index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
- int properties =
- Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
- *mode = VariableModeField::decode(properties);
- *init_flag = InitFlagField::decode(properties);
- *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+ int index;
+ ModuleVariable(i, nullptr, &index, mode, init_flag, maybe_assigned_flag);
return index;
}
entry += kModuleVariableEntryLength;
}
- return -1;
+ return 0;
}
int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
@@ -794,6 +787,35 @@ int ScopeInfo::ModuleVariableCountIndex() { return ModuleInfoIndex() + 1; }
int ScopeInfo::ModuleVariablesIndex() { return ModuleVariableCountIndex() + 1; }
+void ScopeInfo::ModuleVariable(int i, String** name, int* index,
+ VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag) {
+ DCHECK_LE(0, i);
+ DCHECK_LT(i, Smi::cast(get(ModuleVariableCountIndex()))->value());
+
+ int entry = ModuleVariablesIndex() + i * kModuleVariableEntryLength;
+ int properties =
+ Smi::cast(get(entry + kModuleVariablePropertiesOffset))->value();
+
+ if (name != nullptr) {
+ *name = String::cast(get(entry + kModuleVariableNameOffset));
+ }
+ if (index != nullptr) {
+ *index = Smi::cast(get(entry + kModuleVariableIndexOffset))->value();
+ DCHECK_NE(*index, 0);
+ }
+ if (mode != nullptr) {
+ *mode = VariableModeField::decode(properties);
+ }
+ if (init_flag != nullptr) {
+ *init_flag = InitFlagField::decode(properties);
+ }
+ if (maybe_assigned_flag != nullptr) {
+ *maybe_assigned_flag = MaybeAssignedFlagField::decode(properties);
+ }
+}
+
#ifdef DEBUG
static void PrintList(const char* list_name,
@@ -843,12 +865,17 @@ Handle<ModuleInfoEntry> ModuleInfoEntry::New(Isolate* isolate,
Handle<Object> export_name,
Handle<Object> local_name,
Handle<Object> import_name,
- Handle<Object> module_request) {
- Handle<ModuleInfoEntry> result = isolate->factory()->NewModuleInfoEntry();
- result->set(kExportNameIndex, *export_name);
- result->set(kLocalNameIndex, *local_name);
- result->set(kImportNameIndex, *import_name);
- result->set(kModuleRequestIndex, *module_request);
+ int module_request, int cell_index,
+ int beg_pos, int end_pos) {
+ Handle<ModuleInfoEntry> result = Handle<ModuleInfoEntry>::cast(
+ isolate->factory()->NewStruct(MODULE_INFO_ENTRY_TYPE));
+ result->set_export_name(*export_name);
+ result->set_local_name(*local_name);
+ result->set_import_name(*import_name);
+ result->set_module_request(module_request);
+ result->set_cell_index(cell_index);
+ result->set_beg_pos(beg_pos);
+ result->set_end_pos(end_pos);
return result;
}
@@ -867,7 +894,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (auto entry : descr->special_exports()) {
- special_exports->set(i++, *entry->Serialize(isolate));
+ Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+ special_exports->set(i++, *serialized_entry);
}
}
@@ -877,7 +905,8 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (auto entry : descr->namespace_imports()) {
- namespace_imports->set(i++, *entry->Serialize(isolate));
+ Handle<ModuleInfoEntry> serialized_entry = entry->Serialize(isolate);
+ namespace_imports->set(i++, *serialized_entry);
}
}
@@ -891,7 +920,9 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
{
int i = 0;
for (const auto& elem : descr->regular_imports()) {
- regular_imports->set(i++, *elem.second->Serialize(isolate));
+ Handle<ModuleInfoEntry> serialized_entry =
+ elem.second->Serialize(isolate);
+ regular_imports->set(i++, *serialized_entry);
}
}
@@ -904,5 +935,41 @@ Handle<ModuleInfo> ModuleInfo::New(Isolate* isolate, Zone* zone,
return result;
}
+int ModuleInfo::RegularExportCount() const {
+ DCHECK_EQ(regular_exports()->length() % kRegularExportLength, 0);
+ return regular_exports()->length() / kRegularExportLength;
+}
+
+String* ModuleInfo::RegularExportLocalName(int i) const {
+ return String::cast(regular_exports()->get(i * kRegularExportLength +
+ kRegularExportLocalNameOffset));
+}
+
+int ModuleInfo::RegularExportCellIndex(int i) const {
+ return Smi::cast(regular_exports()->get(i * kRegularExportLength +
+ kRegularExportCellIndexOffset))
+ ->value();
+}
+
+FixedArray* ModuleInfo::RegularExportExportNames(int i) const {
+ return FixedArray::cast(regular_exports()->get(
+ i * kRegularExportLength + kRegularExportExportNamesOffset));
+}
+
+Handle<ModuleInfoEntry> ModuleInfo::LookupRegularImport(
+ Handle<ModuleInfo> info, Handle<String> local_name) {
+ Isolate* isolate = info->GetIsolate();
+ Handle<FixedArray> regular_imports(info->regular_imports(), isolate);
+ for (int i = 0, n = regular_imports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> entry(
+ ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
+ if (String::cast(entry->local_name())->Equals(*local_name)) {
+ return entry;
+ }
+ }
+ UNREACHABLE();
+ return Handle<ModuleInfoEntry>();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index c531ef5901..c1679a40b8 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -96,8 +96,6 @@ Scope::Scope(Zone* zone)
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
- locals_(4, zone),
- decls_(4, zone),
scope_type_(SCRIPT_SCOPE) {
SetDefaults();
}
@@ -106,8 +104,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
: zone_(zone),
outer_scope_(outer_scope),
variables_(zone),
- locals_(4, zone),
- decls_(4, zone),
scope_type_(scope_type) {
DCHECK_NE(SCRIPT_SCOPE, scope_type);
SetDefaults();
@@ -121,8 +117,8 @@ Scope::Snapshot::Snapshot(Scope* scope)
: outer_scope_(scope),
top_inner_scope_(scope->inner_scope_),
top_unresolved_(scope->unresolved_),
- top_local_(scope->GetClosureScope()->locals_.length()),
- top_decl_(scope->GetClosureScope()->decls_.length()) {}
+ top_local_(scope->GetClosureScope()->locals_.end()),
+ top_decl_(scope->GetClosureScope()->decls_.end()) {}
DeclarationScope::DeclarationScope(Zone* zone,
AstValueFactory* ast_value_factory)
@@ -164,7 +160,7 @@ ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
Zone* zone = avfactory->zone();
- ModuleInfo* module_info = scope_info->ModuleDescriptorInfo();
+ Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
set_language_mode(STRICT);
module_descriptor_ = new (zone) ModuleDescriptor(zone);
@@ -181,9 +177,8 @@ ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
}
// Deserialize regular exports.
- Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
module_descriptor_->DeserializeRegularExports(isolate, avfactory,
- regular_exports);
+ module_info);
// Deserialize namespace imports.
Handle<FixedArray> namespace_imports(module_info->namespace_imports(),
@@ -211,8 +206,6 @@ Scope::Scope(Zone* zone, ScopeType scope_type, Handle<ScopeInfo> scope_info)
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
- locals_(0, zone),
- decls_(0, zone),
scope_info_(scope_info),
scope_type_(scope_type) {
DCHECK(!scope_info.is_null());
@@ -241,8 +234,6 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
: zone_(zone),
outer_scope_(nullptr),
variables_(zone),
- locals_(0, zone),
- decls_(0, zone),
scope_info_(scope_info),
scope_type_(CATCH_SCOPE) {
SetDefaults();
@@ -271,7 +262,8 @@ void DeclarationScope::SetDefaults() {
function_ = nullptr;
arguments_ = nullptr;
this_function_ = nullptr;
- arity_ = 0;
+ should_eager_compile_ = false;
+ is_lazily_parsed_ = false;
}
void Scope::SetDefaults() {
@@ -301,8 +293,6 @@ void Scope::SetDefaults() {
force_context_allocation_ = false;
is_declaration_scope_ = false;
-
- is_lazily_parsed_ = false;
}
bool Scope::HasSimpleParameters() {
@@ -310,6 +300,14 @@ bool Scope::HasSimpleParameters() {
return !scope->is_function_scope() || scope->has_simple_parameters();
}
+bool DeclarationScope::ShouldEagerCompile() const {
+ return force_eager_compilation_ || should_eager_compile_;
+}
+
+void DeclarationScope::set_should_eager_compile() {
+ should_eager_compile_ = !is_lazily_parsed_;
+}
+
void DeclarationScope::set_asm_module() {
asm_module_ = true;
// Mark any existing inner function scopes as asm function scopes.
@@ -552,6 +550,9 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->already_resolved_);
+ // The outer scope is never lazy.
+ scope->set_should_eager_compile();
+
scope->AllocateVariables(info, mode);
// Ensuring that the outer script scope has a scope info avoids having
@@ -707,13 +708,32 @@ Scope* Scope::FinalizeBlockScope() {
return NULL;
}
+void DeclarationScope::AddLocal(Variable* var) {
+ DCHECK(!already_resolved_);
+ // Temporaries are only placed in ClosureScopes.
+ DCHECK_EQ(GetClosureScope(), this);
+ locals_.Add(var);
+}
+
+Variable* Scope::Declare(Zone* zone, Scope* scope, const AstRawString* name,
+ VariableMode mode, VariableKind kind,
+ InitializationFlag initialization_flag,
+ MaybeAssignedFlag maybe_assigned_flag) {
+ bool added;
+ Variable* var =
+ variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
+ maybe_assigned_flag, &added);
+ if (added) locals_.Add(var);
+ return var;
+}
+
void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
DCHECK_EQ(new_parent, outer_scope_->inner_scope_);
DCHECK_EQ(new_parent->outer_scope_, outer_scope_);
DCHECK_EQ(new_parent, new_parent->GetClosureScope());
DCHECK_NULL(new_parent->inner_scope_);
DCHECK_NULL(new_parent->unresolved_);
- DCHECK_EQ(0, new_parent->locals_.length());
+ DCHECK(new_parent->locals_.is_empty());
Scope* inner_scope = new_parent->sibling_;
if (inner_scope != top_inner_scope_) {
for (; inner_scope->sibling() != top_inner_scope_;
@@ -745,13 +765,13 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
// name in the closure-scope. See
// test/mjsunit/harmony/default-parameter-do-expression.js.
DeclarationScope* outer_closure = outer_scope_->GetClosureScope();
- for (int i = top_local_; i < outer_closure->locals_.length(); i++) {
- Variable* local = outer_closure->locals_.at(i);
+
+ new_parent->locals_.MoveTail(outer_closure->locals(), top_local_);
+ for (Variable* local : new_parent->locals_) {
DCHECK(local->mode() == TEMPORARY || local->mode() == VAR);
DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
DCHECK_NE(local->scope(), new_parent);
local->set_scope(new_parent);
- new_parent->AddLocal(local);
if (local->mode() == VAR) {
outer_closure->variables_.Remove(local);
new_parent->variables_.Add(new_parent->zone(), local);
@@ -787,20 +807,29 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
// There should be no local slot with the given name.
DCHECK_LT(scope_info_->StackSlotIndex(*name_handle), 0);
+ bool found = false;
+
+ VariableLocation location;
+ int index;
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- VariableLocation location = VariableLocation::CONTEXT;
- int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
- &init_flag, &maybe_assigned_flag);
- if (index < 0 && scope_type() == MODULE_SCOPE) {
+ {
+ location = VariableLocation::CONTEXT;
+ index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
+ &init_flag, &maybe_assigned_flag);
+ found = index >= 0;
+ }
+
+ if (!found && scope_type() == MODULE_SCOPE) {
location = VariableLocation::MODULE;
index = scope_info_->ModuleIndex(name_handle, &mode, &init_flag,
&maybe_assigned_flag);
+ found = index != 0;
}
- if (index < 0) {
+ if (!found) {
index = scope_info_->FunctionContextSlotIndex(*name_handle);
if (index < 0) return nullptr; // Nowhere found.
Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
@@ -849,9 +878,6 @@ Variable* DeclarationScope::DeclareParameter(
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
}
- if (!is_optional && !is_rest && arity_ == params_.length()) {
- ++arity_;
- }
has_rest_ = is_rest;
params_.Add(var, zone());
if (name == ast_value_factory->arguments_string()) {
@@ -971,22 +997,20 @@ Variable* Scope::DeclareVariable(
// same variable if it is declared several times. This is not a
// semantic issue, but it may be a performance issue since it may
// lead to repeated DeclareEvalVar or DeclareEvalFunction calls.
- decls_.Add(declaration, zone());
+ decls_.Add(declaration);
proxy->BindTo(var);
return var;
}
VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
- int start_position, int end_position,
- VariableKind kind) {
+ int start_position, VariableKind kind) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
DCHECK(!already_resolved_);
DCHECK_EQ(!needs_migration_, factory->zone() == zone());
- VariableProxy* proxy =
- factory->NewVariableProxy(name, kind, start_position, end_position);
+ VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
proxy->set_next_unresolved(unresolved_);
unresolved_ = proxy;
return proxy;
@@ -1027,7 +1051,7 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
}
bool Scope::RemoveUnresolved(const AstRawString* name) {
- if (unresolved_->raw_name() == name) {
+ if (unresolved_ != nullptr && unresolved_->raw_name() == name) {
VariableProxy* removed = unresolved_;
unresolved_ = unresolved_->next_unresolved();
removed->set_next_unresolved(nullptr);
@@ -1036,7 +1060,7 @@ bool Scope::RemoveUnresolved(const AstRawString* name) {
VariableProxy* current = unresolved_;
while (current != nullptr) {
VariableProxy* next = current->next_unresolved();
- if (next->raw_name() == name) {
+ if (next != nullptr && next->raw_name() == name) {
current->set_next_unresolved(next->next_unresolved());
next->set_next_unresolved(nullptr);
return true;
@@ -1055,9 +1079,7 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
}
Declaration* Scope::CheckConflictingVarDeclarations() {
- int length = decls_.length();
- for (int i = 0; i < length; i++) {
- Declaration* decl = decls_[i];
+ for (Declaration* decl : decls_) {
VariableMode mode = decl->proxy()->var()->mode();
if (IsLexicalVariableMode(mode) && !is_block_scope()) continue;
@@ -1092,10 +1114,8 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
// Conflict; find and return its declaration.
DCHECK(IsLexicalVariableMode(var->mode()));
const AstRawString* name = names.at(i);
- for (int j = 0; j < decls_.length(); ++j) {
- if (decls_[j]->proxy()->raw_name() == name) {
- return decls_[j];
- }
+ for (Declaration* decl : decls_) {
+ if (decl->proxy()->raw_name() == name) return decl;
}
DCHECK(false);
}
@@ -1104,16 +1124,20 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
}
void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
+ // Module variables must be allocated before variable resolution
+ // to ensure that AccessNeedsHoleCheck() can detect import variables.
+ if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
+
ResolveVariablesRecursively(info);
AllocateVariablesRecursively();
MaybeHandle<ScopeInfo> outer_scope;
- for (const Scope* s = outer_scope_; s != nullptr; s = s->outer_scope_) {
- if (s->scope_info_.is_null()) continue;
- outer_scope = s->scope_info_;
- break;
+ if (outer_scope_ != nullptr) outer_scope = outer_scope_->scope_info_;
+
+ AllocateScopeInfosRecursively(info->isolate(), outer_scope);
+ if (mode == AnalyzeMode::kDebugger) {
+ AllocateDebuggerScopeInfos(info->isolate(), outer_scope);
}
- AllocateScopeInfosRecursively(info->isolate(), mode, outer_scope);
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
// it has one, even if it doesn't need a scope info.
@@ -1123,14 +1147,29 @@ void DeclarationScope::AllocateVariables(ParseInfo* info, AnalyzeMode mode) {
}
}
-bool Scope::AllowsLazyParsingWithoutUnresolvedVariables() const {
- // If we are inside a block scope, we must find unresolved variables in the
- // inner scopes to find out how to allocate variables on the block scope. At
- // this point, declarations may not have yet been parsed.
- for (const Scope* s = this; s != nullptr; s = s->outer_scope_) {
- if (s->is_block_scope()) return false;
- // TODO(marja): Refactor parsing modes: also add s->is_function_scope()
- // here.
+bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
+ const Scope* outer) const {
+ // If none of the outer scopes need to decide whether to context allocate
+ // specific variables, we can preparse inner functions without unresolved
+ // variables. Otherwise we need to find unresolved variables to force context
+ // allocation of the matching declarations. We can stop at the outer scope for
+ // the parse, since context allocation of those variables is already
+ // guaranteed to be correct.
+ for (const Scope* s = this; s != outer; s = s->outer_scope_) {
+ // Eval forces context allocation on all outer scopes, so we don't need to
+ // look at those scopes. Sloppy eval makes all top-level variables dynamic,
+ // whereas strict-mode requires context allocation.
+ if (s->is_eval_scope()) return !is_strict(s->language_mode());
+ // Catch scopes force context allocation of all variables.
+ if (s->is_catch_scope()) continue;
+ // With scopes do not introduce variables that need allocation.
+ if (s->is_with_scope()) continue;
+ // If everything is guaranteed to be context allocated we can ignore the
+ // scope.
+ if (s->has_forced_context_allocation()) continue;
+ // Only block scopes and function scopes should disallow preparsing.
+ DCHECK(s->is_block_scope() || s->is_function_scope());
+ return false;
}
return true;
}
@@ -1139,17 +1178,6 @@ bool DeclarationScope::AllowsLazyCompilation() const {
return !force_eager_compilation_;
}
-bool DeclarationScope::AllowsLazyCompilationWithoutContext() const {
- if (force_eager_compilation_) return false;
- // Disallow lazy compilation without context if any outer scope needs a
- // context.
- for (const Scope* scope = outer_scope_; scope != nullptr;
- scope = scope->outer_scope_) {
- if (scope->NeedsContext()) return false;
- }
- return true;
-}
-
int Scope::ContextChainLength(Scope* scope) const {
int n = 0;
for (const Scope* s = this; s != scope; s = s->outer_scope_) {
@@ -1175,6 +1203,7 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
int Scope::MaxNestedContextChainLength() {
int max_context_chain_length = 0;
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ if (scope->is_function_scope()) continue;
max_context_chain_length = std::max(scope->MaxNestedContextChainLength(),
max_context_chain_length);
}
@@ -1192,6 +1221,14 @@ DeclarationScope* Scope::GetDeclarationScope() {
return scope->AsDeclarationScope();
}
+const DeclarationScope* Scope::GetClosureScope() const {
+ const Scope* scope = this;
+ while (!scope->is_declaration_scope() || scope->is_block_scope()) {
+ scope = scope->outer_scope();
+ }
+ return scope->AsDeclarationScope();
+}
+
DeclarationScope* Scope::GetClosureScope() {
Scope* scope = this;
while (!scope->is_declaration_scope() || scope->is_block_scope()) {
@@ -1200,6 +1237,15 @@ DeclarationScope* Scope::GetClosureScope() {
return scope->AsDeclarationScope();
}
+bool Scope::NeedsScopeInfo() const {
+ DCHECK(!already_resolved_);
+ DCHECK(GetClosureScope()->ShouldEagerCompile());
+ // The debugger expects all functions to have scope infos.
+ // TODO(jochen|yangguo): Remove this requirement.
+ if (is_function_scope()) return true;
+ return NeedsContext();
+}
+
ModuleScope* Scope::GetModuleScope() {
Scope* scope = this;
DCHECK(!scope->is_script_scope());
@@ -1243,32 +1289,17 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
DCHECK(is_function_scope());
// Reset all non-trivial members.
- decls_.Rewind(0);
- locals_.Rewind(0);
+ params_.Clear();
+ decls_.Clear();
+ locals_.Clear();
sloppy_block_function_map_.Clear();
variables_.Clear();
// Make sure we won't walk the scope tree from here on.
inner_scope_ = nullptr;
unresolved_ = nullptr;
- // TODO(verwaest): We should properly preparse the parameters (no declarations
- // should be created), and reparse on abort.
- if (aborted) {
- if (!IsArrowFunction(function_kind_)) {
- DeclareDefaultFunctionVariables(ast_value_factory);
- }
- // Recreate declarations for parameters.
- for (int i = 0; i < params_.length(); i++) {
- Variable* var = params_[i];
- if (var->mode() == TEMPORARY) {
- locals_.Add(var, zone());
- } else if (variables_.Lookup(var->raw_name()) == nullptr) {
- variables_.Add(zone(), var);
- locals_.Add(var, zone());
- }
- }
- } else {
- params_.Rewind(0);
+ if (aborted && !IsArrowFunction(function_kind_)) {
+ DeclareDefaultFunctionVariables(ast_value_factory);
}
#ifdef DEBUG
@@ -1378,9 +1409,9 @@ static void PrintVar(int indent, Variable* var) {
PrintF("forced context allocation");
comma = true;
}
- if (var->maybe_assigned() == kMaybeAssigned) {
+ if (var->maybe_assigned() == kNotAssigned) {
if (comma) PrintF(", ");
- PrintF("maybe assigned");
+ PrintF("never assigned");
}
PrintF("\n");
}
@@ -1454,7 +1485,11 @@ void Scope::Print(int n) {
Indent(n1, "// scope uses 'super' property\n");
}
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
- if (is_lazily_parsed_) Indent(n1, "// lazily parsed\n");
+ if (is_declaration_scope()) {
+ DeclarationScope* scope = AsDeclarationScope();
+ if (scope->is_lazily_parsed()) Indent(n1, "// lazily parsed\n");
+ if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
+ }
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_);
@@ -1491,8 +1526,7 @@ void Scope::Print(int n) {
}
void Scope::CheckScopePositions() {
- // A scope is allowed to have invalid positions if it is hidden and has no
- // inner scopes
+ // Visible leaf scopes must have real positions.
if (!is_hidden() && inner_scope_ == nullptr) {
CHECK_NE(kNoSourcePosition, start_position());
CHECK_NE(kNoSourcePosition, end_position());
@@ -1632,6 +1666,59 @@ void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
}
}
+namespace {
+
+bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
+ if (!var->binding_needs_init()) {
+ return false;
+ }
+
+ // It's impossible to eliminate module import hole checks here, because it's
+ // unknown at compilation time whether the binding referred to in the
+ // exporting module itself requires hole checks.
+ if (var->location() == VariableLocation::MODULE && !var->IsExport()) {
+ return true;
+ }
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding, both the Variable and the VariableProxy have the same
+ // declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code), the VariableProxy is in
+ // the source physically located after the initializer of the variable,
+ // and that the initializer cannot be skipped due to a nonlinear scope.
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ // The check cannot be skipped on non-linear scopes, namely switch
+ // scopes, to ensure tests are done in cases like the following:
+ // switch (1) { case 0: let x = 2; case 1: f(x); }
+ // The scope of the variable needs to be checked, in case the use is
+ // in a sub-block which may be linear.
+ if (var->scope()->GetDeclarationScope() != scope->GetDeclarationScope()) {
+ return true;
+ }
+
+ if (var->is_this()) {
+ DCHECK(
+ IsSubclassConstructor(scope->GetDeclarationScope()->function_kind()));
+ // TODO(littledan): implement 'this' hole check elimination.
+ return true;
+ }
+
+ // We should always have valid source positions.
+ DCHECK(var->initializer_position() != kNoSourcePosition);
+ DCHECK(proxy->position() != kNoSourcePosition);
+
+ return var->scope()->is_nonlinear() ||
+ var->initializer_position() >= proxy->position();
+}
+
+} // anonymous namespace
+
void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
#ifdef DEBUG
if (info->script_is_native()) {
@@ -1656,6 +1743,7 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
DCHECK_NOT_NULL(var);
if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (AccessNeedsHoleCheck(var, proxy, this)) proxy->set_needs_hole_check();
proxy->BindTo(var);
}
@@ -1833,8 +1921,8 @@ void Scope::AllocateNonParameterLocal(Variable* var) {
}
void Scope::AllocateNonParameterLocalsAndDeclaredGlobals() {
- for (int i = 0; i < locals_.length(); i++) {
- AllocateNonParameterLocal(locals_[i]);
+ for (Variable* local : locals_) {
+ AllocateNonParameterLocal(local);
}
if (is_declaration_scope()) {
@@ -1866,13 +1954,14 @@ void DeclarationScope::AllocateLocals() {
void ModuleScope::AllocateModuleVariables() {
for (const auto& it : module()->regular_imports()) {
Variable* var = LookupLocal(it.first);
- // TODO(neis): Use a meaningful index.
- var->AllocateTo(VariableLocation::MODULE, 42);
+ var->AllocateTo(VariableLocation::MODULE, it.second->cell_index);
+ DCHECK(!var->IsExport());
}
for (const auto& it : module()->regular_exports()) {
Variable* var = LookupLocal(it.first);
- var->AllocateTo(VariableLocation::MODULE, 0);
+ var->AllocateTo(VariableLocation::MODULE, it.second->cell_index);
+ DCHECK(var->IsExport());
}
}
@@ -1880,7 +1969,9 @@ void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
DCHECK_EQ(0, num_stack_slots_);
// Don't allocate variables of preparsed scopes.
- if (is_lazily_parsed_) return;
+ if (is_declaration_scope() && AsDeclarationScope()->is_lazily_parsed()) {
+ return;
+ }
// Allocate variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
@@ -1893,9 +1984,7 @@ void Scope::AllocateVariablesRecursively() {
// Allocate variables for this scope.
// Parameters must be allocated first, if any.
if (is_declaration_scope()) {
- if (is_module_scope()) {
- AsModuleScope()->AllocateModuleVariables();
- } else if (is_function_scope()) {
+ if (is_function_scope()) {
AsDeclarationScope()->AllocateParameterLocals();
}
AsDeclarationScope()->AllocateReceiver();
@@ -1921,21 +2010,36 @@ void Scope::AllocateVariablesRecursively() {
DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
}
-void Scope::AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope) {
DCHECK(scope_info_.is_null());
- if (mode == AnalyzeMode::kDebugger || NeedsScopeInfo()) {
+ MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
+
+ if (NeedsScopeInfo()) {
scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
+ // The ScopeInfo chain should mirror the context chain, so we only link to
+ // the next outer scope that needs a context.
+ if (NeedsContext()) next_outer_scope = scope_info_;
}
- // The ScopeInfo chain should mirror the context chain, so we only link to
- // the next outer scope that needs a context.
- MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
- if (NeedsContext()) next_outer_scope = scope_info_;
-
// Allocate ScopeInfos for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->AllocateScopeInfosRecursively(isolate, mode, next_outer_scope);
+ if (!scope->is_function_scope() ||
+ scope->AsDeclarationScope()->ShouldEagerCompile()) {
+ scope->AllocateScopeInfosRecursively(isolate, next_outer_scope);
+ }
+ }
+}
+
+void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
+ MaybeHandle<ScopeInfo> outer_scope) {
+ if (scope_info_.is_null()) {
+ scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
+ }
+ MaybeHandle<ScopeInfo> outer = NeedsContext() ? scope_info_ : outer_scope;
+ for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ if (scope->is_function_scope()) continue;
+ scope->AllocateDebuggerScopeInfos(isolate, outer);
}
}
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 0acff8ac32..c7d88aca11 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -5,6 +5,7 @@
#ifndef V8_AST_SCOPES_H_
#define V8_AST_SCOPES_H_
+#include "src/base/compiler-specific.h"
#include "src/base/hashmap.h"
#include "src/globals.h"
#include "src/objects.h"
@@ -62,7 +63,7 @@ enum class AnalyzeMode { kRegular, kDebugger };
// and ModuleScope. DeclarationScope is used for any scope that hosts 'var'
// declarations. This includes script, module, eval, varblock, and function
// scope. ModuleScope further specializes DeclarationScope.
-class Scope: public ZoneObject {
+class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
public:
// ---------------------------------------------------------------------------
// Construction
@@ -95,8 +96,8 @@ class Scope: public ZoneObject {
Scope* outer_scope_;
Scope* top_inner_scope_;
VariableProxy* top_unresolved_;
- int top_local_;
- int top_decl_;
+ ThreadedList<Variable>::Iterator top_local_;
+ ThreadedList<Declaration>::Iterator top_decl_;
};
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
@@ -157,15 +158,14 @@ class Scope: public ZoneObject {
bool* ok);
// Declarations list.
- ZoneList<Declaration*>* declarations() { return &decls_; }
+ ThreadedList<Declaration>* declarations() { return &decls_; }
- ZoneList<Variable*>* locals() { return &locals_; }
+ ThreadedList<Variable>* locals() { return &locals_; }
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
int start_position = kNoSourcePosition,
- int end_position = kNoSourcePosition,
VariableKind kind = NORMAL_VARIABLE);
void AddUnresolved(VariableProxy* proxy);
@@ -351,7 +351,7 @@ class Scope: public ZoneObject {
// Determine if we can parse a function literal in this scope lazily without
// caring about the unresolved variables within.
- bool AllowsLazyParsingWithoutUnresolvedVariables() const;
+ bool AllowsLazyParsingWithoutUnresolvedVariables(const Scope* outer) const;
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope) const;
@@ -374,6 +374,7 @@ class Scope: public ZoneObject {
// the scope for which a function prologue allocates a context) or declaring
// temporaries.
DeclarationScope* GetClosureScope();
+ const DeclarationScope* GetClosureScope() const;
// Find the first (non-arrow) function or script scope. This is where
// 'this' is bound, and what determines the function kind.
@@ -422,8 +423,6 @@ class Scope: public ZoneObject {
void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
- bool is_lazily_parsed() const { return is_lazily_parsed_; }
-
protected:
explicit Scope(Zone* zone);
@@ -435,29 +434,13 @@ class Scope: public ZoneObject {
Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned) {
- bool added;
- Variable* var =
- variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
- maybe_assigned_flag, &added);
- if (added) locals_.Add(var, zone);
- return var;
- }
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// This method should only be invoked on scopes created during parsing (i.e.,
// not deserialized from a context). Also, since NeedsContext() is only
// returning a valid result after variables are resolved, NeedsScopeInfo()
// should also be invoked after resolution.
- bool NeedsScopeInfo() const {
- DCHECK(!already_resolved_);
- // A lazily parsed scope doesn't contain enough information to create a
- // ScopeInfo from it.
- if (is_lazily_parsed_) return false;
- // The debugger expects all functions to have scope infos.
- // TODO(jochen|yangguo): Remove this requirement.
- if (is_function_scope()) return true;
- return NeedsContext();
- }
+ bool NeedsScopeInfo() const;
Zone* zone_;
@@ -474,13 +457,12 @@ class Scope: public ZoneObject {
VariableMap variables_;
// In case of non-scopeinfo-backed scopes, this contains the variables of the
// map above in order of addition.
- // TODO(verwaest): Thread through Variable.
- ZoneList<Variable*> locals_;
+ ThreadedList<Variable> locals_;
// Unresolved variables referred to from this scope. The proxies themselves
// form a linked list of all unresolved proxies.
VariableProxy* unresolved_;
// Declarations.
- ZoneList<Declaration*> decls_;
+ ThreadedList<Declaration> decls_;
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
@@ -527,8 +509,6 @@ class Scope: public ZoneObject {
// True if it holds 'var' declarations.
bool is_declaration_scope_ : 1;
- bool is_lazily_parsed_ : 1;
-
// Create a non-local variable with a given name.
// These variables are looked up dynamically at runtime.
Variable* NonLocal(const AstRawString* name, VariableMode mode);
@@ -563,8 +543,10 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocalsAndDeclaredGlobals();
void AllocateVariablesRecursively();
- void AllocateScopeInfosRecursively(Isolate* isolate, AnalyzeMode mode,
+ void AllocateScopeInfosRecursively(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
+ void AllocateDebuggerScopeInfos(Isolate* isolate,
+ MaybeHandle<ScopeInfo> outer_scope);
// Construct a scope based on the scope info.
Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
@@ -634,6 +616,10 @@ class DeclarationScope : public Scope {
IsClassConstructor(function_kind())));
}
+ bool is_lazily_parsed() const { return is_lazily_parsed_; }
+ bool ShouldEagerCompile() const;
+ void set_should_eager_compile();
+
void SetScriptScopeInfo(Handle<ScopeInfo> scope_info) {
DCHECK(is_script_scope());
DCHECK(scope_info_.is_null());
@@ -704,16 +690,6 @@ class DeclarationScope : public Scope {
return params_[index];
}
- // Returns the default function arity excluding default or rest parameters.
- // This will be used to set the length of the function, by default.
- // Class field initializers use this property to indicate the number of
- // fields being initialized.
- int arity() const { return arity_; }
-
- // Normal code should not need to call this. Class field initializers use this
- // property to indicate the number of fields being initialized.
- void set_arity(int arity) { arity_ = arity; }
-
// Returns the number of formal parameters, excluding a possible rest
// parameter. Examples:
// function foo(a, b) {} ==> 2
@@ -758,12 +734,7 @@ class DeclarationScope : public Scope {
// Adds a local variable in this scope's locals list. This is for adjusting
// the scope of temporaries and do-expression vars when desugaring parameter
// initializers.
- void AddLocal(Variable* var) {
- DCHECK(!already_resolved_);
- // Temporaries are only placed in ClosureScopes.
- DCHECK_EQ(GetClosureScope(), this);
- locals_.Add(var, zone());
- }
+ void AddLocal(Variable* var);
void DeclareSloppyBlockFunction(const AstRawString* name,
SloppyBlockFunctionStatement* statement) {
@@ -796,16 +767,15 @@ class DeclarationScope : public Scope {
// Determine if we can use lazy compilation for this scope.
bool AllowsLazyCompilation() const;
- // Determine if we can use lazy compilation for this scope without a context.
- bool AllowsLazyCompilationWithoutContext() const;
-
// Make sure this closure and all outer closures are eagerly compiled.
void ForceEagerCompilation() {
DCHECK_EQ(this, GetClosureScope());
- for (DeclarationScope* s = this; !s->is_script_scope();
+ DeclarationScope* s;
+ for (s = this; !s->is_script_scope();
s = s->outer_scope()->GetClosureScope()) {
s->force_eager_compilation_ = true;
}
+ s->force_eager_compilation_ = true;
}
#ifdef DEBUG
@@ -848,9 +818,9 @@ class DeclarationScope : public Scope {
bool has_arguments_parameter_ : 1;
// This scope uses "super" property ('super.foo').
bool scope_uses_super_property_ : 1;
+ bool should_eager_compile_ : 1;
+ bool is_lazily_parsed_ : 1;
- // Info about the parameter list of a function.
- int arity_;
// Parameter list in source order.
ZoneList<Variable*> params_;
// Map of function names to lists of functions defined in sloppy blocks
@@ -884,7 +854,7 @@ class ModuleScope final : public DeclarationScope {
return module_descriptor_;
}
- // Set MODULE as VariableLocation for all variables that will live in some
+ // Set MODULE as VariableLocation for all variables that will live in a
// module's export table.
void AllocateModuleVariables();
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index cc269cd0c7..3771bfee12 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -19,6 +19,7 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
: scope_(scope),
name_(name),
local_if_not_shadowed_(nullptr),
+ next_(nullptr),
index_(-1),
initializer_position_(kNoSourcePosition),
bit_field_(MaybeAssignedFlagField::encode(maybe_assigned_flag) |
@@ -40,14 +41,5 @@ bool Variable::IsGlobalObjectProperty() const {
scope_ != NULL && scope_->is_script_scope();
}
-
-bool Variable::IsStaticGlobalObjectProperty() const {
- // Temporaries are never global, they must always be allocated in the
- // activation frame.
- return (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode())) &&
- scope_ != NULL && scope_->is_script_scope();
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 5bc7869646..b7d9226b1c 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -64,7 +64,6 @@ class Variable final : public ZoneObject {
bool IsContextSlot() const { return location() == VariableLocation::CONTEXT; }
bool IsLookupSlot() const { return location() == VariableLocation::LOOKUP; }
bool IsGlobalObjectProperty() const;
- bool IsStaticGlobalObjectProperty() const;
bool is_dynamic() const { return IsDynamicVariableMode(mode()); }
bool binding_needs_init() const {
@@ -102,13 +101,15 @@ class Variable final : public ZoneObject {
int index() const { return index_; }
bool IsExport() const {
- DCHECK(location() == VariableLocation::MODULE);
- return index() == 0;
+ DCHECK_EQ(location(), VariableLocation::MODULE);
+ DCHECK_NE(index(), 0);
+ return index() > 0;
}
void AllocateTo(VariableLocation location, int index) {
DCHECK(IsUnallocated() ||
(this->location() == location && this->index() == index));
+ DCHECK_IMPLIES(location == VariableLocation::MODULE, index != 0);
bit_field_ = LocationField::update(bit_field_, location);
DCHECK_EQ(location, this->location());
index_ = index;
@@ -119,6 +120,8 @@ class Variable final : public ZoneObject {
return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
}
+ typedef ThreadedList<Variable> List;
+
private:
Scope* scope_;
const AstRawString* name_;
@@ -128,6 +131,7 @@ class Variable final : public ZoneObject {
// sloppy 'eval' calls between the reference scope (inclusive) and the
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
+ Variable* next_;
int index_;
int initializer_position_;
uint16_t bit_field_;
@@ -146,6 +150,8 @@ class Variable final : public ZoneObject {
class MaybeAssignedFlagField
: public BitField16<MaybeAssignedFlag, InitializationFlagField::kNext,
2> {};
+ Variable** next() { return &next_; }
+ friend List;
};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index 83075c1eec..e0af700ce5 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -29,24 +29,20 @@ BackgroundParsingTask::BackgroundParsingTask(
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
- Zone* zone = new Zone(isolate->allocator());
+ Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
ParseInfo* info = new ParseInfo(zone);
+ info->set_toplevel();
source->zone.reset(zone);
source->info.reset(info);
info->set_isolate(isolate);
info->set_source_stream(source->source_stream.get());
info->set_source_stream_encoding(source->encoding);
info->set_hash_seed(isolate->heap()->HashSeed());
- info->set_global();
info->set_unicode_cache(&source_->unicode_cache);
info->set_compile_options(options);
- // Parse eagerly with ignition since we will compile eagerly.
- info->set_allow_lazy_parsing(!(i::FLAG_ignition && i::FLAG_ignition_eager));
+ info->set_allow_lazy_parsing();
- if (options == ScriptCompiler::kProduceParserCache ||
- options == ScriptCompiler::kProduceCodeCache) {
- source_->info->set_cached_data(&script_data_);
- }
+ source_->info->set_cached_data(&script_data_);
// Parser needs to stay alive for finalizing the parsing on the main
// thread.
source_->parser.reset(new Parser(source_->info.get()));
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 6b7da16ad5..247024ff41 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -35,6 +35,7 @@ namespace internal {
V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
+ V(kClassConstructorFunction, "Class constructor function") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
@@ -60,6 +61,8 @@ namespace internal {
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
+ V(kCyclicObjectStateDetectedInEscapeAnalysis, \
+ "Cyclic object state detected by escape analysis") \
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
@@ -74,7 +77,6 @@ namespace internal {
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
- V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
V(kForInStatementWithNonLocalEachVariable, \
@@ -116,10 +118,6 @@ namespace internal {
V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
- V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
- "JSGlobalObject::native_context should be a native context") \
- V(kJSGlobalProxyContextShouldNotBeNull, \
- "JSGlobalProxy::context() should not be null") \
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
@@ -176,6 +174,7 @@ namespace internal {
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
"Reference to a variable which requires dynamic lookup") \
V(kReferenceToGlobalLexicalVariable, "Reference to global lexical variable") \
+ V(kReferenceToModuleVariable, "Reference to module-allocated variable") \
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
@@ -247,6 +246,7 @@ namespace internal {
V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
V(kUnsupportedLookupSlotInDeclaration, \
"Unsupported lookup slot in declaration") \
+ V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
@@ -256,8 +256,6 @@ namespace internal {
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kUnstableConstantTypeHeapObject, "Unstable constant-type heap object") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
- V(kWeShouldNotHaveAnEmptyLexicalContext, \
- "We should not have an empty lexical context") \
V(kWithStatement, "WithStatement") \
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
diff --git a/deps/v8/src/base/atomic-utils.h b/deps/v8/src/base/atomic-utils.h
index 31db603bf9..f40853c587 100644
--- a/deps/v8/src/base/atomic-utils.h
+++ b/deps/v8/src/base/atomic-utils.h
@@ -51,6 +51,60 @@ class AtomicNumber {
base::AtomicWord value_;
};
+// This type uses no barrier accessors to change atomic word. Be careful with
+// data races.
+template <typename T>
+class NoBarrierAtomicValue {
+ public:
+ NoBarrierAtomicValue() : value_(0) {}
+
+ explicit NoBarrierAtomicValue(T initial)
+ : value_(cast_helper<T>::to_storage_type(initial)) {}
+
+ static NoBarrierAtomicValue* FromAddress(void* address) {
+ return reinterpret_cast<base::NoBarrierAtomicValue<T>*>(address);
+ }
+
+ V8_INLINE bool TrySetValue(T old_value, T new_value) {
+ return base::NoBarrier_CompareAndSwap(
+ &value_, cast_helper<T>::to_storage_type(old_value),
+ cast_helper<T>::to_storage_type(new_value)) ==
+ cast_helper<T>::to_storage_type(old_value);
+ }
+
+ V8_INLINE T Value() const {
+ return cast_helper<T>::to_return_type(base::NoBarrier_Load(&value_));
+ }
+
+ V8_INLINE void SetValue(T new_value) {
+ base::NoBarrier_Store(&value_, cast_helper<T>::to_storage_type(new_value));
+ }
+
+ private:
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+
+ template <typename S>
+ struct cast_helper {
+ static base::AtomicWord to_storage_type(S value) {
+ return static_cast<base::AtomicWord>(value);
+ }
+ static S to_return_type(base::AtomicWord value) {
+ return static_cast<S>(value);
+ }
+ };
+
+ template <typename S>
+ struct cast_helper<S*> {
+ static base::AtomicWord to_storage_type(S* value) {
+ return reinterpret_cast<base::AtomicWord>(value);
+ }
+ static S* to_return_type(base::AtomicWord value) {
+ return reinterpret_cast<S*>(value);
+ }
+ };
+
+ base::AtomicWord value_;
+};
// Flag using T atomically. Also accepts void* as T.
template <typename T>
@@ -73,7 +127,7 @@ class AtomicValue {
}
V8_INLINE void SetBits(T bits, T mask) {
- DCHECK_EQ(bits & ~mask, 0);
+ DCHECK_EQ(bits & ~mask, static_cast<T>(0));
T old_value;
T new_value;
do {
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 973e96b9ea..927ebbee11 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -26,9 +26,17 @@
#define V8_BASE_ATOMICOPS_H_
#include <stdint.h>
+
+// Small C++ header which defines implementation specific macros used to
+// identify the STL implementation.
+// - libc++: captures __config for _LIBCPP_VERSION
+// - libstdc++: captures bits/c++config.h for __GLIBCXX__
+#include <cstddef>
+
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
-#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+#if defined(V8_OS_WIN) && defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier at the public name for this fence. So, on
// X64, undef it, and call its documented
@@ -100,13 +108,11 @@ Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
void MemoryBarrier();
void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
-void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-Atomic32 Release_Load(volatile const Atomic32* ptr);
// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
@@ -124,44 +130,25 @@ Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
-void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif // V8_HOST_ARCH_64_BIT
} // namespace base
} // namespace v8
-// Include our platform specific implementation.
-#if defined(THREAD_SANITIZER)
-#include "src/base/atomicops_internals_tsan.h"
-#elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#if defined(V8_OS_WIN)
+// TODO(hpayer): The MSVC header includes windows.h, which other files end up
+// relying on. Fix this as part of crbug.com/559247.
#include "src/base/atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__)
-#include "src/base/atomicops_internals_mac.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_ARM64
-#include "src/base/atomicops_internals_arm64_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_ARM
-#include "src/base/atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_PPC
-#include "src/base/atomicops_internals_ppc_gcc.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "src/base/atomicops_internals_x86_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
-#include "src/base/atomicops_internals_mips_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
-#include "src/base/atomicops_internals_mips64_gcc.h"
-#elif defined(__GNUC__) && V8_HOST_ARCH_S390
-#include "src/base/atomicops_internals_s390_gcc.h"
#else
-#error "Atomic operations are not supported on your platform"
+#include "src/base/atomicops_internals_portable.h"
#endif
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
-#if defined(__APPLE__) || defined(__OpenBSD__) || defined(V8_OS_AIX)
+#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
#include "src/base/atomicops_internals_atomicword_compat.h"
#endif
diff --git a/deps/v8/src/base/atomicops_internals_arm64_gcc.h b/deps/v8/src/base/atomicops_internals_arm64_gcc.h
deleted file mode 100644
index f24050a3e6..0000000000
--- a/deps/v8/src/base/atomicops_internals_arm64_gcc.h
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-namespace v8 {
-namespace base {
-
-inline void MemoryBarrier() {
- __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
-}
-
-// NoBarrier versions of the operation include "memory" in the clobber list.
-// This is not required for direct usage of the NoBarrier versions of the
-// operations. However this is required for correctness when they are used as
-// part of the Acquire or Release versions, to ensure that nothing from outside
-// the call is reordered between the operation and the memory barrier. This does
-// not change the code generated, so has no or minimal impact on the
-// NoBarrier operations.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
- "cmp %w[prev], %w[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- "1: \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"IJr" (old_value),
- [new_value]"r" (new_value)
- : "cc", "memory"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
- "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
- "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [new_value]"r" (new_value)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
- "add %w[result], %w[result], %w[increment]\n\t"
- "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
- "cbnz %w[temp], 0b \n\t" // Retry on failure.
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [increment]"IJr" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 result;
-
- MemoryBarrier();
- result = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
-
- return result;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
-
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
-
- return prev;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
-
- MemoryBarrier();
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-
- return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __asm__ __volatile__ ( // NOLINT
- "stlr %w[value], %[ptr] \n\t"
- : [ptr]"=Q" (*ptr)
- : [value]"r" (value)
- : "memory"
- ); // NOLINT
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value;
-
- __asm__ __volatile__ ( // NOLINT
- "ldar %w[value], %[ptr] \n\t"
- : [value]"=r" (value)
- : [ptr]"Q" (*ptr)
- : "memory"
- ); // NOLINT
-
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-// 64-bit versions of the operations.
-// See the 32-bit versions for comments.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[prev], %[ptr] \n\t"
- "cmp %[prev], %[old_value] \n\t"
- "bne 1f \n\t"
- "stxr %w[temp], %[new_value], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "1: \n\t"
- : [prev]"=&r" (prev),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [old_value]"IJr" (old_value),
- [new_value]"r" (new_value)
- : "cc", "memory"
- ); // NOLINT
-
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[result], %[ptr] \n\t"
- "stxr %w[temp], %[new_value], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [new_value]"r" (new_value)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "0: \n\t"
- "ldxr %[result], %[ptr] \n\t"
- "add %[result], %[result], %[increment] \n\t"
- "stxr %w[temp], %[result], %[ptr] \n\t"
- "cbnz %w[temp], 0b \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp),
- [ptr]"+Q" (*ptr)
- : [increment]"IJr" (increment)
- : "memory"
- ); // NOLINT
-
- return result;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 result;
-
- MemoryBarrier();
- result = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
-
- return result;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
-
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
-
- return prev;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
-
- MemoryBarrier();
- prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-
- return prev;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __asm__ __volatile__ ( // NOLINT
- "stlr %x[value], %[ptr] \n\t"
- : [ptr]"=Q" (*ptr)
- : [value]"r" (value)
- : "memory"
- ); // NOLINT
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value;
-
- __asm__ __volatile__ ( // NOLINT
- "ldar %x[value], %[ptr] \n\t"
- : [value]"=r" (value)
- : [ptr]"Q" (*ptr)
- : "memory"
- ); // NOLINT
-
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_arm_gcc.h b/deps/v8/src/base/atomicops_internals_arm_gcc.h
deleted file mode 100644
index 8d049e04b4..0000000000
--- a/deps/v8/src/base/atomicops_internals_arm_gcc.h
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
-
-#if defined(__QNXNTO__)
-#include <sys/cpuinline.h>
-#endif
-
-namespace v8 {
-namespace base {
-
-// Memory barriers on ARM are funky, but the kernel is here to help:
-//
-// * ARMv5 didn't support SMP, there is no memory barrier instruction at
-// all on this architecture, or when targeting its machine code.
-//
-// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
-// writing a random value to a very specific coprocessor register.
-//
-// * On ARMv7, the "dmb" instruction is used to perform a full memory
-// barrier (though writing to the co-processor will still work).
-// However, on single core devices (e.g. Nexus One, or Nexus S),
-// this instruction will take up to 200 ns, which is huge, even though
-// it's completely un-needed on these devices.
-//
-// * There is no easy way to determine at runtime if the device is
-// single or multi-core. However, the kernel provides a useful helper
-// function at a fixed memory address (0xffff0fa0), which will always
-// perform a memory barrier in the most efficient way. I.e. on single
-// core devices, this is an empty function that exits immediately.
-// On multi-core devices, it implements a full memory barrier.
-//
-// * This source could be compiled to ARMv5 machine code that runs on a
-// multi-core ARMv6 or ARMv7 device. In this case, memory barriers
-// are needed for correct execution. Always call the kernel helper, even
-// when targeting ARMv5TE.
-//
-
-inline void MemoryBarrier() {
-#if defined(__ANDROID__)
- // Note: This is a function call, which is also an implicit compiler barrier.
- typedef void (*KernelMemoryBarrierFunc)();
- ((KernelMemoryBarrierFunc)0xffff0fa0)();
-#elif defined(__QNXNTO__)
- __cpu_membarrier();
-#else
- // Fallback to GCC built-in function
- __sync_synchronize();
-#endif
-}
-
-// An ARM toolchain would only define one of these depending on which
-// variant of the target architecture is being used. This tests against
-// any known ARMv6 or ARMv7 variant, where it is possible to directly
-// use ldrex/strex instructions to implement fast atomic operations.
-#if defined(__ARM_ARCH_8A__) || \
- defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
- defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
- defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
- defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__)
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- int reloop;
- do {
- // The following is equivalent to:
- //
- // prev_value = LDREX(ptr)
- // reloop = 0
- // if (prev_value != old_value)
- // reloop = STREX(ptr, new_value)
- __asm__ __volatile__(" ldrex %0, [%3]\n"
- " mov %1, #0\n"
- " cmp %0, %4\n"
-#ifdef __thumb2__
- " it eq\n"
-#endif
- " strexeq %1, %5, [%3]\n"
- : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
- : "r"(ptr), "r"(old_value), "r"(new_value)
- : "cc", "memory");
- } while (reloop != 0);
- return prev_value;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return result;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 value;
- int reloop;
- do {
- // Equivalent to:
- //
- // value = LDREX(ptr)
- // value += increment
- // reloop = STREX(ptr, value)
- //
- __asm__ __volatile__(" ldrex %0, [%3]\n"
- " add %0, %0, %4\n"
- " strex %1, %0, [%3]\n"
- : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
- : "r"(ptr), "r"(increment)
- : "cc", "memory");
- } while (reloop);
- return value;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- // TODO(digit): Investigate if it's possible to implement this with
- // a single MemoryBarrier() operation between the LDREX and STREX.
- // See http://crbug.com/246514
- MemoryBarrier();
- Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return result;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- int reloop;
- do {
- // old_value = LDREX(ptr)
- // reloop = STREX(ptr, new_value)
- __asm__ __volatile__(" ldrex %0, [%3]\n"
- " strex %1, %4, [%3]\n"
- : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
- : "r"(ptr), "r"(new_value)
- : "cc", "memory");
- } while (reloop != 0);
- return old_value;
-}
-
-// This tests against any known ARMv5 variant.
-#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
-
-// The kernel also provides a helper function to perform an atomic
-// compare-and-swap operation at the hard-wired address 0xffff0fc0.
-// On ARMv5, this is implemented by a special code path that the kernel
-// detects and treats specially when thread pre-emption happens.
-// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
-//
-// Note that this always perform a full memory barrier, there is no
-// need to add calls MemoryBarrier() before or after it. It also
-// returns 0 on success, and 1 on exit.
-//
-// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
-// use newer kernel revisions, so this should not be a concern.
-namespace {
-
-inline int LinuxKernelCmpxchg(Atomic32 old_value,
- Atomic32 new_value,
- volatile Atomic32* ptr) {
- typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
- return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
-}
-
-} // namespace
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- for (;;) {
- prev_value = *ptr;
- if (prev_value != old_value)
- return prev_value;
- if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
- return old_value;
- }
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- // Atomic exchange the old value with an incremented one.
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
- // The exchange took place as expected.
- return new_value;
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- for (;;) {
- prev_value = *ptr;
- if (prev_value != old_value) {
- // Always ensure acquire semantics.
- MemoryBarrier();
- return prev_value;
- }
- if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
- return old_value;
- }
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- // This could be implemented as:
- // MemoryBarrier();
- // return NoBarrier_CompareAndSwap();
- //
- // But would use 3 barriers per succesful CAS. To save performance,
- // use Acquire_CompareAndSwap(). Its implementation guarantees that:
- // - A succesful swap uses only 2 barriers (in the kernel helper).
- // - An early return due to (prev_value != old_value) performs
- // a memory barrier with no store, which is equivalent to the
- // generic implementation above.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#else
-# error "Your CPU's ARM architecture is not supported yet"
-#endif
-
-// NOTE: Atomicity of the following load and store operations is only
-// guaranteed in case of 32-bit alignement of |ptr| values.
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-// Byte accessors.
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_atomicword_compat.h b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
index 4f758a7299..5071f442b4 100644
--- a/deps/v8/src/base/atomicops_internals_atomicword_compat.h
+++ b/deps/v8/src/base/atomicops_internals_atomicword_compat.h
@@ -67,11 +67,6 @@ inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
reinterpret_cast<volatile Atomic32*>(ptr), value);
}
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
- return v8::base::Acquire_Store(
- reinterpret_cast<volatile Atomic32*>(ptr), value);
-}
-
inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
return v8::base::Release_Store(
reinterpret_cast<volatile Atomic32*>(ptr), value);
@@ -87,11 +82,6 @@ inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
reinterpret_cast<volatile const Atomic32*>(ptr));
}
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
- return v8::base::Release_Load(
- reinterpret_cast<volatile const Atomic32*>(ptr));
-}
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/atomicops_internals_mac.h b/deps/v8/src/base/atomicops_internals_mac.h
deleted file mode 100644
index c112506238..0000000000
--- a/deps/v8/src/base/atomicops_internals_mac.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
-
-#include <libkern/OSAtomic.h>
-
-namespace v8 {
-namespace base {
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-inline void MemoryBarrier() { OSMemoryBarrier(); }
-
-inline void AcquireMemoryBarrier() {
-// On x86 processors, loads already have acquire semantics, so
-// there is no need to put a full barrier here.
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
- ATOMICOPS_COMPILER_BARRIER();
-#else
- MemoryBarrier();
-#endif
-}
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap32(old_value, new_value,
- const_cast<Atomic32*>(ptr)));
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev_value;
- do {
- if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
- const_cast<Atomic32*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- AcquireMemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef __LP64__
-
-// 64-bit implementation on 64-bit platform
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64(old_value, new_value,
- reinterpret_cast<volatile int64_t*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (!OSAtomicCompareAndSwap64(old_value, new_value,
- reinterpret_cast<volatile int64_t*>(ptr)));
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return OSAtomicAdd64Barrier(increment,
- reinterpret_cast<volatile int64_t*>(ptr));
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev_value;
- do {
- if (OSAtomicCompareAndSwap64Barrier(
- old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
- return old_value;
- }
- prev_value = *ptr;
- } while (prev_value == old_value);
- return prev_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- // The lib kern interface does not distinguish between
- // Acquire and Release memory barriers; they are equivalent.
- return Acquire_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- AcquireMemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif // defined(__LP64__)
-
-#undef ATOMICOPS_COMPILER_BARRIER
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips64_gcc.h b/deps/v8/src/base/atomicops_internals_mips64_gcc.h
deleted file mode 100644
index cf2e194e50..0000000000
--- a/deps/v8/src/base/atomicops_internals_mips64_gcc.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "sc %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "r" (old_value), "r" (new_value), "m" (*ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "sc %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp, temp2;
-
- __asm__ __volatile__(
- ".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, %2\n" // temp = *ptr
- "addu %1, %0, %3\n" // temp2 = temp + increment
- "sc %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
- : "Ir"(increment), "m"(*ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- MemoryBarrier();
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-
-// 64-bit versions of the atomic ops.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %0, %5\n" // prev = *ptr
- "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
- "move %2, %4\n" // tmp = new_value
- "scd %2, %1\n" // *ptr = tmp (with atomic check)
- "beqz %2, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
- : "r" (old_value), "r" (new_value), "m" (*ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %1, %2\n" // old = *ptr
- "move %0, %3\n" // temp = new_value
- "scd %0, %2\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old), "=m" (*ptr)
- : "r" (new_value), "m" (*ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp, temp2;
-
- __asm__ __volatile__(
- ".set push\n"
- ".set noreorder\n"
- "1:\n"
- "lld %0, %2\n" // temp = *ptr
- "daddu %1, %0, %3\n" // temp2 = temp + increment
- "scd %1, %2\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "daddu %1, %0, %3\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr)
- : "Ir"(increment), "m"(*ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- MemoryBarrier();
- Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return res;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_mips_gcc.h b/deps/v8/src/base/atomicops_internals_mips_gcc.h
deleted file mode 100644
index 8d65db2127..0000000000
--- a/deps/v8/src/base/atomicops_internals_mips_gcc.h
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// Atomically execute:
-// result = *ptr;
-// if (*ptr == old_value)
-// *ptr = new_value;
-// return result;
-//
-// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
-// Always return the old value of "*ptr"
-//
-// This routine implies no memory barriers.
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev, tmp;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, 0(%4)\n" // prev = *ptr
- "bne %0, %2, 2f\n" // if (prev != old_value) goto 2
- "move %1, %3\n" // tmp = new_value
- "sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- "2:\n"
- ".set pop\n"
- : "=&r" (prev), "=&r" (tmp)
- : "r" (old_value), "r" (new_value), "r" (ptr)
- : "memory");
- return prev;
-}
-
-// Atomically store new_value into *ptr, returning the previous value held in
-// *ptr. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 temp, old;
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- ".set at\n"
- "1:\n"
- "ll %1, 0(%3)\n" // old = *ptr
- "move %0, %2\n" // temp = new_value
- "sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
- "beqz %0, 1b\n" // start again on atomic error
- "nop\n" // delay slot nop
- ".set pop\n"
- : "=&r" (temp), "=&r" (old)
- : "r" (new_value), "r" (ptr)
- : "memory");
-
- return old;
-}
-
-// Atomically increment *ptr by "increment". Returns the new value of
-// *ptr with the increment applied. This routine implies no memory barriers.
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp, temp2;
-
- __asm__ __volatile__(".set push\n"
- ".set noreorder\n"
- "1:\n"
- "ll %0, 0(%3)\n" // temp = *ptr
- "addu %1, %0, %2\n" // temp2 = temp + increment
- "sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
- "beqz %1, 1b\n" // start again on atomic error
- "addu %1, %0, %2\n" // temp2 = temp + increment
- ".set pop\n"
- : "=&r" (temp), "=&r" (temp2)
- : "Ir" (increment), "r" (ptr)
- : "memory");
- // temp2 now holds the final value.
- return temp2;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- MemoryBarrier();
- Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
- MemoryBarrier();
- return res;
-}
-
-// "Acquire" operations
-// ensure that no later memory access can be reordered ahead of the operation.
-// "Release" operations ensure that no previous memory access can be reordered
-// after the operation. "Barrier" operations have both "Acquire" and "Release"
-// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
-// access.
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- MemoryBarrier();
- return res;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- MemoryBarrier();
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h
new file mode 100644
index 0000000000..72c1d9a328
--- /dev/null
+++ b/deps/v8/src/base/atomicops_internals_portable.h
@@ -0,0 +1,172 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+// This implementation uses C++11 atomics' member functions. The code base is
+// currently written assuming atomicity revolves around accesses instead of
+// C++11's memory locations. The burden is on the programmer to ensure that all
+// memory locations accessed atomically are never accessed non-atomically (tsan
+// should help with this).
+//
+// Of note in this implementation:
+// * All NoBarrier variants are implemented as relaxed.
+// * All Barrier variants are implemented as sequentially-consistent.
+// * Compare exchange's failure ordering is always the same as the success one
+// (except for release, which fails as relaxed): using a weaker ordering is
+// only valid under certain uses of compare exchange.
+// * Acquire store doesn't exist in the C11 memory model, it is instead
+// implemented as a relaxed store followed by a sequentially consistent
+// fence.
+// * Release load doesn't exist in the C11 memory model, it is instead
+// implemented as sequentially consistent fence followed by a relaxed load.
+// * Atomic increment is expected to return the post-incremented value, whereas
+// C11 fetch add returns the previous value. The implementation therefore
+// needs to increment twice (which the compiler should be able to detect and
+// optimize).
+
+#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
+
+#include <atomic>
+
+#include "src/base/build_config.h"
+
+namespace v8 {
+namespace base {
+
+// This implementation is transitional and maintains the original API for
+// atomicops.h.
+
+inline void MemoryBarrier() {
+#if defined(__GLIBCXX__)
+ // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
+ // not defined, leading to the linker complaining about undefined references.
+ __atomic_thread_fence(std::memory_order_seq_cst);
+#else
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+#endif
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+#if defined(V8_HOST_ARCH_64_BIT)
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return old_value;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_RELAXED);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
+
+#endif // defined(V8_HOST_ARCH_64_BIT)
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/deps/v8/src/base/atomicops_internals_ppc_gcc.h b/deps/v8/src/base/atomicops_internals_ppc_gcc.h
deleted file mode 100644
index 0d16500d1b..0000000000
--- a/deps/v8/src/base/atomicops_internals_ppc_gcc.h
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_
-
-namespace v8 {
-namespace base {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- for (;;) {
- Atomic32 old_value = *ptr;
- Atomic32 new_value = old_value + increment;
- if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
- return new_value;
- // The exchange took place as expected.
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory"); }
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef V8_TARGET_ARCH_PPC64
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- for (;;) {
- Atomic64 old_value = *ptr;
- Atomic64 new_value = old_value + increment;
- if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
- return new_value;
- // The exchange took place as expected.
- }
- // Otherwise, *ptr changed mid-loop and we need to retry.
- }
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_s390_gcc.h b/deps/v8/src/base/atomicops_internals_s390_gcc.h
deleted file mode 100644
index 6e34f305e3..0000000000
--- a/deps/v8/src/base/atomicops_internals_s390_gcc.h
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
-
-namespace v8 {
-namespace base {
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- Atomic32 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-inline void MemoryBarrier() { __sync_synchronize(); }
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#ifdef V8_TARGET_ARCH_S390X
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return (__sync_val_compare_and_swap(ptr, old_value, new_value));
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- Atomic64 old_value;
- do {
- old_value = *ptr;
- } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
- return old_value;
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return Barrier_AtomicIncrement(ptr, increment);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return __sync_add_and_fetch(ptr, increment);
-}
-
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- MemoryBarrier();
- *ptr = value;
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr;
- MemoryBarrier();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#endif
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_S390_H_
diff --git a/deps/v8/src/base/atomicops_internals_tsan.h b/deps/v8/src/base/atomicops_internals_tsan.h
deleted file mode 100644
index 646e5bd4b7..0000000000
--- a/deps/v8/src/base/atomicops_internals_tsan.h
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
-
-namespace v8 {
-namespace base {
-
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
-
-extern "C" {
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16; // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64; // NOLINT
-
-#if defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)
-typedef __int128 __tsan_atomic128;
-#define __TSAN_HAS_INT128 1
-#else
-typedef char __tsan_atomic128;
-#define __TSAN_HAS_INT128 0
-#endif
-
-typedef enum {
- __tsan_memory_order_relaxed,
- __tsan_memory_order_consume,
- __tsan_memory_order_acquire,
- __tsan_memory_order_release,
- __tsan_memory_order_acq_rel,
- __tsan_memory_order_seq_cst,
-} __tsan_memory_order;
-
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
- __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
- __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
- __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
- __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
- __tsan_memory_order mo);
-
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
- __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
- __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
- __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
- __tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
- __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
- __tsan_atomic128 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
- __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
- __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
-
-__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
- volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
- volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
- volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
- volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
- volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
-
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-} // extern "C"
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire, __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire, __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release, __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline void MemoryBarrier() {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.cc b/deps/v8/src/base/atomicops_internals_x86_gcc.cc
deleted file mode 100644
index c0310300a1..0000000000
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module gets enough CPU information to optimize the
-// atomicops module on x86.
-
-#include <string.h> // NOLINT(build/include)
-
-#include "src/base/atomicops.h"
-
-// This file only makes sense with atomicops_internals_x86_gcc.h -- it
-// depends on structs that are defined in that file. If atomicops.h
-// doesn't sub-include that file, then we aren't needed, and shouldn't
-// try to do anything.
-#ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-// Inline cpuid instruction. In PIC compilations, %ebx contains the address
-// of the global offset table. To avoid breaking such executables, this code
-// must preserve that register's value across cpuid instructions.
-#if defined(__i386__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%ebx, %%edi\n" \
- "cpuid\n" \
- "xchg %%edi, %%ebx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#elif defined(__x86_64__)
-#define cpuid(a, b, c, d, inp) \
- asm("mov %%rbx, %%rdi\n" \
- "cpuid\n" \
- "xchg %%rdi, %%rbx\n" \
- : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
-#endif
-
-#if defined(cpuid) // initialize the struct only on x86
-
-namespace v8 {
-namespace base {
-
-// Set the flags so that code will run correctly and conservatively, so even
-// if we haven't been initialized yet, we're probably single threaded, and our
-// default values should hopefully be pretty safe.
-struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
- false, // bug can't exist before process spawns multiple threads
-#if !defined(__SSE2__)
- false, // no SSE2
-#endif
-};
-
-} // namespace base
-} // namespace v8
-
-namespace {
-
-// Initialize the AtomicOps_Internalx86CPUFeatures struct.
-void AtomicOps_Internalx86CPUFeaturesInit() {
- using v8::base::AtomicOps_Internalx86CPUFeatures;
-
- uint32_t eax = 0;
- uint32_t ebx = 0;
- uint32_t ecx = 0;
- uint32_t edx = 0;
-
- // Get vendor string (issue CPUID with eax = 0)
- cpuid(eax, ebx, ecx, edx, 0);
- char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
- vendor[12] = 0;
-
- // get feature flags in ecx/edx, and family/model in eax
- cpuid(eax, ebx, ecx, edx, 1);
-
- int family = (eax >> 8) & 0xf; // family and model fields
- int model = (eax >> 4) & 0xf;
- if (family == 0xf) { // use extended family and model fields
- family += (eax >> 20) & 0xff;
- model += ((eax >> 16) & 0xf) << 4;
- }
-
- // Opteron Rev E has a bug in which on very rare occasions a locked
- // instruction doesn't act as a read-acquire barrier if followed by a
- // non-locked read-modify-write instruction. Rev F has this bug in
- // pre-release versions, but not in versions released to customers,
- // so we test only for Rev E, which is family 15, model 32..63 inclusive.
- if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
- family == 15 &&
- 32 <= model && model <= 63) {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
- } else {
- AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
- }
-
-#if !defined(__SSE2__)
- // edx bit 26 is SSE2 which we use to tell use whether we can use mfence
- AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
-#endif
-}
-
-class AtomicOpsx86Initializer {
- public:
- AtomicOpsx86Initializer() {
- AtomicOps_Internalx86CPUFeaturesInit();
- }
-};
-
-
-// A global to get use initialized on startup via static initialization :/
-AtomicOpsx86Initializer g_initer;
-
-} // namespace
-
-#endif // if x86
-
-#endif // ifdef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.h b/deps/v8/src/base/atomicops_internals_x86_gcc.h
deleted file mode 100644
index 55bc44cd8b..0000000000
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.h
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
-
-namespace v8 {
-namespace base {
-
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
-#if !defined(__SSE2__)
- bool has_sse2; // Processor has SSE2.
-#endif
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-// 32-bit low-level operations on any platform.
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 prev;
- __asm__ __volatile__("lock; cmpxchgl %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- return temp + increment;
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- Atomic32 temp = increment;
- __asm__ __volatile__("lock; xaddl %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now holds the old value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
- *ptr = value;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
-}
-
-#if defined(__x86_64__) || defined(__SSE2__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
-inline void MemoryBarrier() {
- __asm__ __volatile__("mfence" : : : "memory");
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-#else
-
-inline void MemoryBarrier() {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- __asm__ __volatile__("mfence" : : : "memory");
- } else { // mfence is faster but not present on PIII
- Atomic32 x = 0;
- NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
- }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
- *ptr = value;
- __asm__ __volatile__("mfence" : : : "memory");
- } else {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier on PIII
- }
-}
-#endif
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- ATOMICOPS_COMPILER_BARRIER();
- *ptr = value; // An x86 store acts as a release barrier.
- // See comments in Atomic64 version of Release_Store(), below.
-}
-
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
- return *ptr;
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return *ptr;
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- Atomic32 value = *ptr; // An x86 load acts as a acquire barrier.
- // See comments in Atomic64 version of Release_Store(), below.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
-
-// 64-bit low-level operations on 64-bit platform.
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 prev;
- __asm__ __volatile__("lock; cmpxchgq %1,%2"
- : "=a" (prev)
- : "q" (new_value), "m" (*ptr), "0" (old_value)
- : "memory");
- return prev;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
- : "=r" (new_value)
- : "m" (*ptr), "0" (new_value)
- : "memory");
- return new_value; // Now it's the previous value.
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- return temp + increment;
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- Atomic64 temp = increment;
- __asm__ __volatile__("lock; xaddq %0,%1"
- : "+r" (temp), "+m" (*ptr)
- : : "memory");
- // temp now contains the previous value of *ptr
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return temp + increment;
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- *ptr = value;
- MemoryBarrier();
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- ATOMICOPS_COMPILER_BARRIER();
-
- *ptr = value; // An x86 store acts as a release barrier
- // for current AMD/Intel chips as of Jan 2008.
- // See also Acquire_Load(), below.
-
- // When new chips come out, check:
- // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
- // System Programming Guide, Chatper 7: Multiple-processor management,
- // Section 7.2, Memory Ordering.
- // Last seen at:
- // http://developer.intel.com/design/pentium4/manuals/index_new.htm
- //
- // x86 stores/loads fail to act as barriers for a few instructions (clflush
- // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
- // not generated by the compiler, and are rare. Users of these instructions
- // need to know about cache behaviour in any case since all of these involve
- // either flushing cache lines or non-temporal cache hints.
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return *ptr;
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- Atomic64 value = *ptr; // An x86 load acts as a acquire barrier,
- // for current AMD/Intel chips as of Jan 2008.
- // See also Release_Store(), above.
- ATOMICOPS_COMPILER_BARRIER();
- return value;
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
- if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
- __asm__ __volatile__("lfence" : : : "memory");
- }
- return x;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-}
-
-#endif // defined(__x86_64__)
-
-} // namespace base
-} // namespace v8
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
diff --git a/deps/v8/src/base/atomicops_internals_x86_msvc.h b/deps/v8/src/base/atomicops_internals_x86_msvc.h
index c37bc78df6..0d2068e9f0 100644
--- a/deps/v8/src/base/atomicops_internals_x86_msvc.h
+++ b/deps/v8/src/base/atomicops_internals_x86_msvc.h
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This file is an internal atomic implementation, use atomicops.h instead.
+// This file is an internal atomic implementation, use base/atomicops.h instead.
#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
@@ -26,25 +26,23 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
LONG result = InterlockedCompareExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value),
+ reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
static_cast<LONG>(old_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
- LONG result = InterlockedExchange(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(new_value));
+ LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
return static_cast<Atomic32>(result);
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
- return InterlockedExchangeAdd(
- reinterpret_cast<volatile LONG*>(ptr),
- static_cast<LONG>(increment)) + increment;
+ return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) +
+ increment;
}
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
@@ -52,9 +50,6 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
return Barrier_AtomicIncrement(ptr, increment);
}
-#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
-#error "We require at least vs2005 for MemoryBarrier"
-#endif
inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
// See #undef and note at the top of this file.
@@ -85,11 +80,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
// See comments in Atomic64 version of Release_Store() below.
@@ -108,16 +98,11 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
return value;
}
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
#if defined(_WIN64)
// 64-bit low-level operations on 64-bit platform.
-STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
+static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
@@ -152,11 +137,6 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- NoBarrier_AtomicExchange(ptr, value);
- // acts as a barrier in this implementation
-}
-
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value; // works w/o barrier for current Intel chips as of June 2005
@@ -177,11 +157,6 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
return value;
}
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- MemoryBarrier();
- return *ptr;
-}
-
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
diff --git a/deps/v8/src/base/base-export.h b/deps/v8/src/base/base-export.h
new file mode 100644
index 0000000000..a2b3dacaf7
--- /dev/null
+++ b/deps/v8/src/base/base-export.h
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BASE_EXPORT_H_
+#define V8_BASE_BASE_EXPORT_H_
+
+#include "include/v8config.h"
+
+#if V8_OS_WIN
+
+#ifdef BUILDING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __declspec(dllexport)
+#elif USING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __declspec(dllimport)
+#else
+#define V8_BASE_EXPORT
+#endif // BUILDING_V8_BASE_SHARED
+
+#else // !V8_OS_WIN
+
+// Setup for Linux shared library export.
+#ifdef BUILDING_V8_BASE_SHARED
+#define V8_BASE_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_BASE_EXPORT
+#endif
+
+#endif // V8_OS_WIN
+
+#endif // V8_BASE_BASE_EXPORT_H_
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index da12ee60fe..b1864940b8 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -6,6 +6,8 @@
#define V8_BASE_BITS_H_
#include <stdint.h>
+
+#include "src/base/base-export.h"
#include "src/base/macros.h"
#if V8_CC_MSVC
#include <intrin.h>
@@ -172,8 +174,7 @@ inline bool IsPowerOfTwo64(uint64_t value) {
// power of two, it is returned as is. |value| must be less than or equal to
// 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
// Jr., figure 3-3, page 48, where the function is called clp2.
-uint32_t RoundUpToPowerOfTwo32(uint32_t value);
-
+V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
@@ -241,7 +242,7 @@ inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
// SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow.
-bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
+V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val);
// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
@@ -265,31 +266,28 @@ inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
// SignedMulOverflow64(lhs,rhs,val) performs a signed multiplication of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed multiplication resulted in an overflow.
-bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
+V8_BASE_EXPORT bool SignedMulOverflow64(int64_t lhs, int64_t rhs, int64_t* val);
// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
// |rhs|, extracts the most significant 32 bits of the result, and returns
// those.
-int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
// adds the accumulate value |acc|.
-int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc);
-
+V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs,
+ int32_t acc);
// SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to int32. If |rhs| is zero, then zero is returned. If |lhs|
// is minint and |rhs| is -1, it returns minint.
-int32_t SignedDiv32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs);
// SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs|
// is -1, it returns zero.
-int32_t SignedMod32(int32_t lhs, int32_t rhs);
-
+V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs);
// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
@@ -319,18 +317,16 @@ inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
// Clamp |value| on overflow and underflow conditions.
-int64_t FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
-
+V8_BASE_EXPORT int64_t
+FromCheckedNumeric(const internal::CheckedNumeric<int64_t> value);
// SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|,
// checks and returns the result.
-int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
-
+V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs);
// SignedSaturatedSub64(lhs, rhs) substracts |lhs| by |rhs|,
// checks and returns the result.
-int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
-
+V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs);
} // namespace bits
} // namespace base
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index 822893ffec..1858caa047 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -60,4 +60,46 @@
#define STATIC_CONST_MEMBER_DEFINITION
#endif
+#if V8_CC_MSVC
+
+#include <sal.h>
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress : n))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) \
+ MSVC_SUPPRESS_WARNING(4275) \
+ code
+
+#else // Not MSVC
+
+#define MSVC_SUPPRESS_WARNING(n)
+#define NON_EXPORTED_BASE(code) code
+
+#endif // V8_CC_MSVC
+
#endif // V8_BASE_COMPILER_SPECIFIC_H_
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 7757192920..cf1f9c399d 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -415,7 +415,7 @@ CPU::CPU()
}
// Check if CPU has non stoppable time stamp counter.
- const int parameter_containing_non_stop_time_stamp_counter = 0x80000007;
+ const unsigned parameter_containing_non_stop_time_stamp_counter = 0x80000007;
if (num_ext_ids >= parameter_containing_non_stop_time_stamp_counter) {
__cpuid(cpu_info, parameter_containing_non_stop_time_stamp_counter);
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
@@ -607,7 +607,7 @@ CPU::CPU()
char* implementer = cpu_info.ExtractField("CPU implementer");
if (implementer != NULL) {
char* end;
- implementer_ = strtol(implementer, &end, 0);
+ implementer_ = static_cast<int>(strtol(implementer, &end, 0));
if (end == implementer) {
implementer_ = 0;
}
@@ -617,7 +617,7 @@ CPU::CPU()
char* variant = cpu_info.ExtractField("CPU variant");
if (variant != NULL) {
char* end;
- variant_ = strtol(variant, &end, 0);
+ variant_ = static_cast<int>(strtol(variant, &end, 0));
if (end == variant) {
variant_ = -1;
}
@@ -628,7 +628,7 @@ CPU::CPU()
char* part = cpu_info.ExtractField("CPU part");
if (part != NULL) {
char* end;
- part_ = strtol(part, &end, 0);
+ part_ = static_cast<int>(strtol(part, &end, 0));
if (end == part) {
part_ = 0;
}
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index 19d4102f5b..e0fcea1ca0 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -13,6 +13,7 @@
#ifndef V8_BASE_CPU_H_
#define V8_BASE_CPU_H_
+#include "src/base/base-export.h"
#include "src/base/macros.h"
namespace v8 {
@@ -28,7 +29,7 @@ namespace base {
// architectures. For each architecture the file cpu_<arch>.cc contains the
// implementation of these static functions.
-class CPU final {
+class V8_BASE_EXPORT CPU final {
public:
CPU();
diff --git a/deps/v8/src/base/debug/stack_trace.h b/deps/v8/src/base/debug/stack_trace.h
index e938ef2868..1361bb545a 100644
--- a/deps/v8/src/base/debug/stack_trace.h
+++ b/deps/v8/src/base/debug/stack_trace.h
@@ -13,6 +13,7 @@
#include <iosfwd>
#include <string>
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
#if V8_OS_POSIX
@@ -31,8 +32,8 @@ namespace debug {
// Enables stack dump to console output on exception and signals.
// When enabled, the process will quit immediately. This is meant to be used in
// tests only!
-bool EnableInProcessStackDumping();
-void DisableSignalStackDump();
+V8_BASE_EXPORT bool EnableInProcessStackDumping();
+V8_BASE_EXPORT void DisableSignalStackDump();
// A stacktrace can be helpful in debugging. For example, you can include a
// stacktrace member in a object (probably around #ifndef NDEBUG) so that you
diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc
index 5167b7a60c..03d198e9bf 100644
--- a/deps/v8/src/base/division-by-constant.cc
+++ b/deps/v8/src/base/division-by-constant.cc
@@ -13,13 +13,6 @@ namespace v8 {
namespace base {
template <class T>
-bool MagicNumbersForDivision<T>::operator==(
- const MagicNumbersForDivision& rhs) const {
- return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
-}
-
-
-template <class T>
MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
@@ -100,8 +93,8 @@ MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
// -----------------------------------------------------------------------------
// Instantiations.
-template struct MagicNumbersForDivision<uint32_t>;
-template struct MagicNumbersForDivision<uint64_t>;
+template struct V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>;
+template struct V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>;
template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
diff --git a/deps/v8/src/base/division-by-constant.h b/deps/v8/src/base/division-by-constant.h
index 02e7e14b01..5d063f8bd5 100644
--- a/deps/v8/src/base/division-by-constant.h
+++ b/deps/v8/src/base/division-by-constant.h
@@ -5,6 +5,10 @@
#ifndef V8_BASE_DIVISION_BY_CONSTANT_H_
#define V8_BASE_DIVISION_BY_CONSTANT_H_
+#include <stdint.h>
+
+#include "src/base/base-export.h"
+
namespace v8 {
namespace base {
@@ -14,10 +18,12 @@ namespace base {
// Delight", chapter 10. The template parameter must be one of the unsigned
// integral types.
template <class T>
-struct MagicNumbersForDivision {
+struct V8_BASE_EXPORT MagicNumbersForDivision {
MagicNumbersForDivision(T m, unsigned s, bool a)
: multiplier(m), shift(s), add(a) {}
- bool operator==(const MagicNumbersForDivision& rhs) const;
+ bool operator==(const MagicNumbersForDivision& rhs) const {
+ return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
+ }
T multiplier;
unsigned shift;
@@ -28,17 +34,26 @@ struct MagicNumbersForDivision {
// Calculate the multiplier and shift for signed division via multiplication.
// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
template <class T>
-MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
-
+V8_BASE_EXPORT MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
// Calculate the multiplier and shift for unsigned division via multiplication,
// see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
// leading_zeros can be used to speed up the calculation if the given number of
// upper bits of the dividend value are known to be zero.
template <class T>
-MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+V8_BASE_EXPORT MagicNumbersForDivision<T> UnsignedDivisionByConstant(
T d, unsigned leading_zeros = 0);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
+SignedDivisionByConstant(uint32_t d);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
+SignedDivisionByConstant(uint64_t d);
+
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint32_t>
+UnsignedDivisionByConstant(uint32_t d, unsigned leading_zeros);
+extern template V8_BASE_EXPORT MagicNumbersForDivision<uint64_t>
+UnsignedDivisionByConstant(uint64_t d, unsigned leading_zeros);
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/file-utils.cc b/deps/v8/src/base/file-utils.cc
index 2262df97d0..31b1b41190 100644
--- a/deps/v8/src/base/file-utils.cc
+++ b/deps/v8/src/base/file-utils.cc
@@ -10,13 +10,13 @@
#include "src/base/platform/platform.h"
namespace v8 {
-namespace internal {
+namespace base {
char* RelativePath(char** buffer, const char* exec_path, const char* name) {
DCHECK(exec_path);
int path_separator = static_cast<int>(strlen(exec_path)) - 1;
while (path_separator >= 0 &&
- !base::OS::isDirectorySeparator(exec_path[path_separator])) {
+ !OS::isDirectorySeparator(exec_path[path_separator])) {
path_separator--;
}
if (path_separator >= 0) {
@@ -32,5 +32,5 @@ char* RelativePath(char** buffer, const char* exec_path, const char* name) {
return *buffer;
}
-} // namespace internal
+} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/file-utils.h b/deps/v8/src/base/file-utils.h
index ce9e9a1c41..271f0ffb05 100644
--- a/deps/v8/src/base/file-utils.h
+++ b/deps/v8/src/base/file-utils.h
@@ -2,17 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_FILE_UTILS_H_
-#define V8_FILE_UTILS_H_
+#ifndef V8_BASE_FILE_UTILS_H_
+#define V8_BASE_FILE_UTILS_H_
+
+#include "src/base/base-export.h"
namespace v8 {
-namespace internal {
+namespace base {
// Helper functions to manipulate file paths.
-char* RelativePath(char** buffer, const char* exec_path, const char* name);
+V8_BASE_EXPORT char* RelativePath(char** buffer, const char* exec_path,
+ const char* name);
-} // namespace internal
+} // namespace base
} // namespace v8
#endif // V8_FILE_UTILS_H_
diff --git a/deps/v8/src/base/functional.h b/deps/v8/src/base/functional.h
index ff0d8075b9..634e7bac85 100644
--- a/deps/v8/src/base/functional.h
+++ b/deps/v8/src/base/functional.h
@@ -13,6 +13,7 @@
#include <functional>
#include <utility>
+#include "src/base/base-export.h"
#include "src/base/macros.h"
namespace v8 {
@@ -67,7 +68,7 @@ struct hash;
V8_INLINE size_t hash_combine() { return 0u; }
V8_INLINE size_t hash_combine(size_t seed) { return seed; }
-size_t hash_combine(size_t seed, size_t value);
+V8_BASE_EXPORT size_t hash_combine(size_t seed, size_t value);
template <typename T, typename... Ts>
V8_INLINE size_t hash_combine(T const& v, Ts const&... vs) {
return hash_combine(hash_combine(vs...), hash<T>()(v));
@@ -91,9 +92,9 @@ V8_BASE_HASH_VALUE_TRIVIAL(unsigned char)
V8_BASE_HASH_VALUE_TRIVIAL(unsigned short) // NOLINT(runtime/int)
#undef V8_BASE_HASH_VALUE_TRIVIAL
-size_t hash_value(unsigned int);
-size_t hash_value(unsigned long); // NOLINT(runtime/int)
-size_t hash_value(unsigned long long); // NOLINT(runtime/int)
+V8_BASE_EXPORT size_t hash_value(unsigned int);
+V8_BASE_EXPORT size_t hash_value(unsigned long); // NOLINT(runtime/int)
+V8_BASE_EXPORT size_t hash_value(unsigned long long); // NOLINT(runtime/int)
#define V8_BASE_HASH_VALUE_SIGNED(type) \
V8_INLINE size_t hash_value(signed type v) { \
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index 54038c5ef3..d2fc1337a6 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -229,9 +229,8 @@ template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
void TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Clear() {
// Mark all entries as empty.
- const Entry* end = map_end();
- for (Entry* entry = map_; entry < end; entry++) {
- entry->clear();
+ for (size_t i = 0; i < capacity_; ++i) {
+ map_[i].clear();
}
occupancy_ = 0;
}
@@ -264,19 +263,15 @@ typename TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Entry*
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::Probe(
const Key& key, uint32_t hash) const {
DCHECK(base::bits::IsPowerOfTwo32(capacity_));
- Entry* entry = map_ + (hash & (capacity_ - 1));
- const Entry* end = map_end();
- DCHECK(map_ <= entry && entry < end);
+ size_t i = hash & (capacity_ - 1);
+ DCHECK(i < capacity_);
DCHECK(occupancy_ < capacity_); // Guarantees loop termination.
- while (entry->exists() && !match_(hash, entry->hash, key, entry->key)) {
- entry++;
- if (entry >= end) {
- entry = map_;
- }
+ while (map_[i].exists() && !match_(hash, map_[i].hash, key, map_[i].key)) {
+ i = (i + 1) & (capacity_ - 1);
}
- return entry;
+ return &map_[i];
}
template <typename Key, typename Value, typename MatchFun,
diff --git a/deps/v8/src/base/ieee754.h b/deps/v8/src/base/ieee754.h
index 80523a1414..72f3db15ef 100644
--- a/deps/v8/src/base/ieee754.h
+++ b/deps/v8/src/base/ieee754.h
@@ -5,73 +5,75 @@
#ifndef V8_BASE_IEEE754_H_
#define V8_BASE_IEEE754_H_
+#include "src/base/base-export.h"
+
namespace v8 {
namespace base {
namespace ieee754 {
// Returns the arc cosine of |x|; that is the value whose cosine is |x|.
-double acos(double x);
+V8_BASE_EXPORT double acos(double x);
// Returns the inverse hyperbolic cosine of |x|; that is the value whose
// hyperbolic cosine is |x|.
-double acosh(double x);
+V8_BASE_EXPORT double acosh(double x);
// Returns the arc sine of |x|; that is the value whose sine is |x|.
-double asin(double x);
+V8_BASE_EXPORT double asin(double x);
// Returns the inverse hyperbolic sine of |x|; that is the value whose
// hyperbolic sine is |x|.
-double asinh(double x);
+V8_BASE_EXPORT double asinh(double x);
// Returns the principal value of the arc tangent of |x|; that is the value
// whose tangent is |x|.
-double atan(double x);
+V8_BASE_EXPORT double atan(double x);
// Returns the principal value of the arc tangent of |y/x|, using the signs of
// the two arguments to determine the quadrant of the result.
-double atan2(double y, double x);
+V8_BASE_EXPORT double atan2(double y, double x);
// Returns the cosine of |x|, where |x| is given in radians.
-double cos(double x);
+V8_BASE_EXPORT double cos(double x);
// Returns the base-e exponential of |x|.
-double exp(double x);
+V8_BASE_EXPORT double exp(double x);
-double atanh(double x);
+V8_BASE_EXPORT double atanh(double x);
// Returns the natural logarithm of |x|.
-double log(double x);
+V8_BASE_EXPORT double log(double x);
// Returns a value equivalent to |log(1+x)|, but computed in a way that is
// accurate even if the value of |x| is near zero.
-double log1p(double x);
+V8_BASE_EXPORT double log1p(double x);
// Returns the base 2 logarithm of |x|.
-double log2(double x);
+V8_BASE_EXPORT double log2(double x);
// Returns the base 10 logarithm of |x|.
-double log10(double x);
+V8_BASE_EXPORT double log10(double x);
// Returns the cube root of |x|.
-double cbrt(double x);
+V8_BASE_EXPORT double cbrt(double x);
// Returns exp(x)-1, the exponential of |x| minus 1.
-double expm1(double x);
+V8_BASE_EXPORT double expm1(double x);
// Returns the sine of |x|, where |x| is given in radians.
-double sin(double x);
+V8_BASE_EXPORT double sin(double x);
// Returns the tangent of |x|, where |x| is given in radians.
-double tan(double x);
+V8_BASE_EXPORT double tan(double x);
// Returns the hyperbolic cosine of |x|, where |x| is given radians.
-double cosh(double x);
+V8_BASE_EXPORT double cosh(double x);
// Returns the hyperbolic sine of |x|, where |x| is given radians.
-double sinh(double x);
+V8_BASE_EXPORT double sinh(double x);
// Returns the hyperbolic tangent of |x|, where |x| is given radians.
-double tanh(double x);
+V8_BASE_EXPORT double tanh(double x);
} // namespace ieee754
} // namespace base
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 50fceca88b..7bbb82a485 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -9,10 +9,11 @@
#include <sstream>
#include <string>
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
-extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN
+extern "C" PRINTF_FORMAT(3, 4) V8_NORETURN V8_BASE_EXPORT
void V8_Fatal(const char* file, int line, const char* format, ...);
// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
@@ -87,8 +88,8 @@ std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
// Commonly used instantiations of MakeCheckOpString<>. Explicitly instantiated
// in logging.cc.
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \
- extern template std::string* MakeCheckOpString<type, type>( \
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+ extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
type const&, type const&, char const*);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
@@ -117,10 +118,11 @@ DEFINE_MAKE_CHECK_OP_STRING(void const*)
char const* msg) { \
return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
} \
- extern template std::string* Check##NAME##Impl<float, float>( \
+ extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
float const& lhs, float const& rhs, char const* msg); \
- extern template std::string* Check##NAME##Impl<double, double>( \
- double const& lhs, double const& rhs, char const* msg);
+ extern template V8_BASE_EXPORT std::string* \
+ Check##NAME##Impl<double, double>(double const& lhs, double const& rhs, \
+ char const* msg);
DEFINE_CHECK_OP_IMPL(EQ, ==)
DEFINE_CHECK_OP_IMPL(NE, !=)
DEFINE_CHECK_OP_IMPL(LE, <=)
diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h
index 790a8866e0..8008812d75 100644
--- a/deps/v8/src/base/once.h
+++ b/deps/v8/src/base/once.h
@@ -55,6 +55,7 @@
#include <stddef.h>
#include "src/base/atomicops.h"
+#include "src/base/base-export.h"
namespace v8 {
namespace base {
@@ -79,7 +80,8 @@ struct OneArgFunction {
typedef void (*type)(T);
};
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
+V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
+ void* arg);
inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
diff --git a/deps/v8/src/base/platform/condition-variable.h b/deps/v8/src/base/platform/condition-variable.h
index 72d6f28507..48e7c369ca 100644
--- a/deps/v8/src/base/platform/condition-variable.h
+++ b/deps/v8/src/base/platform/condition-variable.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
#define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+#include "src/base/base-export.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
@@ -28,7 +29,7 @@ class TimeDelta;
// the mutex and suspend the execution of the calling thread. When the condition
// variable is notified, the thread is awakened, and the mutex is reacquired.
-class ConditionVariable final {
+class V8_BASE_EXPORT ConditionVariable final {
public:
ConditionVariable();
~ConditionVariable();
diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h
index 61df19d66a..e7231bdd9e 100644
--- a/deps/v8/src/base/platform/mutex.h
+++ b/deps/v8/src/base/platform/mutex.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_MUTEX_H_
#define V8_BASE_PLATFORM_MUTEX_H_
+#include "src/base/base-export.h"
#include "src/base/lazy-instance.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
@@ -33,7 +34,7 @@ namespace base {
// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
// while still owned by some thread. The Mutex class is non-copyable.
-class Mutex final {
+class V8_BASE_EXPORT Mutex final {
public:
Mutex();
~Mutex();
@@ -127,7 +128,7 @@ typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
// The behavior of a program is undefined if a recursive mutex is destroyed
// while still owned by some thread. The RecursiveMutex class is non-copyable.
-class RecursiveMutex final {
+class V8_BASE_EXPORT RecursiveMutex final {
public:
RecursiveMutex();
~RecursiveMutex();
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index d3b6c9c1cf..5d570e7048 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -25,6 +25,7 @@
#include <string>
#include <vector>
+#include "src/base/base-export.h"
#include "src/base/build_config.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/mutex.h"
@@ -69,7 +70,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
#define V8_FAST_TLS_SUPPORTED 1
-extern intptr_t kMacTlsBaseOffset;
+extern V8_BASE_EXPORT intptr_t kMacTlsBaseOffset;
INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
@@ -102,7 +103,7 @@ class TimezoneCache;
// functions. Add methods here to cope with differences between the
// supported platforms.
-class OS {
+class V8_BASE_EXPORT OS {
public:
// Initialize the OS class.
// - random_seed: Used for the GetRandomMmapAddress() if non-zero.
@@ -211,7 +212,7 @@ class OS {
char text[kStackWalkMaxTextLen];
};
- class MemoryMappedFile {
+ class V8_BASE_EXPORT MemoryMappedFile {
public:
virtual ~MemoryMappedFile() {}
virtual void* memory() const = 0;
@@ -286,7 +287,7 @@ class OS {
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy-contructing. This removes the reserved memory
// from the original object.
-class VirtualMemory {
+class V8_BASE_EXPORT VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
@@ -418,7 +419,7 @@ class VirtualMemory {
// thread. The Thread object should not be deallocated before the thread has
// terminated.
-class Thread {
+class V8_BASE_EXPORT Thread {
public:
// Opaque data type for thread-local storage keys.
typedef int32_t LocalStorageKey;
diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h
index 39029c83fc..31aeca3d9b 100644
--- a/deps/v8/src/base/platform/semaphore.h
+++ b/deps/v8/src/base/platform/semaphore.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_SEMAPHORE_H_
#define V8_BASE_PLATFORM_SEMAPHORE_H_
+#include "src/base/base-export.h"
#include "src/base/lazy-instance.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
@@ -31,7 +32,7 @@ class TimeDelta;
// count reaches zero, threads waiting for the semaphore blocks until the
// count becomes non-zero.
-class Semaphore final {
+class V8_BASE_EXPORT Semaphore final {
public:
explicit Semaphore(int count);
~Semaphore();
diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h
index be62014f91..ed1751268f 100644
--- a/deps/v8/src/base/platform/time.h
+++ b/deps/v8/src/base/platform/time.h
@@ -9,6 +9,7 @@
#include <iosfwd>
#include <limits>
+#include "src/base/base-export.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/safe_math.h"
@@ -42,7 +43,7 @@ class TimeBase;
// This class represents a duration of time, internally represented in
// microseonds.
-class TimeDelta final {
+class V8_BASE_EXPORT TimeDelta final {
public:
TimeDelta() : delta_(0) {}
@@ -277,7 +278,7 @@ class TimeBase {
// This class represents an absolute point in time, internally represented as
// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
-class Time final : public time_internal::TimeBase<Time> {
+class V8_BASE_EXPORT Time final : public time_internal::TimeBase<Time> {
public:
// Contains the NULL time. Use Time::Now() to get the current time.
Time() : TimeBase(0) {}
@@ -322,7 +323,7 @@ class Time final : public time_internal::TimeBase<Time> {
explicit Time(int64_t us) : TimeBase(us) {}
};
-std::ostream& operator<<(std::ostream&, const Time&);
+V8_BASE_EXPORT std::ostream& operator<<(std::ostream&, const Time&);
inline Time operator+(const TimeDelta& delta, const Time& time) {
return time + delta;
@@ -339,7 +340,8 @@ inline Time operator+(const TimeDelta& delta, const Time& time) {
// Time::Now() may actually decrease or jump). But note that TimeTicks may
// "stand still", for example if the computer suspended.
-class TimeTicks final : public time_internal::TimeBase<TimeTicks> {
+class V8_BASE_EXPORT TimeTicks final
+ : public time_internal::TimeBase<TimeTicks> {
public:
TimeTicks() : TimeBase(0) {}
@@ -376,7 +378,8 @@ inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
// Represents a clock, specific to a particular thread, than runs only while the
// thread is running.
-class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
+class V8_BASE_EXPORT ThreadTicks final
+ : public time_internal::TimeBase<ThreadTicks> {
public:
ThreadTicks() : TimeBase(0) {}
@@ -408,6 +411,9 @@ class ThreadTicks final : public time_internal::TimeBase<ThreadTicks> {
#endif
private:
+ template <class TimeClass>
+ friend class time_internal::TimeBase;
+
// Please use Now() or GetForThread() to create a new object. This is for
// internal use and testing. Ticks are in microseconds.
explicit ThreadTicks(int64_t ticks) : TimeBase(ticks) {}
diff --git a/deps/v8/src/base/ring-buffer.h b/deps/v8/src/base/ring-buffer.h
new file mode 100644
index 0000000000..b347977640
--- /dev/null
+++ b/deps/v8/src/base/ring-buffer.h
@@ -0,0 +1,54 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_RING_BUFFER_H_
+#define V8_BASE_RING_BUFFER_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <typename T>
+class RingBuffer {
+ public:
+ RingBuffer() { Reset(); }
+ static const int kSize = 10;
+ void Push(const T& value) {
+ if (count_ == kSize) {
+ elements_[start_++] = value;
+ if (start_ == kSize) start_ = 0;
+ } else {
+ DCHECK_EQ(start_, 0);
+ elements_[count_++] = value;
+ }
+ }
+
+ int Count() const { return count_; }
+
+ template <typename Callback>
+ T Sum(Callback callback, const T& initial) const {
+ int j = start_ + count_ - 1;
+ if (j >= kSize) j -= kSize;
+ T result = initial;
+ for (int i = 0; i < count_; i++) {
+ result = callback(result, elements_[j]);
+ if (--j == -1) j += kSize;
+ }
+ return result;
+ }
+
+ void Reset() { start_ = count_ = 0; }
+
+ private:
+ T elements_[kSize];
+ int start_;
+ int count_;
+ DISALLOW_COPY_AND_ASSIGN(RingBuffer);
+};
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_RING_BUFFER_H_
diff --git a/deps/v8/src/base/sys-info.h b/deps/v8/src/base/sys-info.h
index 4504c82e37..772f44336f 100644
--- a/deps/v8/src/base/sys-info.h
+++ b/deps/v8/src/base/sys-info.h
@@ -6,12 +6,14 @@
#define V8_BASE_SYS_INFO_H_
#include <stdint.h>
+
+#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
namespace v8 {
namespace base {
-class SysInfo final {
+class V8_BASE_EXPORT SysInfo final {
public:
// Returns the number of logical processors/core on the current machine.
static int NumberOfProcessors();
diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h
index cd3e6bfdc8..7a322b5332 100644
--- a/deps/v8/src/base/utils/random-number-generator.h
+++ b/deps/v8/src/base/utils/random-number-generator.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#include "src/base/base-export.h"
#include "src/base/macros.h"
namespace v8 {
@@ -31,7 +32,7 @@ namespace base {
// https://code.google.com/p/v8/issues/detail?id=2905
// This class is neither reentrant nor threadsafe.
-class RandomNumberGenerator final {
+class V8_BASE_EXPORT RandomNumberGenerator final {
public:
// EntropySource is used as a callback function when V8 needs a source of
// entropy.
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 62cebfb732..ba5f4d5c1d 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -364,17 +364,6 @@ void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
InstallFunction(target, name, function, name_string, attributes);
}
-Handle<JSFunction> InstallGetter(Handle<JSObject> target,
- Handle<Name> property_name,
- Handle<JSFunction> getter,
- PropertyAttributes attributes = DONT_ENUM) {
- Handle<Object> setter = target->GetIsolate()->factory()->undefined_value();
- JSObject::DefineAccessor(target, property_name, getter, setter, attributes)
- .Check();
- getter->shared()->set_native(true);
- return getter;
-}
-
Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
InstanceType type, int instance_size,
MaybeHandle<JSObject> maybe_prototype,
@@ -460,17 +449,54 @@ Handle<JSFunction> SimpleInstallFunction(Handle<JSObject> base,
return fun;
}
+void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
+ Builtins::Name call_getter,
+ Builtins::Name call_setter,
+ PropertyAttributes attribs) {
+ Isolate* const isolate = base->GetIsolate();
+
+ Handle<String> getter_name =
+ Name::ToFunctionName(name, isolate->factory()->get_string())
+ .ToHandleChecked();
+ Handle<JSFunction> getter =
+ SimpleCreateFunction(isolate, getter_name, call_getter, 0, true);
+ getter->shared()->set_native(true);
+
+ Handle<String> setter_name =
+ Name::ToFunctionName(name, isolate->factory()->set_string())
+ .ToHandleChecked();
+ Handle<JSFunction> setter =
+ SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
+ setter->shared()->set_native(true);
+
+ JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
+}
+
Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
- Handle<String> name, Builtins::Name call,
- bool adapt) {
+ Handle<String> name,
+ Handle<Name> property_name,
+ Builtins::Name call, bool adapt) {
Isolate* const isolate = base->GetIsolate();
- Handle<String> fun_name =
+
+ Handle<String> getter_name =
Name::ToFunctionName(name, isolate->factory()->get_string())
.ToHandleChecked();
- Handle<JSFunction> fun =
- SimpleCreateFunction(isolate, fun_name, call, 0, adapt);
- InstallGetter(base, name, fun);
- return fun;
+ Handle<JSFunction> getter =
+ SimpleCreateFunction(isolate, getter_name, call, 0, adapt);
+ getter->shared()->set_native(true);
+
+ Handle<Object> setter = isolate->factory()->undefined_value();
+
+ JSObject::DefineAccessor(base, property_name, getter, setter, DONT_ENUM)
+ .Check();
+
+ return getter;
+}
+
+Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
+ Handle<String> name, Builtins::Name call,
+ bool adapt) {
+ return SimpleInstallGetter(base, name, name, call, adapt);
}
Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
@@ -934,8 +960,9 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
if (global_proxy_template.IsEmpty()) {
Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = isolate()->builtins()->Illegal();
- global_proxy_function = factory()->NewFunction(
- name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
+ global_proxy_function =
+ factory()->NewFunction(name, code, JS_GLOBAL_PROXY_TYPE,
+ JSGlobalProxy::SizeWithInternalFields(0));
} else {
Handle<ObjectTemplateInfo> data =
v8::Utils::OpenHandle(*global_proxy_template);
@@ -1030,7 +1057,6 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Builtins::kErrorPrototypeToString, 0, true);
isolate->native_context()->set_error_to_string(*to_string_fun);
} else {
- DCHECK(context_index != Context::ERROR_FUNCTION_INDEX);
DCHECK(isolate->native_context()->error_to_string()->IsJSFunction());
InstallFunction(prototype, isolate->error_to_string(),
@@ -1104,9 +1130,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(object_function, factory->assign_string(),
Builtins::kObjectAssign, 2, false);
SimpleInstallFunction(object_function, factory->create_string(),
- Builtins::kObjectCreate, 2, false);
+ Builtins::kObjectCreate, 2, true);
SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
+ SimpleInstallFunction(object_function,
+ factory->getOwnPropertyDescriptors_string(),
+ Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
SimpleInstallFunction(object_function, "getOwnPropertyNames",
Builtins::kObjectGetOwnPropertyNames, 1, false);
SimpleInstallFunction(object_function, "getOwnPropertySymbols",
@@ -1136,6 +1165,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
object_function, "getPrototypeOf", Builtins::kObjectGetPrototypeOf,
1, false);
native_context()->set_object_get_prototype_of(*object_get_prototype_of);
+ SimpleInstallFunction(object_function, "setPrototypeOf",
+ Builtins::kObjectSetPrototypeOf, 2, false);
Handle<JSFunction> object_is_extensible = SimpleInstallFunction(
object_function, "isExtensible", Builtins::kObjectIsExtensible,
@@ -1153,6 +1184,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> object_keys = SimpleInstallFunction(
object_function, "keys", Builtins::kObjectKeys, 1, false);
native_context()->set_object_keys(*object_keys);
+ SimpleInstallFunction(object_function, factory->entries_string(),
+ Builtins::kObjectEntries, 1, false);
+ SimpleInstallFunction(object_function, factory->values_string(),
+ Builtins::kObjectValues, 1, false);
SimpleInstallFunction(isolate->initial_object_prototype(),
"__defineGetter__", Builtins::kObjectDefineGetter, 2,
@@ -1171,6 +1206,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(
isolate->initial_object_prototype(), "propertyIsEnumerable",
Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
+
+ SimpleInstallGetterSetter(isolate->initial_object_prototype(),
+ factory->proto_string(),
+ Builtins::kObjectPrototypeGetProto,
+ Builtins::kObjectPrototypeSetProto, DONT_ENUM);
}
Handle<JSObject> global(native_context()->global_object());
@@ -1280,6 +1320,97 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_is_arraylike(*is_arraylike);
}
+ { // --- A r r a y I t e r a t o r ---
+ Handle<JSObject> iterator_prototype(
+ native_context()->initial_iterator_prototype());
+
+ Handle<JSObject> array_iterator_prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ JSObject::ForceSetPrototype(array_iterator_prototype, iterator_prototype);
+
+ JSObject::AddProperty(
+ array_iterator_prototype, factory->to_string_tag_symbol(),
+ factory->ArrayIterator_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<JSFunction> next = InstallFunction(
+ array_iterator_prototype, "next", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kArrayIteratorPrototypeNext);
+ next->shared()->set_builtin_function_id(kArrayIteratorNext);
+
+ // Set the expected parameters for %ArrayIteratorPrototype%.next to 0 (not
+ // including the receiver), as required by the builtin.
+ next->shared()->set_internal_formal_parameter_count(0);
+
+ // Set the length for the function to satisfy ECMA-262.
+ next->shared()->set_length(0);
+
+ Handle<JSFunction> array_iterator_function = CreateFunction(
+ isolate, factory->ArrayIterator_string(),
+ JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize,
+ array_iterator_prototype, Builtins::kIllegal);
+ array_iterator_function->shared()->set_instance_class_name(
+ isolate->heap()->ArrayIterator_string());
+
+ native_context()->set_initial_array_iterator_prototype(
+ *array_iterator_prototype);
+ native_context()->set_initial_array_iterator_prototype_map(
+ array_iterator_prototype->map());
+
+ Handle<Map> initial_map(array_iterator_function->initial_map(), isolate);
+
+#define ARRAY_ITERATOR_LIST(V) \
+ V(TYPED_ARRAY, KEY, typed_array, key) \
+ V(FAST_ARRAY, KEY, fast_array, key) \
+ V(GENERIC_ARRAY, KEY, array, key) \
+ V(UINT8_ARRAY, KEY_VALUE, uint8_array, key_value) \
+ V(INT8_ARRAY, KEY_VALUE, int8_array, key_value) \
+ V(UINT16_ARRAY, KEY_VALUE, uint16_array, key_value) \
+ V(INT16_ARRAY, KEY_VALUE, int16_array, key_value) \
+ V(UINT32_ARRAY, KEY_VALUE, uint32_array, key_value) \
+ V(INT32_ARRAY, KEY_VALUE, int32_array, key_value) \
+ V(FLOAT32_ARRAY, KEY_VALUE, float32_array, key_value) \
+ V(FLOAT64_ARRAY, KEY_VALUE, float64_array, key_value) \
+ V(UINT8_CLAMPED_ARRAY, KEY_VALUE, uint8_clamped_array, key_value) \
+ V(FAST_SMI_ARRAY, KEY_VALUE, fast_smi_array, key_value) \
+ V(FAST_HOLEY_SMI_ARRAY, KEY_VALUE, fast_holey_smi_array, key_value) \
+ V(FAST_ARRAY, KEY_VALUE, fast_array, key_value) \
+ V(FAST_HOLEY_ARRAY, KEY_VALUE, fast_holey_array, key_value) \
+ V(FAST_DOUBLE_ARRAY, KEY_VALUE, fast_double_array, key_value) \
+ V(FAST_HOLEY_DOUBLE_ARRAY, KEY_VALUE, fast_holey_double_array, key_value) \
+ V(GENERIC_ARRAY, KEY_VALUE, array, key_value) \
+ V(UINT8_ARRAY, VALUE, uint8_array, value) \
+ V(INT8_ARRAY, VALUE, int8_array, value) \
+ V(UINT16_ARRAY, VALUE, uint16_array, value) \
+ V(INT16_ARRAY, VALUE, int16_array, value) \
+ V(UINT32_ARRAY, VALUE, uint32_array, value) \
+ V(INT32_ARRAY, VALUE, int32_array, value) \
+ V(FLOAT32_ARRAY, VALUE, float32_array, value) \
+ V(FLOAT64_ARRAY, VALUE, float64_array, value) \
+ V(UINT8_CLAMPED_ARRAY, VALUE, uint8_clamped_array, value) \
+ V(FAST_SMI_ARRAY, VALUE, fast_smi_array, value) \
+ V(FAST_HOLEY_SMI_ARRAY, VALUE, fast_holey_smi_array, value) \
+ V(FAST_ARRAY, VALUE, fast_array, value) \
+ V(FAST_HOLEY_ARRAY, VALUE, fast_holey_array, value) \
+ V(FAST_DOUBLE_ARRAY, VALUE, fast_double_array, value) \
+ V(FAST_HOLEY_DOUBLE_ARRAY, VALUE, fast_holey_double_array, value) \
+ V(GENERIC_ARRAY, VALUE, array, value)
+
+#define CREATE_ARRAY_ITERATOR_MAP(PREFIX, SUFFIX, prefix, suffix) \
+ do { \
+ const InstanceType type = JS_##PREFIX##_##SUFFIX##_ITERATOR_TYPE; \
+ Handle<Map> map = \
+ Map::Copy(initial_map, "JS_" #PREFIX "_" #SUFFIX "_ITERATOR_TYPE"); \
+ map->set_instance_type(type); \
+ native_context()->set_##prefix##_##suffix##_iterator_map(*map); \
+ } while (0);
+
+ ARRAY_ITERATOR_LIST(CREATE_ARRAY_ITERATOR_MAP)
+
+#undef CREATE_ARRAY_ITERATOR_MAP
+#undef ARRAY_ITERATOR_LIST
+ }
+
{ // --- N u m b e r ---
Handle<JSFunction> number_fun = InstallFunction(
global, "Number", JS_VALUE_TYPE, JSValue::kSize,
@@ -1294,7 +1425,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Create the %NumberPrototype%
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(number_fun, TENURED));
- prototype->set_value(Smi::FromInt(0));
+ prototype->set_value(Smi::kZero);
Accessors::FunctionSetPrototype(number_fun, prototype).Assert();
// Install the "constructor" property on the {prototype}.
@@ -1325,6 +1456,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(number_fun, "isNaN", Builtins::kNumberIsNaN, 1, true);
SimpleInstallFunction(number_fun, "isSafeInteger",
Builtins::kNumberIsSafeInteger, 1, true);
+
+ // Install Number.parseFloat and Global.parseFloat.
+ Handle<JSFunction> parse_float_fun = SimpleInstallFunction(
+ number_fun, "parseFloat", Builtins::kNumberParseFloat, 1, true);
+ JSObject::AddProperty(global_object,
+ factory->NewStringFromAsciiChecked("parseFloat"),
+ parse_float_fun, DONT_ENUM);
+
+ // Install Number.parseInt and Global.parseInt.
+ Handle<JSFunction> parse_int_fun = SimpleInstallFunction(
+ number_fun, "parseInt", Builtins::kNumberParseInt, 2, true);
+ JSObject::AddProperty(global_object,
+ factory->NewStringFromAsciiChecked("parseInt"),
+ parse_int_fun, DONT_ENUM);
}
{ // --- B o o l e a n ---
@@ -1385,7 +1530,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install the String.fromCharCode function.
SimpleInstallFunction(string_fun, "fromCharCode",
- Builtins::kStringFromCharCode, 1, true);
+ Builtins::kStringFromCharCode, 1, false);
// Install the String.fromCodePoint function.
SimpleInstallFunction(string_fun, "fromCodePoint",
@@ -1406,6 +1551,12 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
1, true);
SimpleInstallFunction(prototype, "charCodeAt",
Builtins::kStringPrototypeCharCodeAt, 1, true);
+ SimpleInstallFunction(prototype, "endsWith",
+ Builtins::kStringPrototypeEndsWith, 1, false);
+ SimpleInstallFunction(prototype, "includes",
+ Builtins::kStringPrototypeIncludes, 1, false);
+ SimpleInstallFunction(prototype, "indexOf",
+ Builtins::kStringPrototypeIndexOf, 1, false);
SimpleInstallFunction(prototype, "lastIndexOf",
Builtins::kStringPrototypeLastIndexOf, 1, false);
SimpleInstallFunction(prototype, "localeCompare",
@@ -1416,6 +1567,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
2, true);
SimpleInstallFunction(prototype, "substring",
Builtins::kStringPrototypeSubstring, 2, true);
+ SimpleInstallFunction(prototype, "startsWith",
+ Builtins::kStringPrototypeStartsWith, 1, false);
SimpleInstallFunction(prototype, "toString",
Builtins::kStringPrototypeToString, 0, true);
SimpleInstallFunction(prototype, "trim", Builtins::kStringPrototypeTrim, 0,
@@ -1431,6 +1584,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
Builtins::kStringPrototypeIterator, 0, true);
iterator->shared()->set_native(true);
+ iterator->shared()->set_builtin_function_id(kStringIterator);
JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
static_cast<PropertyAttributes>(DONT_ENUM));
}
@@ -1662,14 +1816,142 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
shared->DontAdaptArguments();
shared->set_length(2);
- // RegExp.prototype setup.
+ {
+ // RegExp.prototype setup.
- // Install the "constructor" property on the {prototype}.
- JSObject::AddProperty(prototype, factory->constructor_string(), regexp_fun,
- DONT_ENUM);
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(),
+ regexp_fun, DONT_ENUM);
- SimpleInstallFunction(prototype, "exec", Builtins::kRegExpPrototypeExec, 1,
- true, DONT_ENUM);
+ {
+ Handle<JSFunction> fun = SimpleInstallFunction(
+ prototype, factory->exec_string(), Builtins::kRegExpPrototypeExec,
+ 1, true, DONT_ENUM);
+ native_context()->set_regexp_exec_function(*fun);
+ }
+
+ SimpleInstallGetter(prototype, factory->flags_string(),
+ Builtins::kRegExpPrototypeFlagsGetter, true);
+ SimpleInstallGetter(prototype, factory->global_string(),
+ Builtins::kRegExpPrototypeGlobalGetter, true);
+ SimpleInstallGetter(prototype, factory->ignoreCase_string(),
+ Builtins::kRegExpPrototypeIgnoreCaseGetter, true);
+ SimpleInstallGetter(prototype, factory->multiline_string(),
+ Builtins::kRegExpPrototypeMultilineGetter, true);
+ SimpleInstallGetter(prototype, factory->source_string(),
+ Builtins::kRegExpPrototypeSourceGetter, false);
+ SimpleInstallGetter(prototype, factory->sticky_string(),
+ Builtins::kRegExpPrototypeStickyGetter, true);
+ SimpleInstallGetter(prototype, factory->unicode_string(),
+ Builtins::kRegExpPrototypeUnicodeGetter, true);
+
+ SimpleInstallFunction(prototype, "compile",
+ Builtins::kRegExpPrototypeCompile, 2, false,
+ DONT_ENUM);
+ SimpleInstallFunction(prototype, factory->toString_string(),
+ Builtins::kRegExpPrototypeToString, 0, false,
+ DONT_ENUM);
+ SimpleInstallFunction(prototype, "test", Builtins::kRegExpPrototypeTest,
+ 1, true, DONT_ENUM);
+
+ {
+ Handle<JSFunction> fun = SimpleCreateFunction(
+ isolate, factory->InternalizeUtf8String("[Symbol.match]"),
+ Builtins::kRegExpPrototypeMatch, 1, false);
+ InstallFunction(prototype, fun, factory->match_symbol(), DONT_ENUM);
+ }
+
+ {
+ Handle<JSFunction> fun = SimpleCreateFunction(
+ isolate, factory->InternalizeUtf8String("[Symbol.replace]"),
+ Builtins::kRegExpPrototypeReplace, 2, true);
+ InstallFunction(prototype, fun, factory->replace_symbol(), DONT_ENUM);
+ }
+
+ {
+ Handle<JSFunction> fun = SimpleCreateFunction(
+ isolate, factory->InternalizeUtf8String("[Symbol.search]"),
+ Builtins::kRegExpPrototypeSearch, 1, true);
+ InstallFunction(prototype, fun, factory->search_symbol(), DONT_ENUM);
+ }
+
+ {
+ Handle<JSFunction> fun = SimpleCreateFunction(
+ isolate, factory->InternalizeUtf8String("[Symbol.split]"),
+ Builtins::kRegExpPrototypeSplit, 2, false);
+ InstallFunction(prototype, fun, factory->split_symbol(), DONT_ENUM);
+ }
+
+ // Store the initial RegExp.prototype map. This is used in fast-path
+ // checks. Do not alter the prototype after this point.
+ native_context()->set_regexp_prototype_map(prototype->map());
+ }
+
+ {
+ // RegExp getters and setters.
+
+ SimpleInstallGetter(regexp_fun,
+ factory->InternalizeUtf8String("[Symbol.species]"),
+ factory->species_symbol(),
+ Builtins::kRegExpPrototypeSpeciesGetter, false);
+
+ // Static properties set by a successful match.
+
+ const PropertyAttributes no_enum = DONT_ENUM;
+ SimpleInstallGetterSetter(regexp_fun, factory->input_string(),
+ Builtins::kRegExpInputGetter,
+ Builtins::kRegExpInputSetter, no_enum);
+ SimpleInstallGetterSetter(
+ regexp_fun, factory->InternalizeUtf8String("$_"),
+ Builtins::kRegExpInputGetter, Builtins::kRegExpInputSetter, no_enum);
+
+ SimpleInstallGetterSetter(
+ regexp_fun, factory->InternalizeUtf8String("lastMatch"),
+ Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
+ SimpleInstallGetterSetter(
+ regexp_fun, factory->InternalizeUtf8String("$&"),
+ Builtins::kRegExpLastMatchGetter, Builtins::kEmptyFunction, no_enum);
+
+ SimpleInstallGetterSetter(
+ regexp_fun, factory->InternalizeUtf8String("lastParen"),
+ Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
+ SimpleInstallGetterSetter(
+ regexp_fun, factory->InternalizeUtf8String("$+"),
+ Builtins::kRegExpLastParenGetter, Builtins::kEmptyFunction, no_enum);
+
+ SimpleInstallGetterSetter(regexp_fun,
+ factory->InternalizeUtf8String("leftContext"),
+ Builtins::kRegExpLeftContextGetter,
+ Builtins::kEmptyFunction, no_enum);
+ SimpleInstallGetterSetter(regexp_fun,
+ factory->InternalizeUtf8String("$`"),
+ Builtins::kRegExpLeftContextGetter,
+ Builtins::kEmptyFunction, no_enum);
+
+ SimpleInstallGetterSetter(regexp_fun,
+ factory->InternalizeUtf8String("rightContext"),
+ Builtins::kRegExpRightContextGetter,
+ Builtins::kEmptyFunction, no_enum);
+ SimpleInstallGetterSetter(regexp_fun,
+ factory->InternalizeUtf8String("$'"),
+ Builtins::kRegExpRightContextGetter,
+ Builtins::kEmptyFunction, no_enum);
+
+#define INSTALL_CAPTURE_GETTER(i) \
+ SimpleInstallGetterSetter( \
+ regexp_fun, factory->InternalizeUtf8String("$" #i), \
+ Builtins::kRegExpCapture##i##Getter, Builtins::kEmptyFunction, no_enum)
+ INSTALL_CAPTURE_GETTER(1);
+ INSTALL_CAPTURE_GETTER(2);
+ INSTALL_CAPTURE_GETTER(3);
+ INSTALL_CAPTURE_GETTER(4);
+ INSTALL_CAPTURE_GETTER(5);
+ INSTALL_CAPTURE_GETTER(6);
+ INSTALL_CAPTURE_GETTER(7);
+ INSTALL_CAPTURE_GETTER(8);
+ INSTALL_CAPTURE_GETTER(9);
+#undef INSTALL_CAPTURE_GETTER
+ }
DCHECK(regexp_fun->has_initial_map());
Handle<Map> initial_map(regexp_fun->initial_map());
@@ -1681,7 +1963,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- DataDescriptor field(factory->last_index_string(),
+ DataDescriptor field(factory->lastIndex_string(),
JSRegExp::kLastIndexFieldIndex, writable,
Representation::Tagged());
initial_map->AppendDescriptor(&field);
@@ -1691,6 +1973,24 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
initial_map->set_unused_property_fields(0);
initial_map->set_instance_size(initial_map->instance_size() +
num_fields * kPointerSize);
+
+ { // Internal: RegExpInternalMatch
+ Handle<JSFunction> function =
+ factory->NewFunction(isolate->factory()->empty_string(),
+ isolate->builtins()->RegExpInternalMatch(),
+ JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ function->shared()->set_internal_formal_parameter_count(2);
+ function->shared()->set_length(2);
+ function->shared()->set_native(true);
+ native_context()->set(Context::REGEXP_INTERNAL_MATCH, *function);
+ }
+
+ // Create the last match info. One for external use, and one for internal
+ // use when we don't want to modify the externally visible match info.
+ Handle<RegExpMatchInfo> last_match_info = factory->NewRegExpMatchInfo();
+ native_context()->set_regexp_last_match_info(*last_match_info);
+ Handle<RegExpMatchInfo> internal_match_info = factory->NewRegExpMatchInfo();
+ native_context()->set_regexp_internal_match_info(*internal_match_info);
}
{ // -- E r r o r
@@ -1738,6 +2038,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::MAKE_URI_ERROR_INDEX);
}
+ { // -- C o m p i l e E r r o r
+ Handle<JSObject> dummy = factory->NewJSObject(isolate->object_function());
+ InstallError(isolate, dummy, factory->CompileError_string(),
+ Context::WASM_COMPILE_ERROR_FUNCTION_INDEX);
+
+ // -- R u n t i m e E r r o r
+ InstallError(isolate, dummy, factory->RuntimeError_string(),
+ Context::WASM_RUNTIME_ERROR_FUNCTION_INDEX);
+ }
+
// Initialize the embedder data slot.
Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
native_context()->set_embedder_data(*embedder_data);
@@ -1798,6 +2108,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> math_pow =
SimpleInstallFunction(math, "pow", Builtins::kMathPow, 2, true);
native_context()->set_math_pow(*math_pow);
+ SimpleInstallFunction(math, "random", Builtins::kMathRandom, 0, true);
SimpleInstallFunction(math, "round", Builtins::kMathRound, 1, true);
SimpleInstallFunction(math, "sign", Builtins::kMathSign, 1, true);
SimpleInstallFunction(math, "sin", Builtins::kMathSin, 1, true);
@@ -1840,6 +2151,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
math, factory->NewStringFromAsciiChecked("SQRT2"),
factory->NewNumber(std::sqrt(2.0)),
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ JSObject::AddProperty(
+ math, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("Math"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
{ // -- A r r a y B u f f e r
@@ -1878,6 +2193,24 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(prototype, factory->length_string(),
Builtins::kTypedArrayPrototypeLength, true,
kTypedArrayLength);
+
+ // Install "keys", "values" and "entries" methods on the {prototype}.
+ Handle<JSFunction> entries =
+ SimpleInstallFunction(prototype, factory->entries_string(),
+ Builtins::kTypedArrayPrototypeEntries, 0, true);
+ entries->shared()->set_builtin_function_id(kTypedArrayEntries);
+
+ Handle<JSFunction> keys =
+ SimpleInstallFunction(prototype, factory->keys_string(),
+ Builtins::kTypedArrayPrototypeKeys, 0, true);
+ keys->shared()->set_builtin_function_id(kTypedArrayKeys);
+
+ Handle<JSFunction> values =
+ SimpleInstallFunction(prototype, factory->values_string(),
+ Builtins::kTypedArrayPrototypeValues, 0, true);
+ values->shared()->set_builtin_function_id(kTypedArrayValues);
+ JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
+ DONT_ENUM);
}
{ // -- T y p e d A r r a y s
@@ -1978,6 +2311,33 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::JS_SET_FUN_INDEX);
}
+ { // -- J S M o d u l e N a m e s p a c e
+ Handle<Map> map =
+ factory->NewMap(JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize);
+ Map::SetPrototype(map, isolate->factory()->null_value());
+ Map::EnsureDescriptorSlack(map, 2);
+ native_context()->set_js_module_namespace_map(*map);
+
+ { // Install @@toStringTag.
+ PropertyAttributes attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ DataConstantDescriptor d(factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("Module"),
+ attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ { // Install @@iterator.
+ Handle<JSFunction> iterator = SimpleCreateFunction(
+ isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
+ Builtins::kModuleNamespaceIterator, 0, true);
+ iterator->shared()->set_native(true);
+ // TODO(neis): Is this really supposed to be writable?
+ DataConstantDescriptor d(factory->iterator_symbol(), iterator, DONT_ENUM);
+ map->AppendDescriptor(&d);
+ }
+ }
+
{ // -- I t e r a t o r R e s u l t
Handle<Map> map =
factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize);
@@ -2174,23 +2534,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
const PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
- // Create the ThrowTypeError functions.
+ // Create the ThrowTypeError function.
Handle<AccessorPair> callee = factory->NewAccessorPair();
- Handle<AccessorPair> caller = factory->NewAccessorPair();
Handle<JSFunction> poison = GetStrictArgumentsPoisonFunction();
- // Install the ThrowTypeError functions.
+ // Install the ThrowTypeError function.
callee->set_getter(*poison);
callee->set_setter(*poison);
- caller->set_getter(*poison);
- caller->set_setter(*poison);
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(
JS_ARGUMENTS_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
- Map::EnsureDescriptorSlack(map, 3);
+ Map::EnsureDescriptorSlack(map, 2);
{ // length
DataDescriptor d(factory->length_string(),
@@ -2203,11 +2560,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
attributes);
map->AppendDescriptor(&d);
}
- { // caller
- AccessorConstantDescriptor d(factory->caller_string(), caller,
- attributes);
- map->AppendDescriptor(&d);
- }
// @@iterator method is added later.
DCHECK_EQ(native_context()->object_function()->prototype(),
@@ -2269,16 +2621,10 @@ void Genesis::InstallTypedArray(const char* name, ElementsKind elements_kind,
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Handle<JSFunction> result =
- InstallFunction(global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize,
- prototype, Builtins::kIllegal);
-
- Handle<Map> initial_map = isolate()->factory()->NewMap(
- JS_TYPED_ARRAY_TYPE,
- JSTypedArray::kSizeWithInternalFields,
- elements_kind);
- JSFunction::SetInitialMap(result, initial_map,
- handle(initial_map->prototype(), isolate()));
+ Handle<JSFunction> result = InstallFunction(
+ global, name, JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithInternalFields,
+ prototype, Builtins::kIllegal);
+ result->initial_map()->set_elements_kind(elements_kind);
CHECK(JSObject::SetPrototype(result, typed_array_function, false,
Object::DONT_THROW)
@@ -2585,6 +2931,25 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
*generator_function_function);
}
+ { // -- F i x e d A r r a y I t e r a t o r
+ int size = JSFixedArrayIterator::kHeaderSize +
+ JSFixedArrayIterator::kInObjectPropertyCount * kPointerSize;
+ Handle<Map> map = factory->NewMap(JS_FIXED_ARRAY_ITERATOR_TYPE, size);
+ Map::SetPrototype(map, iterator_prototype);
+ Map::EnsureDescriptorSlack(map,
+ JSFixedArrayIterator::kInObjectPropertyCount);
+ map->SetInObjectProperties(JSFixedArrayIterator::kInObjectPropertyCount);
+ map->SetConstructor(native_context->object_function());
+
+ { // next
+ DataDescriptor d(factory->next_string(), JSFixedArrayIterator::kNextIndex,
+ DONT_ENUM, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ native_context->set_fixed_array_iterator_map(*map);
+ }
+
{ // -- S e t I t e r a t o r
Handle<JSObject> set_iterator_prototype =
isolate->factory()->NewJSObject(isolate->object_function(), TENURED);
@@ -2680,15 +3045,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_line_ends =
- Accessors::ScriptLineEndsInfo(isolate, attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_line_ends->name())), script_line_ends,
- attribs);
- script_map->AppendDescriptor(&d);
- }
-
Handle<AccessorInfo> script_context_data =
Accessors::ScriptContextDataInfo(isolate, attribs);
{
@@ -2868,13 +3224,11 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
void Genesis::InitializeGlobal_##id() {}
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_for_in)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_declarations)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
#ifdef V8_I18N_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
@@ -2959,35 +3313,6 @@ void Genesis::InitializeGlobal_harmony_simd() {
}
-void Genesis::InitializeGlobal_harmony_object_values_entries() {
- if (!FLAG_harmony_object_values_entries) return;
-
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context()->global_object()));
- Isolate* isolate = global->GetIsolate();
- Factory* factory = isolate->factory();
-
- Handle<JSFunction> object_function = isolate->object_function();
- SimpleInstallFunction(object_function, factory->entries_string(),
- Builtins::kObjectEntries, 1, false);
- SimpleInstallFunction(object_function, factory->values_string(),
- Builtins::kObjectValues, 1, false);
-}
-
-void Genesis::InitializeGlobal_harmony_object_own_property_descriptors() {
- if (!FLAG_harmony_object_own_property_descriptors) return;
-
- Handle<JSGlobalObject> global(
- JSGlobalObject::cast(native_context()->global_object()));
- Isolate* isolate = global->GetIsolate();
- Factory* factory = isolate->factory();
-
- Handle<JSFunction> object_function = isolate->object_function();
- SimpleInstallFunction(object_function,
- factory->getOwnPropertyDescriptors_string(),
- Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
-}
-
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -3165,12 +3490,13 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
HeapObject::cast(object_function->initial_map()->prototype())->map());
// Set up the map for Object.create(null) instances.
- Handle<Map> object_with_null_prototype_map =
+ Handle<Map> slow_object_with_null_prototype_map =
Map::CopyInitialMap(handle(object_function->initial_map(), isolate()));
- Map::SetPrototype(object_with_null_prototype_map,
+ slow_object_with_null_prototype_map->set_dictionary_map(true);
+ Map::SetPrototype(slow_object_with_null_prototype_map,
isolate()->factory()->null_value());
- native_context()->set_object_with_null_prototype_map(
- *object_with_null_prototype_map);
+ native_context()->set_slow_object_with_null_prototype_map(
+ *slow_object_with_null_prototype_map);
// Store the map for the %StringPrototype% after the natives has been compiled
// and the String function has been set up.
@@ -3271,6 +3597,33 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
*isolate()->builtins()->JSBuiltinsConstructStub());
InstallWithIntrinsicDefaultProto(isolate(), function,
Context::PROMISE_FUNCTION_INDEX);
+
+ {
+ Handle<Code> code = handle(
+ isolate()->builtins()->builtin(Builtins::kPromiseResolveClosure),
+ isolate());
+ Handle<SharedFunctionInfo> info =
+ isolate()->factory()->NewSharedFunctionInfo(factory()->empty_string(),
+ code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context()->set_promise_resolve_shared_fun(*info);
+
+ code = handle(
+ isolate()->builtins()->builtin(Builtins::kPromiseRejectClosure),
+ isolate());
+ info = isolate()->factory()->NewSharedFunctionInfo(
+ factory()->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(2);
+ info->set_length(1);
+ native_context()->set_promise_reject_shared_fun(*info);
+ }
+
+ Handle<JSFunction> create_resolving_functions =
+ SimpleCreateFunction(isolate(), factory()->empty_string(),
+ Builtins::kCreateResolvingFunctions, 2, false);
+ native_context()->set_create_resolving_functions(
+ *create_resolving_functions);
}
InstallBuiltinFunctionIds();
@@ -3461,15 +3814,10 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
- static const char* harmony_for_in_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
- static const char* harmony_restrictive_declarations_natives[] = {nullptr};
static const char* harmony_regexp_named_captures_natives[] = {nullptr};
static const char* harmony_regexp_property_natives[] = {nullptr};
static const char* harmony_function_sent_natives[] = {nullptr};
- static const char* harmony_object_values_entries_natives[] = {nullptr};
- static const char* harmony_object_own_property_descriptors_natives[] = {
- nullptr};
static const char* harmony_array_prototype_values_natives[] = {nullptr};
static const char* harmony_string_padding_natives[] = {
"native harmony-string-padding.js", nullptr};
@@ -3695,11 +4043,11 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
InstallExtension(isolate, "v8/gc", &extension_states)) &&
(!FLAG_expose_externalize_string ||
InstallExtension(isolate, "v8/externalize", &extension_states)) &&
- (!FLAG_track_gc_object_stats ||
+ (!FLAG_gc_stats ||
InstallExtension(isolate, "v8/statistics", &extension_states)) &&
(!FLAG_expose_trigger_failure ||
InstallExtension(isolate, "v8/trigger-failure", &extension_states)) &&
- (!(FLAG_ignition && FLAG_trace_ignition_dispatches) ||
+ (!FLAG_trace_ignition_dispatches ||
InstallExtension(isolate, "v8/ignition-statistics",
&extension_states)) &&
InstallRequestedExtensions(isolate, extensions, &extension_states);
@@ -4037,7 +4385,12 @@ Genesis::Genesis(Isolate* isolate,
// and initialize it later in CreateNewGlobals.
Handle<JSGlobalProxy> global_proxy;
if (!maybe_global_proxy.ToHandle(&global_proxy)) {
- global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy();
+ const int internal_field_count =
+ !global_proxy_template.IsEmpty()
+ ? global_proxy_template->InternalFieldCount()
+ : 0;
+ global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy(
+ JSGlobalProxy::SizeWithInternalFields(internal_field_count));
}
// We can only de-serialize a context if the isolate was initialized from
@@ -4093,7 +4446,7 @@ Genesis::Genesis(Isolate* isolate,
isolate->counters()->contexts_created_from_scratch()->Increment();
// Re-initialize the counter because it got incremented during snapshot
// creation.
- isolate->native_context()->set_errors_thrown(Smi::FromInt(0));
+ isolate->native_context()->set_errors_thrown(Smi::kZero);
}
// Install experimental natives. Do not include them into the
@@ -4146,9 +4499,12 @@ Genesis::Genesis(Isolate* isolate,
return;
}
+ const int proxy_size = JSGlobalProxy::SizeWithInternalFields(
+ global_proxy_template->InternalFieldCount());
+
Handle<JSGlobalProxy> global_proxy;
if (!maybe_global_proxy.ToHandle(&global_proxy)) {
- global_proxy = factory()->NewUninitializedJSGlobalProxy();
+ global_proxy = factory()->NewUninitializedJSGlobalProxy(proxy_size);
}
// CreateNewGlobals.
@@ -4164,9 +4520,10 @@ Genesis::Genesis(Isolate* isolate,
Handle<JSFunction> global_proxy_function =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
initial_map, shared, factory()->undefined_value());
- DCHECK_EQ(global_proxy_data->internal_field_count(), 0);
+ DCHECK_EQ(global_proxy_data->internal_field_count(),
+ global_proxy_template->InternalFieldCount());
Handle<Map> global_proxy_map = isolate->factory()->NewMap(
- JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize, FAST_HOLEY_SMI_ELEMENTS);
+ JS_GLOBAL_PROXY_TYPE, proxy_size, FAST_HOLEY_SMI_ELEMENTS);
JSFunction::SetInitialMap(global_proxy_function, global_proxy_map,
factory()->null_value());
global_proxy_map->set_is_access_check_needed(true);
@@ -4179,7 +4536,7 @@ Genesis::Genesis(Isolate* isolate,
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
// HookUpGlobalProxy.
- global_proxy->set_native_context(*factory()->null_value());
+ global_proxy->set_native_context(heap()->null_value());
// DetachGlobal.
JSObject::ForceSetPrototype(global_proxy, factory()->null_value());
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 2c0bef2556..6103971787 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -260,7 +260,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// 2b. No arguments, return +0.
__ bind(&no_arguments);
- __ Move(r0, Smi::FromInt(0));
+ __ Move(r0, Smi::kZero);
__ Ret(1);
}
@@ -288,7 +288,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ b(&done);
__ bind(&no_arguments);
- __ Move(r2, Smi::FromInt(0));
+ __ Move(r2, Smi::kZero);
__ bind(&done);
}
@@ -547,14 +547,14 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
- // -- r2 : allocation site or undefined
// -- r3 : new target
// -- cp : context
// -- lr : return address
@@ -568,10 +568,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(r2, r4);
- __ Push(cp);
__ SmiTag(r0);
- __ Push(r2, r0);
+ __ Push(cp, r0);
if (create_implicit_receiver) {
// Allocate the new receiver object.
@@ -701,6 +699,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Jump(lr);
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -1146,31 +1146,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Jump(r4);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ ldr(kContextRegister,
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, r2);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(r0);
-
- // Push function as argument and compile for baseline.
- __ push(r1);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(r0);
- }
- __ Jump(lr);
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
@@ -1321,12 +1296,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ Move(r2, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ add(lr, r2, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1362,6 +1337,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ mov(pc, ip);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ ldr(r1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister, r1, r2);
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ mov(r2, r0); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@@ -1370,7 +1368,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1437,15 +1434,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
@@ -1480,25 +1474,18 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
- __ pop(closure);
-
- // Last possibility. Check the context free optimized code map entry.
- __ ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
+ __ pop(closure);
__ pop(new_target);
__ pop(argument_count);
- // Is the full code valid?
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ ldrb(r5, FieldMemOperand(entry,
+ SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+ __ b(ne, &gotta_call_runtime_no_stack);
+ // Is the full code valid?
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
__ and_(r5, r5, Operand(Code::KindField::kMask));
@@ -1859,7 +1846,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// If the code object is null, just return to the caller.
Label skip;
- __ cmp(r0, Operand(Smi::FromInt(0)));
+ __ cmp(r0, Operand(Smi::kZero));
__ b(ne, &skip);
__ Ret();
@@ -2443,8 +2430,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(r0, r1);
__ mov(r0, r3);
__ Push(cp);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(cp);
__ mov(r3, r0);
__ Pop(r0, r1);
@@ -2773,7 +2760,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -----------------------------------
__ SmiTag(r1);
__ Push(r1);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2786,7 +2773,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ SmiTag(r1);
__ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r1, r2);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2797,7 +2784,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
__ Push(r1);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 48551dea00..aeb0508a20 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -278,7 +278,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ Ldr(x2, MemOperand(jssp, x0, LSL, kPointerSizeLog2));
__ B(&done);
__ Bind(&no_arguments);
- __ Mov(x2, Smi::FromInt(0));
+ __ Mov(x2, Smi::kZero);
__ Bind(&done);
}
@@ -535,14 +535,14 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
- // -- x2 : allocation site or undefined
// -- x3 : new target
// -- lr : return address
// -- cp : context pointer
@@ -560,14 +560,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the four incoming parameters on the stack.
Register argc = x0;
Register constructor = x1;
- Register allocation_site = x2;
Register new_target = x3;
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(allocation_site, x10);
- __ Push(cp);
__ SmiTag(argc);
- __ Push(allocation_site, argc);
+ __ Push(cp, argc);
if (create_implicit_receiver) {
// Allocate the new receiver object.
@@ -703,6 +700,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -1155,31 +1154,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Jump(x7);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ ldr(kContextRegister,
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, x2);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(x0);
-
- // Push function as argument and compile for baseline.
- __ push(x1);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(x0);
- }
- __ Ret();
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
@@ -1332,12 +1306,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ LoadObject(x1, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1373,6 +1347,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ Jump(ip0);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ Ldr(x1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ldr(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister, x1, x2);
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ Mov(x2, x0); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@@ -1381,7 +1378,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1439,13 +1435,10 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- Label install_optimized_code_and_tailcall;
- __ Bind(&install_optimized_code_and_tailcall);
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1476,22 +1469,16 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ B(&gotta_call_runtime);
- __ Bind(&maybe_call_runtime);
-
- // Last possibility. Check the context free optimized code map entry.
- __ Ldr(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ B(&install_optimized_code_and_tailcall);
-
__ Bind(&try_shared);
- // Is the full code valid?
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ Ldrb(temp, FieldMemOperand(
+ entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ TestAndBranchIfAnySet(
+ temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
+ &gotta_call_runtime);
+ // Is the full code valid?
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
__ and_(x5, x5, Operand(Code::KindField::kMask));
@@ -1863,7 +1850,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// If the code object is null, just return to the caller.
Label skip;
- __ CompareAndBranch(x0, Smi::FromInt(0), ne, &skip);
+ __ CompareAndBranch(x0, Smi::kZero, ne, &skip);
__ Ret();
__ Bind(&skip);
@@ -2512,8 +2499,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(x0, x1);
__ Mov(x0, x3);
__ Push(cp);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(cp);
__ Mov(x3, x0);
__ Pop(x1, x0);
@@ -2847,7 +2834,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -----------------------------------
__ SmiTag(x1);
__ Push(x1);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2861,7 +2848,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ SmiTag(x1);
__ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(x1, x2);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2874,7 +2861,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -----------------------------------
MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
__ Push(x1);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index aed10b1288..defc4dcf62 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -122,7 +122,7 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
BUILTIN(HandleApiCall) {
HandleScope scope(isolate);
- Handle<JSFunction> function = args.target<JSFunction>();
+ Handle<JSFunction> function = args.target();
Handle<Object> receiver = args.receiver();
Handle<HeapObject> new_target = args.new_target();
Handle<FunctionTemplateInfo> fun_data(function->shared()->get_api_func_data(),
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index b4969f1e57..c09f11b2e8 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
+#include "src/contexts.h"
#include "src/elements.h"
namespace v8 {
@@ -407,14 +408,18 @@ namespace {
*/
class ArrayConcatVisitor {
public:
- ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
+ ArrayConcatVisitor(Isolate* isolate, Handle<HeapObject> storage,
bool fast_elements)
: isolate_(isolate),
storage_(isolate->global_handles()->Create(*storage)),
index_offset_(0u),
- bit_field_(FastElementsField::encode(fast_elements) |
- ExceedsLimitField::encode(false) |
- IsFixedArrayField::encode(storage->IsFixedArray())) {
+ bit_field_(
+ FastElementsField::encode(fast_elements) |
+ ExceedsLimitField::encode(false) |
+ IsFixedArrayField::encode(storage->IsFixedArray()) |
+ HasSimpleElementsField::encode(storage->IsFixedArray() ||
+ storage->map()->instance_type() >
+ LAST_CUSTOM_ELEMENTS_RECEIVER)) {
DCHECK(!(this->fast_elements() && !is_fixed_array()));
}
@@ -503,12 +508,16 @@ class ArrayConcatVisitor {
// (otherwise)
Handle<FixedArray> storage_fixed_array() {
DCHECK(is_fixed_array());
+ DCHECK(has_simple_elements());
return Handle<FixedArray>::cast(storage_);
}
Handle<JSReceiver> storage_jsreceiver() {
DCHECK(!is_fixed_array());
return Handle<JSReceiver>::cast(storage_);
}
+ bool has_simple_elements() const {
+ return HasSimpleElementsField::decode(bit_field_);
+ }
private:
// Convert storage to dictionary mode.
@@ -541,12 +550,14 @@ class ArrayConcatVisitor {
inline void set_storage(FixedArray* storage) {
DCHECK(is_fixed_array());
+ DCHECK(has_simple_elements());
storage_ = isolate_->global_handles()->Create(storage);
}
class FastElementsField : public BitField<bool, 0, 1> {};
class ExceedsLimitField : public BitField<bool, 1, 1> {};
class IsFixedArrayField : public BitField<bool, 2, 1> {};
+ class HasSimpleElementsField : public BitField<bool, 3, 1> {};
bool fast_elements() const { return FastElementsField::decode(bit_field_); }
void set_fast_elements(bool fast) {
@@ -772,7 +783,6 @@ bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
visitor->increase_index_offset(length);
return true;
}
-
/**
* A helper function that visits "array" elements of a JSReceiver in numerical
* order.
@@ -802,7 +812,8 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
return IterateElementsSlow(isolate, receiver, length, visitor);
}
- if (!HasOnlySimpleElements(isolate, *receiver)) {
+ if (!HasOnlySimpleElements(isolate, *receiver) ||
+ !visitor->has_simple_elements()) {
return IterateElementsSlow(isolate, receiver, length, visitor);
}
Handle<JSObject> array = Handle<JSObject>::cast(receiver);
@@ -1071,7 +1082,7 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
// In case of failure, fall through.
}
- Handle<Object> storage;
+ Handle<HeapObject> storage;
if (fast_case) {
// The backing storage array must have non-existing elements to preserve
// holes across concat operations.
@@ -1084,12 +1095,12 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
storage = SeededNumberDictionary::New(isolate, at_least_space_for);
} else {
DCHECK(species->IsConstructor());
- Handle<Object> length(Smi::FromInt(0), isolate);
+ Handle<Object> length(Smi::kZero, isolate);
Handle<Object> storage_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, storage_object,
Execution::New(isolate, species, species, 1, &length));
- storage = storage_object;
+ storage = Handle<HeapObject>::cast(storage_object);
}
ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -1236,7 +1247,7 @@ void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
Label call_runtime(assembler), return_true(assembler),
return_false(assembler);
- assembler->GotoIf(assembler->WordIsSmi(object), &return_false);
+ assembler->GotoIf(assembler->TaggedIsSmi(object), &return_false);
Node* instance_type = assembler->LoadInstanceType(object);
assembler->GotoIf(assembler->Word32Equal(
@@ -1296,7 +1307,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
{
// Handle case where JSArray length is not an Smi in the runtime
Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
- assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+ assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
len_var.Bind(assembler->SmiToWord(len));
assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
@@ -1309,7 +1320,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
init_k_zero(assembler), init_k_n(assembler);
Node* tagged_n = assembler->ToInteger(context, start_from);
- assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+ assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
&init_k_heap_num);
assembler->Bind(&init_k_smi);
@@ -1395,7 +1406,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
undef_loop(assembler, &index_var), not_smi(assembler),
not_heap_num(assembler);
- assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+ assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
search_num.Bind(assembler->SmiToFloat64(search_element));
assembler->Goto(&heap_num_loop);
@@ -1464,7 +1475,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+ assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
assembler->Branch(
assembler->Float64Equal(search_num.value(),
assembler->SmiToFloat64(element_k)),
@@ -1474,8 +1485,9 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map),
&continue_loop);
- assembler->BranchIfFloat64Equal(
- search_num.value(), assembler->LoadHeapNumberValue(element_k),
+ assembler->Branch(
+ assembler->Float64Equal(search_num.value(),
+ assembler->LoadHeapNumberValue(element_k)),
&return_true, &continue_loop);
assembler->Bind(&continue_loop);
@@ -1492,7 +1504,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map),
&continue_loop);
@@ -1514,7 +1526,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
&return_false);
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
assembler->GotoUnless(assembler->IsStringInstanceType(
assembler->LoadInstanceType(element_k)),
&continue_loop);
@@ -1546,7 +1558,7 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
Node* map_k = assembler->LoadMap(element_k);
assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
@@ -1564,7 +1576,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
hole_loop(assembler, &index_var), search_notnan(assembler);
Variable search_num(assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+ &search_notnan);
search_num.Bind(assembler->SmiToFloat64(search_element));
assembler->Goto(&not_nan_loop);
@@ -1588,8 +1601,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->BranchIfFloat64Equal(element_k, search_num.value(),
- &return_true, &continue_loop);
+ assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+ &return_true, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
@@ -1618,7 +1631,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
hole_loop(assembler, &index_var), search_notnan(assembler);
Variable search_num(assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+ &search_notnan);
search_num.Bind(assembler->SmiToFloat64(search_element));
assembler->Goto(&not_nan_loop);
@@ -1647,8 +1661,8 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->BranchIfFloat64Equal(element_k, search_num.value(),
- &return_true, &continue_loop);
+ assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+ &return_true, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
@@ -1738,7 +1752,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
{
// Handle case where JSArray length is not an Smi in the runtime
Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
- assembler->GotoUnless(assembler->WordIsSmi(len), &call_runtime);
+ assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
len_var.Bind(assembler->SmiToWord(len));
assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
@@ -1751,7 +1765,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
init_k_zero(assembler), init_k_n(assembler);
Node* tagged_n = assembler->ToInteger(context, start_from);
- assembler->Branch(assembler->WordIsSmi(tagged_n), &init_k_smi,
+ assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
&init_k_heap_num);
assembler->Bind(&init_k_smi);
@@ -1837,7 +1851,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
undef_loop(assembler, &index_var), not_smi(assembler),
not_heap_num(assembler);
- assembler->GotoUnless(assembler->WordIsSmi(search_element), &not_smi);
+ assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
search_num.Bind(assembler->SmiToFloat64(search_element));
assembler->Goto(&heap_num_loop);
@@ -1903,7 +1917,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoUnless(assembler->WordIsSmi(element_k), &not_smi);
+ assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
assembler->Branch(
assembler->Float64Equal(search_num.value(),
assembler->SmiToFloat64(element_k)),
@@ -1913,8 +1927,9 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
heap_number_map),
&continue_loop);
- assembler->BranchIfFloat64Equal(
- search_num.value(), assembler->LoadHeapNumberValue(element_k),
+ assembler->Branch(
+ assembler->Float64Equal(search_num.value(),
+ assembler->LoadHeapNumberValue(element_k)),
&return_found, &continue_loop);
assembler->Bind(&continue_loop);
@@ -1931,7 +1946,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
&return_not_found);
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
assembler->GotoUnless(assembler->IsStringInstanceType(
assembler->LoadInstanceType(element_k)),
&continue_loop);
@@ -1963,7 +1978,7 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedArrayElement(
elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordIsSmi(element_k), &continue_loop);
+ assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
Node* map_k = assembler->LoadMap(element_k);
assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
@@ -1980,7 +1995,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
Variable search_num(assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+ &search_notnan);
search_num.Bind(assembler->SmiToFloat64(search_element));
assembler->Goto(&not_nan_loop);
@@ -2004,8 +2020,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Node* element_k = assembler->LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->BranchIfFloat64Equal(element_k, search_num.value(),
- &return_found, &continue_loop);
+ assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+ &return_found, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
@@ -2017,7 +2033,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
Variable search_num(assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->WordIsSmi(search_element), &search_notnan);
+ assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
+ &search_notnan);
search_num.Bind(assembler->SmiToFloat64(search_element));
assembler->Goto(&not_nan_loop);
@@ -2044,8 +2061,8 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->BranchIfFloat64Equal(element_k, search_num.value(),
- &return_found, &continue_loop);
+ assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
+ &return_found, &continue_loop);
assembler->Bind(&continue_loop);
index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
assembler->Goto(&not_nan_loop);
@@ -2063,5 +2080,555 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
array, search_element, start_from));
}
+namespace {
+
+template <IterationKind kIterationKind>
+void Generate_ArrayPrototypeIterationMethod(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Variable var_array(assembler, MachineRepresentation::kTagged);
+ Variable var_map(assembler, MachineRepresentation::kTagged);
+ Variable var_type(assembler, MachineRepresentation::kWord32);
+
+ Label if_isnotobject(assembler, Label::kDeferred);
+ Label create_array_iterator(assembler);
+
+ assembler->GotoIf(assembler->TaggedIsSmi(receiver), &if_isnotobject);
+ var_array.Bind(receiver);
+ var_map.Bind(assembler->LoadMap(receiver));
+ var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
+ assembler->Branch(assembler->IsJSReceiverInstanceType(var_type.value()),
+ &create_array_iterator, &if_isnotobject);
+
+ assembler->Bind(&if_isnotobject);
+ {
+ Callable callable = CodeFactory::ToObject(assembler->isolate());
+ Node* result = assembler->CallStub(callable, context, receiver);
+ var_array.Bind(result);
+ var_map.Bind(assembler->LoadMap(result));
+ var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
+ assembler->Goto(&create_array_iterator);
+ }
+
+ assembler->Bind(&create_array_iterator);
+ assembler->Return(assembler->CreateArrayIterator(
+ var_array.value(), var_map.value(), var_type.value(), context,
+ kIterationKind));
+}
+
+} // namespace
+
+void Builtins::Generate_ArrayPrototypeValues(CodeStubAssembler* assembler) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(assembler);
+}
+
+void Builtins::Generate_ArrayPrototypeEntries(CodeStubAssembler* assembler) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(assembler);
+}
+
+void Builtins::Generate_ArrayPrototypeKeys(CodeStubAssembler* assembler) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(assembler);
+}
+
+void Builtins::Generate_ArrayIteratorPrototypeNext(
+ CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* iterator = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Variable var_value(assembler, MachineRepresentation::kTagged);
+ Variable var_done(assembler, MachineRepresentation::kTagged);
+
+ // Required, or else `throw_bad_receiver` fails a DCHECK due to these
+ // variables not being bound along all paths, despite not being used.
+ var_done.Bind(assembler->TrueConstant());
+ var_value.Bind(assembler->UndefinedConstant());
+
+ Label throw_bad_receiver(assembler, Label::kDeferred);
+ Label set_done(assembler);
+ Label allocate_key_result(assembler);
+ Label allocate_entry_if_needed(assembler);
+ Label allocate_iterator_result(assembler);
+ Label generic_values(assembler);
+
+ // If O does not have all of the internal slots of an Array Iterator Instance
+ // (22.1.5.3), throw a TypeError exception
+ assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
+ Node* instance_type = assembler->LoadInstanceType(iterator);
+ assembler->GotoIf(
+ assembler->Uint32LessThan(
+ assembler->Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
+ FIRST_ARRAY_ITERATOR_TYPE),
+ assembler->Int32Sub(instance_type, assembler->Int32Constant(
+ FIRST_ARRAY_ITERATOR_TYPE))),
+ &throw_bad_receiver);
+
+ // Let a be O.[[IteratedObject]].
+ Node* array = assembler->LoadObjectField(
+ iterator, JSArrayIterator::kIteratedObjectOffset);
+
+ // Let index be O.[[ArrayIteratorNextIndex]].
+ Node* index =
+ assembler->LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
+ Node* orig_map = assembler->LoadObjectField(
+ iterator, JSArrayIterator::kIteratedObjectMapOffset);
+ Node* array_map = assembler->LoadMap(array);
+
+ Label if_isfastarray(assembler), if_isnotfastarray(assembler);
+
+ assembler->Branch(assembler->WordEqual(orig_map, array_map), &if_isfastarray,
+ &if_isnotfastarray);
+
+ assembler->Bind(&if_isfastarray);
+ {
+ CSA_ASSERT(assembler,
+ assembler->Word32Equal(assembler->LoadMapInstanceType(array_map),
+ assembler->Int32Constant(JS_ARRAY_TYPE)));
+
+ Node* length = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+
+ CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
+ CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+
+ assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kNextIndexOffset,
+ assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
+ assembler->BitcastTaggedToWord(one)));
+
+ var_done.Bind(assembler->FalseConstant());
+ Node* elements = assembler->LoadElements(array);
+
+ static int32_t kInstanceType[] = {
+ JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
+ JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+ };
+
+ Label packed_object_values(assembler), holey_object_values(assembler),
+ packed_double_values(assembler), holey_double_values(assembler);
+ Label* kInstanceTypeHandlers[] = {
+ &allocate_key_result, &packed_object_values, &holey_object_values,
+ &packed_object_values, &holey_object_values, &packed_double_values,
+ &holey_double_values, &packed_object_values, &holey_object_values,
+ &packed_object_values, &holey_object_values, &packed_double_values,
+ &holey_double_values};
+
+ assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
+ kInstanceTypeHandlers, arraysize(kInstanceType));
+
+ assembler->Bind(&packed_object_values);
+ {
+ var_value.Bind(assembler->LoadFixedArrayElement(
+ elements, index, 0, CodeStubAssembler::SMI_PARAMETERS));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&packed_double_values);
+ {
+ Node* value = assembler->LoadFixedDoubleArrayElement(
+ elements, index, MachineType::Float64(), 0,
+ CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&holey_object_values);
+ {
+ // Check the array_protector cell, and take the slow path if it's invalid.
+ Node* invalid =
+ assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+ Node* cell_value =
+ assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
+ &generic_values);
+
+ var_value.Bind(assembler->UndefinedConstant());
+ Node* value = assembler->LoadFixedArrayElement(
+ elements, index, 0, CodeStubAssembler::SMI_PARAMETERS);
+ assembler->GotoIf(
+ assembler->WordEqual(value, assembler->TheHoleConstant()),
+ &allocate_entry_if_needed);
+ var_value.Bind(value);
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&holey_double_values);
+ {
+ // Check the array_protector cell, and take the slow path if it's invalid.
+ Node* invalid =
+ assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+ Node* cell_value =
+ assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
+ &generic_values);
+
+ var_value.Bind(assembler->UndefinedConstant());
+ Node* value = assembler->LoadFixedDoubleArrayElement(
+ elements, index, MachineType::Float64(), 0,
+ CodeStubAssembler::SMI_PARAMETERS, &allocate_entry_if_needed);
+ var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+ }
+
+ assembler->Bind(&if_isnotfastarray);
+ {
+ Label if_istypedarray(assembler), if_isgeneric(assembler);
+
+ // If a is undefined, return CreateIterResultObject(undefined, true)
+ assembler->GotoIf(
+ assembler->WordEqual(array, assembler->UndefinedConstant()),
+ &allocate_iterator_result);
+
+ Node* array_type = assembler->LoadInstanceType(array);
+ assembler->Branch(
+ assembler->Word32Equal(array_type,
+ assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &if_istypedarray, &if_isgeneric);
+
+ assembler->Bind(&if_isgeneric);
+ {
+ Label if_wasfastarray(assembler);
+
+ Node* length = nullptr;
+ {
+ Variable var_length(assembler, MachineRepresentation::kTagged);
+ Label if_isarray(assembler), if_isnotarray(assembler), done(assembler);
+ assembler->Branch(
+ assembler->Word32Equal(array_type,
+ assembler->Int32Constant(JS_ARRAY_TYPE)),
+ &if_isarray, &if_isnotarray);
+
+ assembler->Bind(&if_isarray);
+ {
+ var_length.Bind(
+ assembler->LoadObjectField(array, JSArray::kLengthOffset));
+
+ // Invalidate protector cell if needed
+ assembler->Branch(
+ assembler->WordNotEqual(orig_map, assembler->UndefinedConstant()),
+ &if_wasfastarray, &done);
+
+ assembler->Bind(&if_wasfastarray);
+ {
+ Label if_invalid(assembler, Label::kDeferred);
+ // A fast array iterator transitioned to a slow iterator during
+ // iteration. Invalidate fast_array_iteration_prtoector cell to
+ // prevent potential deopt loops.
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kIteratedObjectMapOffset,
+ assembler->UndefinedConstant());
+ assembler->GotoIf(
+ assembler->Uint32LessThanOrEqual(
+ instance_type, assembler->Int32Constant(
+ JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+ &done);
+
+ Node* invalid = assembler->SmiConstant(
+ Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler->LoadRoot(
+ Heap::kFastArrayIterationProtectorRootIndex);
+ assembler->StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
+ invalid);
+ assembler->Goto(&done);
+ }
+ }
+
+ assembler->Bind(&if_isnotarray);
+ {
+ Node* length_string = assembler->HeapConstant(
+ assembler->isolate()->factory()->length_string());
+ Callable get_property =
+ CodeFactory::GetProperty(assembler->isolate());
+ Node* length =
+ assembler->CallStub(get_property, context, array, length_string);
+ Callable to_length = CodeFactory::ToLength(assembler->isolate());
+ var_length.Bind(assembler->CallStub(to_length, context, length));
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&done);
+ length = var_length.value();
+ }
+
+ assembler->GotoUnlessNumberLessThan(index, length, &set_done);
+
+ assembler->StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+ assembler->NumberInc(index));
+ var_done.Bind(assembler->FalseConstant());
+
+ assembler->Branch(
+ assembler->Uint32LessThanOrEqual(
+ instance_type,
+ assembler->Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+ &allocate_key_result, &generic_values);
+
+ assembler->Bind(&generic_values);
+ {
+ Callable get_property = CodeFactory::GetProperty(assembler->isolate());
+ var_value.Bind(
+ assembler->CallStub(get_property, context, array, index));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+ }
+
+ assembler->Bind(&if_istypedarray);
+ {
+ Node* length = nullptr;
+ {
+ Variable var_length(assembler, MachineRepresentation::kTagged);
+ Label if_isdetached(assembler, Label::kDeferred),
+ if_isnotdetached(assembler), done(assembler);
+
+ Node* buffer =
+ assembler->LoadObjectField(array, JSTypedArray::kBufferOffset);
+ assembler->Branch(assembler->IsDetachedBuffer(buffer), &if_isdetached,
+ &if_isnotdetached);
+
+ assembler->Bind(&if_isnotdetached);
+ {
+ var_length.Bind(
+ assembler->LoadObjectField(array, JSTypedArray::kLengthOffset));
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&if_isdetached);
+ {
+ // TODO(caitp): If IsDetached(buffer) is true, throw a TypeError, per
+ // https://github.com/tc39/ecma262/issues/713
+ var_length.Bind(assembler->SmiConstant(Smi::kZero));
+ assembler->Goto(&done);
+ }
+
+ assembler->Bind(&done);
+ length = var_length.value();
+ }
+ CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
+ CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+
+ assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kNextIndexOffset,
+ assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
+ assembler->BitcastTaggedToWord(one)));
+ var_done.Bind(assembler->FalseConstant());
+
+ Node* elements = assembler->LoadElements(array);
+ Node* base_ptr = assembler->LoadObjectField(
+ elements, FixedTypedArrayBase::kBasePointerOffset);
+ Node* external_ptr = assembler->LoadObjectField(
+ elements, FixedTypedArrayBase::kExternalPointerOffset);
+ Node* data_ptr = assembler->IntPtrAdd(base_ptr, external_ptr);
+
+ static int32_t kInstanceType[] = {
+ JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
+ JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+ };
+
+ Label uint8_values(assembler), int8_values(assembler),
+ uint16_values(assembler), int16_values(assembler),
+ uint32_values(assembler), int32_values(assembler),
+ float32_values(assembler), float64_values(assembler);
+ Label* kInstanceTypeHandlers[] = {
+ &allocate_key_result, &uint8_values, &uint8_values,
+ &int8_values, &uint16_values, &int16_values,
+ &uint32_values, &int32_values, &float32_values,
+ &float64_values, &uint8_values, &uint8_values,
+ &int8_values, &uint16_values, &int16_values,
+ &uint32_values, &int32_values, &float32_values,
+ &float64_values,
+ };
+
+ var_done.Bind(assembler->FalseConstant());
+ assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
+ kInstanceTypeHandlers, arraysize(kInstanceType));
+
+ assembler->Bind(&uint8_values);
+ {
+ Node* value_uint8 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, UINT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->SmiFromWord(value_uint8));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&int8_values);
+ {
+ Node* value_int8 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, INT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->SmiFromWord(value_int8));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&uint16_values);
+ {
+ Node* value_uint16 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, UINT16_ELEMENTS,
+ CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->SmiFromWord(value_uint16));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&int16_values);
+ {
+ Node* value_int16 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, INT16_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->SmiFromWord(value_int16));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+
+ assembler->Bind(&uint32_values);
+ {
+ Node* value_uint32 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, UINT32_ELEMENTS,
+ CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->ChangeUint32ToTagged(value_uint32));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+ assembler->Bind(&int32_values);
+ {
+ Node* value_int32 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, INT32_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->ChangeInt32ToTagged(value_int32));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+ assembler->Bind(&float32_values);
+ {
+ Node* value_float32 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, FLOAT32_ELEMENTS,
+ CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->AllocateHeapNumberWithValue(
+ assembler->ChangeFloat32ToFloat64(value_float32)));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+ assembler->Bind(&float64_values);
+ {
+ Node* value_float64 = assembler->LoadFixedTypedArrayElement(
+ data_ptr, index, FLOAT64_ELEMENTS,
+ CodeStubAssembler::SMI_PARAMETERS);
+ var_value.Bind(assembler->AllocateHeapNumberWithValue(value_float64));
+ assembler->Goto(&allocate_entry_if_needed);
+ }
+ }
+ }
+
+ assembler->Bind(&set_done);
+ {
+ assembler->StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kIteratedObjectOffset,
+ assembler->UndefinedConstant());
+ assembler->Goto(&allocate_iterator_result);
+ }
+
+ assembler->Bind(&allocate_key_result);
+ {
+ var_value.Bind(index);
+ var_done.Bind(assembler->FalseConstant());
+ assembler->Goto(&allocate_iterator_result);
+ }
+
+ assembler->Bind(&allocate_entry_if_needed);
+ {
+ assembler->GotoIf(
+ assembler->Int32GreaterThan(
+ instance_type,
+ assembler->Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
+ &allocate_iterator_result);
+
+ Node* elements = assembler->AllocateFixedArray(FAST_ELEMENTS,
+ assembler->Int32Constant(2));
+ assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(0),
+ index, SKIP_WRITE_BARRIER);
+ assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(1),
+ var_value.value(), SKIP_WRITE_BARRIER);
+
+ Node* entry = assembler->Allocate(JSArray::kSize);
+ Node* map = assembler->LoadContextElement(
+ assembler->LoadNativeContext(context),
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+
+ assembler->StoreMapNoWriteBarrier(entry, map);
+ assembler->StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
+ elements);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ entry, JSArray::kLengthOffset, assembler->SmiConstant(Smi::FromInt(2)));
+
+ var_value.Bind(entry);
+ assembler->Goto(&allocate_iterator_result);
+ }
+
+ assembler->Bind(&allocate_iterator_result);
+ {
+ Node* result = assembler->Allocate(JSIteratorResult::kSize);
+ Node* map =
+ assembler->LoadContextElement(assembler->LoadNativeContext(context),
+ Context::ITERATOR_RESULT_MAP_INDEX);
+ assembler->StoreMapNoWriteBarrier(result, map);
+ assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSIteratorResult::kValueOffset, var_value.value());
+ assembler->StoreObjectFieldNoWriteBarrier(
+ result, JSIteratorResult::kDoneOffset, var_done.value());
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&throw_bad_receiver);
+ {
+ // The {receiver} is not a valid JSArrayIterator.
+ Node* result = assembler->CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ "Array Iterator.prototype.next", TENURED)),
+ iterator);
+ assembler->Return(result);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index addf8ac291..ad367587b1 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -14,7 +14,7 @@ namespace internal {
// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
DCHECK(*target == target->native_context()->array_buffer_fun() ||
*target == target->native_context()->shared_array_buffer_fun());
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -25,7 +25,7 @@ BUILTIN(ArrayBufferConstructor) {
// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
BUILTIN(ArrayBufferConstructor_ConstructStub) {
HandleScope scope(isolate);
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
Handle<Object> length = args.atOrUndefined(isolate, 1);
DCHECK(*target == target->native_context()->array_buffer_fun() ||
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index 5f5bed1bda..e7ccf95973 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -22,7 +22,7 @@ BUILTIN(BooleanConstructor) {
BUILTIN(BooleanConstructor_ConstructStub) {
HandleScope scope(isolate);
Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
DCHECK(*target == target->native_context()->boolean_function());
Handle<JSObject> result;
diff --git a/deps/v8/src/builtins/builtins-conversion.cc b/deps/v8/src/builtins/builtins-conversion.cc
index 7fbe4f859e..0eaf79ca23 100644
--- a/deps/v8/src/builtins/builtins-conversion.cc
+++ b/deps/v8/src/builtins/builtins-conversion.cc
@@ -60,7 +60,7 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
// Verify that the {result} is actually a primitive.
Label if_resultisprimitive(assembler),
if_resultisnotprimitive(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordIsSmi(result), &if_resultisprimitive);
+ assembler->GotoIf(assembler->TaggedIsSmi(result), &if_resultisprimitive);
Node* result_instance_type = assembler->LoadInstanceType(result);
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
assembler->Branch(assembler->Int32LessThanOrEqual(
@@ -162,7 +162,7 @@ void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
Label is_number(assembler);
Label runtime(assembler);
- assembler->GotoIf(assembler->WordIsSmi(input), &is_number);
+ assembler->GotoIf(assembler->TaggedIsSmi(input), &is_number);
Node* input_map = assembler->LoadMap(input);
Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
@@ -183,11 +183,7 @@ void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
}
assembler->Bind(&is_number);
- {
- // TODO(tebbi): inline as soon as NumberToString is in the CodeStubAssembler
- Callable callable = CodeFactory::NumberToString(assembler->isolate());
- assembler->Return(assembler->CallStub(callable, context, input));
- }
+ { assembler->Return(assembler->NumberToString(context, input)); }
assembler->Bind(&not_heap_number);
{
@@ -252,15 +248,10 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
// Check if the {method} is callable.
Label if_methodiscallable(assembler),
if_methodisnotcallable(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordIsSmi(method), &if_methodisnotcallable);
+ assembler->GotoIf(assembler->TaggedIsSmi(method), &if_methodisnotcallable);
Node* method_map = assembler->LoadMap(method);
- Node* method_bit_field = assembler->LoadMapBitField(method_map);
- assembler->Branch(
- assembler->Word32Equal(
- assembler->Word32And(method_bit_field, assembler->Int32Constant(
- 1 << Map::kIsCallable)),
- assembler->Int32Constant(0)),
- &if_methodisnotcallable, &if_methodiscallable);
+ assembler->Branch(assembler->IsCallableMap(method_map),
+ &if_methodiscallable, &if_methodisnotcallable);
assembler->Bind(&if_methodiscallable);
{
@@ -270,7 +261,7 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
var_result.Bind(result);
// Return the {result} if it is a primitive.
- assembler->GotoIf(assembler->WordIsSmi(result), &return_result);
+ assembler->GotoIf(assembler->TaggedIsSmi(result), &return_result);
Node* result_instance_type = assembler->LoadInstanceType(result);
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
assembler->GotoIf(assembler->Int32LessThanOrEqual(
@@ -319,5 +310,168 @@ void Builtins::Generate_ToBoolean(CodeStubAssembler* assembler) {
assembler->Return(assembler->BooleanConstant(false));
}
+void Builtins::Generate_ToLength(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(1);
+
+ // We might need to loop once for ToNumber conversion.
+ Variable var_len(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_len);
+ var_len.Bind(assembler->Parameter(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Shared entry points.
+ Label return_len(assembler),
+ return_two53minus1(assembler, Label::kDeferred),
+ return_zero(assembler, Label::kDeferred);
+
+ // Load the current {len} value.
+ Node* len = var_len.value();
+
+ // Check if {len} is a positive Smi.
+ assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+
+ // Check if {len} is a (negative) Smi.
+ assembler->GotoIf(assembler->TaggedIsSmi(len), &return_zero);
+
+ // Check if {len} is a HeapNumber.
+ Label if_lenisheapnumber(assembler),
+ if_lenisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
+ &if_lenisheapnumber, &if_lenisnotheapnumber);
+
+ assembler->Bind(&if_lenisheapnumber);
+ {
+ // Load the floating-point value of {len}.
+ Node* len_value = assembler->LoadHeapNumberValue(len);
+
+ // Check if {len} is not greater than zero.
+ assembler->GotoUnless(assembler->Float64GreaterThan(
+ len_value, assembler->Float64Constant(0.0)),
+ &return_zero);
+
+ // Check if {len} is greater than or equal to 2^53-1.
+ assembler->GotoIf(
+ assembler->Float64GreaterThanOrEqual(
+ len_value, assembler->Float64Constant(kMaxSafeInteger)),
+ &return_two53minus1);
+
+ // Round the {len} towards -Infinity.
+ Node* value = assembler->Float64Floor(len_value);
+ Node* result = assembler->ChangeFloat64ToTagged(value);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_lenisnotheapnumber);
+ {
+ // Need to convert {len} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_len.Bind(assembler->CallStub(callable, context, len));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&return_len);
+ assembler->Return(var_len.value());
+
+ assembler->Bind(&return_two53minus1);
+ assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+
+ assembler->Bind(&return_zero);
+ assembler->Return(assembler->SmiConstant(Smi::kZero));
+ }
+}
+
+void Builtins::Generate_ToInteger(CodeStubAssembler* assembler) {
+ typedef TypeConversionDescriptor Descriptor;
+
+ compiler::Node* input = assembler->Parameter(Descriptor::kArgument);
+ compiler::Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(assembler->ToInteger(context, input));
+}
+
+// ES6 section 7.1.13 ToObject (argument)
+void Builtins::Generate_ToObject(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef TypeConversionDescriptor Descriptor;
+
+ Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
+ if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
+ if_wrapjsvalue(assembler);
+
+ Node* object = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ Variable constructor_function_index_var(assembler,
+ MachineType::PointerRepresentation());
+
+ assembler->Branch(assembler->TaggedIsSmi(object), &if_number, &if_notsmi);
+
+ assembler->Bind(&if_notsmi);
+ Node* map = assembler->LoadMap(object);
+
+ assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
+
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
+ &if_jsreceiver);
+
+ Node* constructor_function_index =
+ assembler->LoadMapConstructorFunctionIndex(map);
+ assembler->GotoIf(assembler->WordEqual(constructor_function_index,
+ assembler->IntPtrConstant(
+ Map::kNoConstructorFunctionIndex)),
+ &if_noconstructor);
+ constructor_function_index_var.Bind(constructor_function_index);
+ assembler->Goto(&if_wrapjsvalue);
+
+ assembler->Bind(&if_number);
+ constructor_function_index_var.Bind(
+ assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
+ assembler->Goto(&if_wrapjsvalue);
+
+ assembler->Bind(&if_wrapjsvalue);
+ Node* native_context = assembler->LoadNativeContext(context);
+ Node* constructor = assembler->LoadFixedArrayElement(
+ native_context, constructor_function_index_var.value(), 0,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* initial_map = assembler->LoadObjectField(
+ constructor, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* js_value = assembler->Allocate(JSValue::kSize);
+ assembler->StoreMapNoWriteBarrier(js_value, initial_map);
+ assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
+ assembler->Return(js_value);
+
+ assembler->Bind(&if_noconstructor);
+ assembler->TailCallRuntime(
+ Runtime::kThrowUndefinedOrNullToObject, context,
+ assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ "ToObject", TENURED)));
+
+ assembler->Bind(&if_jsreceiver);
+ assembler->Return(object);
+}
+
+// ES6 section 12.5.5 typeof operator
+void Builtins::Generate_Typeof(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef TypeofDescriptor Descriptor;
+
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(assembler->Typeof(object, context));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 3d14e31d3a..45a5fd91a8 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -23,7 +23,7 @@ BUILTIN(DataViewConstructor) {
// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
BUILTIN(DataViewConstructor_ConstructStub) {
HandleScope scope(isolate);
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
Handle<Object> buffer = args.atOrUndefined(isolate, 1);
Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
@@ -88,7 +88,7 @@ BUILTIN(DataViewConstructor_ConstructStub) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
JSObject::New(target, new_target));
for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
- Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
+ Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::kZero);
}
// 12. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 205c8c971f..949620b6b2 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -203,7 +203,7 @@ BUILTIN(DateConstructor) {
BUILTIN(DateConstructor_ConstructStub) {
HandleScope scope(isolate);
int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
double time_val;
if (argc == 0) {
@@ -919,7 +919,7 @@ void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
Label receiver_not_date(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordIsSmi(receiver), &receiver_not_date);
+ assembler->GotoIf(assembler->TaggedIsSmi(receiver), &receiver_not_date);
Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
assembler->GotoIf(
assembler->Word32NotEqual(receiver_instance_type,
diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc
index c2a7b99035..24ae56bd06 100644
--- a/deps/v8/src/builtins/builtins-error.cc
+++ b/deps/v8/src/builtins/builtins-error.cc
@@ -28,7 +28,7 @@ BUILTIN(ErrorConstructor) {
}
RETURN_RESULT_OR_FAILURE(
- isolate, ErrorUtils::Construct(isolate, args.target<JSFunction>(),
+ isolate, ErrorUtils::Construct(isolate, args.target(),
Handle<Object>::cast(args.new_target()),
args.atOrUndefined(isolate, 1), mode,
caller, false));
@@ -55,7 +55,7 @@ BUILTIN(ErrorCaptureStackTrace) {
Handle<Object> stack_trace =
isolate->CaptureSimpleStackTrace(object, mode, caller);
- if (!stack_trace->IsJSArray()) return *isolate->factory()->undefined_value();
+ if (!stack_trace->IsJSArray()) return isolate->heap()->undefined_value();
Handle<Object> formatted_stack_trace;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 0a631bff5c..9a8ee796b5 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -21,7 +21,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
DCHECK_LE(1, args.length());
int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
@@ -198,7 +198,7 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
if (!target->IsJSFunction() ||
length_lookup.state() != LookupIterator::ACCESSOR ||
!length_lookup.GetAccessors()->IsAccessorInfo()) {
- Handle<Object> length(Smi::FromInt(0), isolate);
+ Handle<Object> length(Smi::kZero, isolate);
Maybe<PropertyAttributes> attributes =
JSReceiver::GetPropertyAttributes(&length_lookup);
if (!attributes.IsJust()) return isolate->heap()->exception();
diff --git a/deps/v8/src/builtins/builtins-generator.cc b/deps/v8/src/builtins/builtins-generator.cc
index 93b2e48cbd..fe1f2d2304 100644
--- a/deps/v8/src/builtins/builtins-generator.cc
+++ b/deps/v8/src/builtins/builtins-generator.cc
@@ -26,7 +26,8 @@ void Generate_GeneratorPrototypeResume(
// Check if the {receiver} is actually a JSGeneratorObject.
Label if_receiverisincompatible(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+ assembler->GotoIf(assembler->TaggedIsSmi(receiver),
+ &if_receiverisincompatible);
Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
assembler->GotoUnless(assembler->Word32Equal(
receiver_instance_type,
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 2205788cfc..1fa0967aa9 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -83,7 +83,7 @@ BUILTIN(GlobalUnescape) {
BUILTIN(GlobalEval) {
HandleScope scope(isolate);
Handle<Object> x = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSFunction> target = args.target();
Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
if (!x->IsString()) return *x;
if (!Builtins::AllowDynamicFunction(isolate, target, target_global_proxy)) {
@@ -121,7 +121,7 @@ void Builtins::Generate_GlobalIsFinite(CodeStubAssembler* assembler) {
Node* num = var_num.value();
// Check if {num} is a Smi or a HeapObject.
- assembler->GotoIf(assembler->WordIsSmi(num), &return_true);
+ assembler->GotoIf(assembler->TaggedIsSmi(num), &return_true);
// Check if {num} is a HeapNumber.
Label if_numisheapnumber(assembler),
@@ -176,7 +176,7 @@ void Builtins::Generate_GlobalIsNaN(CodeStubAssembler* assembler) {
Node* num = var_num.value();
// Check if {num} is a Smi or a HeapObject.
- assembler->GotoIf(assembler->WordIsSmi(num), &return_false);
+ assembler->GotoIf(assembler->TaggedIsSmi(num), &return_false);
// Check if {num} is a HeapNumber.
Label if_numisheapnumber(assembler),
diff --git a/deps/v8/src/builtins/builtins-handler.cc b/deps/v8/src/builtins/builtins-handler.cc
index ebbc9784a1..88597f8add 100644
--- a/deps/v8/src/builtins/builtins-handler.cc
+++ b/deps/v8/src/builtins/builtins-handler.cc
@@ -2,18 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/keyed-store-generic.h"
namespace v8 {
namespace internal {
-void Builtins::Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMegamorphic(masm);
-}
-
void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
CodeStubAssembler* assembler) {
typedef compiler::Node Node;
@@ -44,6 +41,32 @@ void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
}
+void KeyedStoreICMegamorphic(CodeStubAssembler* assembler, LanguageMode mode) {
+ typedef compiler::Node Node;
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+ vector);
+ KeyedStoreGenericGenerator::Generate(assembler, &p, mode);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
+ CodeStubAssembler* assembler) {
+ KeyedStoreICMegamorphic(assembler, SLOPPY);
+}
+
+void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict_TF(
+ CodeStubAssembler* assembler) {
+ KeyedStoreICMegamorphic(assembler, STRICT);
+}
+
void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
KeyedStoreIC::GenerateMiss(masm);
}
diff --git a/deps/v8/src/builtins/builtins-iterator.cc b/deps/v8/src/builtins/builtins-iterator.cc
index 7b91e364eb..7f74c20667 100644
--- a/deps/v8/src/builtins/builtins-iterator.cc
+++ b/deps/v8/src/builtins/builtins-iterator.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/frames-inl.h"
namespace v8 {
namespace internal {
@@ -13,5 +14,55 @@ void Builtins::Generate_IteratorPrototypeIterator(
assembler->Return(assembler->Parameter(0));
}
+BUILTIN(ModuleNamespaceIterator) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<Object> receiver = args.at<Object>(0);
+
+ if (!receiver->IsJSModuleNamespace()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->iterator_symbol(), receiver));
+ }
+ auto ns = Handle<JSModuleNamespace>::cast(receiver);
+
+ Handle<FixedArray> names =
+ KeyAccumulator::GetKeys(ns, KeyCollectionMode::kOwnOnly, SKIP_SYMBOLS)
+ .ToHandleChecked();
+ return *isolate->factory()->NewJSFixedArrayIterator(names);
+}
+
+BUILTIN(FixedArrayIteratorNext) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<Object> receiver = args.at<Object>(0);
+
+ // It is an error if this function is called on anything other than the
+ // particular iterator object for which the function was created.
+ if (!receiver->IsJSFixedArrayIterator() ||
+ Handle<JSFixedArrayIterator>::cast(receiver)->initial_next() !=
+ *args.target()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->next_string(), receiver));
+ }
+
+ auto iterator = Handle<JSFixedArrayIterator>::cast(receiver);
+ Handle<Object> value;
+ bool done;
+
+ int index = iterator->index();
+ if (index < iterator->array()->length()) {
+ value = handle(iterator->array()->get(index), isolate);
+ done = false;
+ iterator->set_index(index + 1);
+ } else {
+ value = isolate->factory()->undefined_value();
+ done = true;
+ }
+
+ return *isolate->factory()->NewJSIteratorResult(value, done);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index e8d429ebac..30f12ba12c 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -15,99 +15,94 @@ namespace internal {
// ES6 section - 20.2.2.1 Math.abs ( x )
void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
- using compiler::Node;
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Abs(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
-
-// ES6 section 20.2.2.2 Math.acos ( x )
-void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Acos(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
-
-// ES6 section 20.2.2.3 Math.acosh ( x )
-void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Acosh(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
-
-// ES6 section 20.2.2.4 Math.asin ( x )
-void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
- using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
- Node* x = assembler->Parameter(1);
Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Asin(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
-// ES6 section 20.2.2.5 Math.asinh ( x )
-void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
- using compiler::Node;
+ // We might need to loop once for ToNumber conversion.
+ Variable var_x(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_x);
+ var_x.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {x} value.
+ Node* x = var_x.value();
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Asinh(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
+ // Check if {x} is a Smi or a HeapObject.
+ Label if_xissmi(assembler), if_xisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
-// ES6 section 20.2.2.6 Math.atan ( x )
-void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
- using compiler::Node;
+ assembler->Bind(&if_xissmi);
+ {
+ // Check if {x} is already positive.
+ Label if_xispositive(assembler), if_xisnotpositive(assembler);
+ assembler->BranchIfSmiLessThanOrEqual(
+ assembler->SmiConstant(Smi::FromInt(0)), x, &if_xispositive,
+ &if_xisnotpositive);
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Atan(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
+ assembler->Bind(&if_xispositive);
+ {
+ // Just return the input {x}.
+ assembler->Return(x);
+ }
-// ES6 section 20.2.2.7 Math.atanh ( x )
-void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
- using compiler::Node;
+ assembler->Bind(&if_xisnotpositive);
+ {
+ // Try to negate the {x} value.
+ Node* pair = assembler->IntPtrSubWithOverflow(
+ assembler->IntPtrConstant(0), assembler->BitcastTaggedToWord(x));
+ Node* overflow = assembler->Projection(1, pair);
+ Label if_overflow(assembler, Label::kDeferred),
+ if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ {
+ // There is a Smi representation for negated {x}.
+ Node* result = assembler->Projection(0, pair);
+ result = assembler->BitcastWordToTagged(result);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_overflow);
+ {
+ Node* result = assembler->NumberConstant(0.0 - Smi::kMinValue);
+ assembler->Return(result);
+ }
+ }
+ }
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Atanh(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
+ assembler->Bind(&if_xisnotsmi);
+ {
+ // Check if {x} is a HeapNumber.
+ Label if_xisheapnumber(assembler),
+ if_xisnotheapnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->WordEqual(assembler->LoadMap(x),
+ assembler->HeapNumberMapConstant()),
+ &if_xisheapnumber, &if_xisnotheapnumber);
-// ES6 section 20.2.2.8 Math.atan2 ( y, x )
-void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
- using compiler::Node;
+ assembler->Bind(&if_xisheapnumber);
+ {
+ Node* x_value = assembler->LoadHeapNumberValue(x);
+ Node* value = assembler->Float64Abs(x_value);
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
+ assembler->Return(result);
+ }
- Node* y = assembler->Parameter(1);
- Node* x = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Atan2(y_value, x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ assembler->Bind(&if_xisnotheapnumber);
+ {
+ // Need to convert {x} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_x.Bind(assembler->CallStub(callable, context, x));
+ assembler->Goto(&loop);
+ }
+ }
+ }
}
namespace {
@@ -133,7 +128,7 @@ void Generate_MathRoundingOperation(
// Check if {x} is a Smi or a HeapObject.
Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
assembler->Bind(&if_xissmi);
{
@@ -171,8 +166,65 @@ void Generate_MathRoundingOperation(
}
}
+void Generate_MathUnaryOperation(
+ CodeStubAssembler* assembler,
+ compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
+ typedef compiler::Node Node;
+
+ Node* x = assembler->Parameter(1);
+ Node* context = assembler->Parameter(4);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = (assembler->*float64op)(x_value);
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
+ assembler->Return(result);
+}
+
} // namespace
+// ES6 section 20.2.2.2 Math.acos ( x )
+void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acos);
+}
+
+// ES6 section 20.2.2.3 Math.acosh ( x )
+void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acosh);
+}
+
+// ES6 section 20.2.2.4 Math.asin ( x )
+void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asin);
+}
+
+// ES6 section 20.2.2.5 Math.asinh ( x )
+void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asinh);
+}
+
+// ES6 section 20.2.2.6 Math.atan ( x )
+void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atan);
+}
+
+// ES6 section 20.2.2.7 Math.atanh ( x )
+void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atanh);
+}
+
+// ES6 section 20.2.2.8 Math.atan2 ( y, x )
+void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* y = assembler->Parameter(1);
+ Node* x = assembler->Parameter(2);
+ Node* context = assembler->Parameter(5);
+ Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
+ Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* value = assembler->Float64Atan2(y_value, x_value);
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
+ assembler->Return(result);
+}
+
// ES6 section 20.2.2.10 Math.ceil ( x )
void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
@@ -180,14 +232,7 @@ void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
// ES6 section 20.2.2.9 Math.cbrt ( x )
void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Cbrt(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cbrt);
}
// ES6 section 20.2.2.11 Math.clz32 ( x )
@@ -214,7 +259,7 @@ void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
// Check if {x} is a Smi or a HeapObject.
Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
assembler->Bind(&if_xissmi);
{
@@ -260,38 +305,22 @@ void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
// ES6 section 20.2.2.12 Math.cos ( x )
void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Cos(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cos);
}
// ES6 section 20.2.2.13 Math.cosh ( x )
void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Cosh(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cosh);
}
// ES6 section 20.2.2.14 Math.exp ( x )
void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
- using compiler::Node;
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Exp);
+}
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Exp(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+// ES6 section 20.2.2.15 Math.expm1 ( x )
+void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Expm1);
}
// ES6 section 20.2.2.16 Math.floor ( x )
@@ -308,7 +337,7 @@ void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
Node* value = assembler->ChangeFloat32ToFloat64(value32);
- Node* result = assembler->ChangeFloat64ToTagged(value);
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
assembler->Return(result);
}
@@ -316,7 +345,7 @@ void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
BUILTIN(MathHypot) {
HandleScope scope(isolate);
int const length = args.length() - 1;
- if (length == 0) return Smi::FromInt(0);
+ if (length == 0) return Smi::kZero;
DCHECK_LT(0, length);
double max = 0;
bool one_arg_is_nan = false;
@@ -341,11 +370,11 @@ BUILTIN(MathHypot) {
}
if (one_arg_is_nan) {
- return *isolate->factory()->nan_value();
+ return isolate->heap()->nan_value();
}
if (max == 0) {
- return Smi::FromInt(0);
+ return Smi::kZero;
}
DCHECK_GT(max, 0);
@@ -380,62 +409,22 @@ void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
// ES6 section 20.2.2.20 Math.log ( x )
void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Log(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log);
}
// ES6 section 20.2.2.21 Math.log1p ( x )
void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Log1p(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log1p);
}
// ES6 section 20.2.2.22 Math.log10 ( x )
void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Log10(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log10);
}
// ES6 section 20.2.2.23 Math.log2 ( x )
void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Log2(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
-}
-
-// ES6 section 20.2.2.15 Math.expm1 ( x )
-void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Expm1(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log2);
}
// ES6 section 20.2.2.26 Math.pow ( x, y )
@@ -452,6 +441,46 @@ void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
assembler->Return(result);
}
+// ES6 section 20.2.2.27 Math.random ( )
+void Builtins::Generate_MathRandom(CodeStubAssembler* assembler) {
+ using compiler::Node;
+
+ Node* context = assembler->Parameter(3);
+ Node* native_context = assembler->LoadNativeContext(context);
+
+ // Load cache index.
+ CodeStubAssembler::Variable smi_index(assembler,
+ MachineRepresentation::kTagged);
+ smi_index.Bind(assembler->LoadContextElement(
+ native_context, Context::MATH_RANDOM_INDEX_INDEX));
+
+ // Cached random numbers are exhausted if index is 0. Go to slow path.
+ CodeStubAssembler::Label if_cached(assembler);
+ assembler->GotoIf(assembler->SmiAbove(smi_index.value(),
+ assembler->SmiConstant(Smi::kZero)),
+ &if_cached);
+
+ // Cache exhausted, populate the cache. Return value is the new index.
+ smi_index.Bind(
+ assembler->CallRuntime(Runtime::kGenerateRandomNumbers, context));
+ assembler->Goto(&if_cached);
+
+ // Compute next index by decrement.
+ assembler->Bind(&if_cached);
+ Node* new_smi_index = assembler->SmiSub(
+ smi_index.value(), assembler->SmiConstant(Smi::FromInt(1)));
+ assembler->StoreContextElement(
+ native_context, Context::MATH_RANDOM_INDEX_INDEX, new_smi_index);
+
+ // Load and return next cached random number.
+ Node* array = assembler->LoadContextElement(native_context,
+ Context::MATH_RANDOM_CACHE_INDEX);
+ Node* random = assembler->LoadFixedDoubleArrayElement(
+ array, new_smi_index, MachineType::Float64(), 0,
+ CodeStubAssembler::SMI_PARAMETERS);
+ assembler->Return(assembler->AllocateHeapNumberWithValue(random));
+}
+
// ES6 section 20.2.2.28 Math.round ( x )
void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
@@ -486,62 +515,27 @@ void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
// ES6 section 20.2.2.30 Math.sin ( x )
void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Sin(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sin);
}
// ES6 section 20.2.2.31 Math.sinh ( x )
void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Sinh(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sinh);
}
// ES6 section 20.2.2.32 Math.sqrt ( x )
void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Sqrt(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sqrt);
}
// ES6 section 20.2.2.33 Math.tan ( x )
void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Tan(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tan);
}
// ES6 section 20.2.2.34 Math.tanh ( x )
void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Tanh(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tanh);
}
// ES6 section 20.2.2.35 Math.trunc ( x )
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 17628445d1..3e2bc556b6 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
namespace v8 {
namespace internal {
@@ -21,7 +22,7 @@ void Builtins::Generate_NumberIsFinite(CodeStubAssembler* assembler) {
Label return_true(assembler), return_false(assembler);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+ assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
assembler->GotoUnless(
@@ -52,7 +53,7 @@ void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
Label return_true(assembler), return_false(assembler);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+ assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
assembler->GotoUnless(
@@ -67,9 +68,10 @@ void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
Node* integer = assembler->Float64Trunc(number_value);
// Check if {number}s value matches the integer (ruling out the infinities).
- assembler->BranchIfFloat64Equal(assembler->Float64Sub(number_value, integer),
- assembler->Float64Constant(0.0), &return_true,
- &return_false);
+ assembler->Branch(
+ assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
+ assembler->Float64Constant(0.0)),
+ &return_true, &return_false);
assembler->Bind(&return_true);
assembler->Return(assembler->BooleanConstant(true));
@@ -88,7 +90,7 @@ void Builtins::Generate_NumberIsNaN(CodeStubAssembler* assembler) {
Label return_true(assembler), return_false(assembler);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->WordIsSmi(number), &return_false);
+ assembler->GotoIf(assembler->TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
assembler->GotoUnless(
@@ -117,7 +119,7 @@ void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
Label return_true(assembler), return_false(assembler);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->WordIsSmi(number), &return_true);
+ assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
assembler->GotoUnless(
@@ -138,9 +140,10 @@ void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
&return_false);
// Check if the {integer} value is in safe integer range.
- assembler->BranchIfFloat64LessThanOrEqual(
- assembler->Float64Abs(integer),
- assembler->Float64Constant(kMaxSafeInteger), &return_true, &return_false);
+ assembler->Branch(assembler->Float64LessThanOrEqual(
+ assembler->Float64Abs(integer),
+ assembler->Float64Constant(kMaxSafeInteger)),
+ &return_true, &return_false);
assembler->Bind(&return_true);
assembler->Return(assembler->BooleanConstant(true));
@@ -149,6 +152,209 @@ void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
assembler->Return(assembler->BooleanConstant(false));
}
+// ES6 section 20.1.2.12 Number.parseFloat ( string )
+void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* context = assembler->Parameter(4);
+
+ // We might need to loop once for ToString conversion.
+ Variable var_input(assembler, MachineRepresentation::kTagged);
+ Label loop(assembler, &var_input);
+ var_input.Bind(assembler->Parameter(1));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {input} value.
+ Node* input = var_input.value();
+
+ // Check if the {input} is a HeapObject or a Smi.
+ Label if_inputissmi(assembler), if_inputisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(input), &if_inputissmi,
+ &if_inputisnotsmi);
+
+ assembler->Bind(&if_inputissmi);
+ {
+ // The {input} is already a Number, no need to do anything.
+ assembler->Return(input);
+ }
+
+ assembler->Bind(&if_inputisnotsmi);
+ {
+ // The {input} is a HeapObject, check if it's already a String.
+ Label if_inputisstring(assembler), if_inputisnotstring(assembler);
+ Node* input_map = assembler->LoadMap(input);
+ Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+ assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
+ &if_inputisstring, &if_inputisnotstring);
+
+ assembler->Bind(&if_inputisstring);
+ {
+ // The {input} is already a String, check if {input} contains
+ // a cached array index.
+ Label if_inputcached(assembler), if_inputnotcached(assembler);
+ Node* input_hash = assembler->LoadNameHashField(input);
+ Node* input_bit = assembler->Word32And(
+ input_hash,
+ assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
+ assembler->Branch(
+ assembler->Word32Equal(input_bit, assembler->Int32Constant(0)),
+ &if_inputcached, &if_inputnotcached);
+
+ assembler->Bind(&if_inputcached);
+ {
+ // Just return the {input}'s cached array index.
+ Node* input_array_index =
+ assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
+ input_hash);
+ assembler->Return(assembler->SmiTag(input_array_index));
+ }
+
+ assembler->Bind(&if_inputnotcached);
+ {
+ // Need to fall back to the runtime to convert {input} to double.
+ assembler->Return(assembler->CallRuntime(Runtime::kStringParseFloat,
+ context, input));
+ }
+ }
+
+ assembler->Bind(&if_inputisnotstring);
+ {
+ // The {input} is neither a String nor a Smi, check for HeapNumber.
+ Label if_inputisnumber(assembler),
+ if_inputisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
+ &if_inputisnumber, &if_inputisnotnumber);
+
+ assembler->Bind(&if_inputisnumber);
+ {
+ // The {input} is already a Number, take care of -0.
+ Label if_inputiszero(assembler), if_inputisnotzero(assembler);
+ Node* input_value = assembler->LoadHeapNumberValue(input);
+ assembler->Branch(assembler->Float64Equal(
+ input_value, assembler->Float64Constant(0.0)),
+ &if_inputiszero, &if_inputisnotzero);
+
+ assembler->Bind(&if_inputiszero);
+ assembler->Return(assembler->SmiConstant(0));
+
+ assembler->Bind(&if_inputisnotzero);
+ assembler->Return(input);
+ }
+
+ assembler->Bind(&if_inputisnotnumber);
+ {
+ // Need to convert the {input} to String first.
+ // TODO(bmeurer): This could be more efficient if necessary.
+ Callable callable = CodeFactory::ToString(assembler->isolate());
+ var_input.Bind(assembler->CallStub(callable, context, input));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+}
+
+// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
+void Builtins::Generate_NumberParseInt(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* input = assembler->Parameter(1);
+ Node* radix = assembler->Parameter(2);
+ Node* context = assembler->Parameter(5);
+
+ // Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
+ Label if_radix10(assembler), if_generic(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->WordEqual(radix, assembler->UndefinedConstant()),
+ &if_radix10);
+ assembler->GotoIf(
+ assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(10))),
+ &if_radix10);
+ assembler->GotoIf(
+ assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(0))),
+ &if_radix10);
+ assembler->Goto(&if_generic);
+
+ assembler->Bind(&if_radix10);
+ {
+ // Check if we can avoid the ToString conversion on {input}.
+ Label if_inputissmi(assembler), if_inputisheapnumber(assembler),
+ if_inputisstring(assembler);
+ assembler->GotoIf(assembler->TaggedIsSmi(input), &if_inputissmi);
+ Node* input_map = assembler->LoadMap(input);
+ assembler->GotoIf(
+ assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
+ &if_inputisheapnumber);
+ Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+ assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
+ &if_inputisstring, &if_generic);
+
+ assembler->Bind(&if_inputissmi);
+ {
+ // Just return the {input}.
+ assembler->Return(input);
+ }
+
+ assembler->Bind(&if_inputisheapnumber);
+ {
+ // Check if the {input} value is in Signed32 range.
+ Label if_inputissigned32(assembler);
+ Node* input_value = assembler->LoadHeapNumberValue(input);
+ Node* input_value32 = assembler->TruncateFloat64ToWord32(input_value);
+ assembler->GotoIf(
+ assembler->Float64Equal(
+ input_value, assembler->ChangeInt32ToFloat64(input_value32)),
+ &if_inputissigned32);
+
+ // Check if the absolute {input} value is in the ]0.01,1e9[ range.
+ Node* input_value_abs = assembler->Float64Abs(input_value);
+
+ assembler->GotoUnless(
+ assembler->Float64LessThan(input_value_abs,
+ assembler->Float64Constant(1e9)),
+ &if_generic);
+ assembler->Branch(assembler->Float64LessThan(
+ assembler->Float64Constant(0.01), input_value_abs),
+ &if_inputissigned32, &if_generic);
+
+ // Return the truncated int32 value, and return the tagged result.
+ assembler->Bind(&if_inputissigned32);
+ Node* result = assembler->ChangeInt32ToTagged(input_value32);
+ assembler->Return(result);
+ }
+
+ assembler->Bind(&if_inputisstring);
+ {
+ // Check if the String {input} has a cached array index.
+ Node* input_hash = assembler->LoadNameHashField(input);
+ Node* input_bit = assembler->Word32And(
+ input_hash,
+ assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
+ assembler->GotoIf(
+ assembler->Word32NotEqual(input_bit, assembler->Int32Constant(0)),
+ &if_generic);
+
+ // Return the cached array index as result.
+ Node* input_index =
+ assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
+ input_hash);
+ Node* result = assembler->SmiTag(input_index);
+ assembler->Return(result);
+ }
+ }
+
+ assembler->Bind(&if_generic);
+ {
+ Node* result =
+ assembler->CallRuntime(Runtime::kStringParseInt, context, input, radix);
+ assembler->Return(result);
+ }
+}
+
// ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
BUILTIN(NumberPrototypeToExponential) {
HandleScope scope(isolate);
@@ -369,5 +575,1244 @@ void Builtins::Generate_NumberPrototypeValueOf(CodeStubAssembler* assembler) {
assembler->Return(result);
}
+// static
+void Builtins::Generate_Add(CodeStubAssembler* assembler) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* left = assembler->Parameter(0);
+ Node* right = assembler->Parameter(1);
+ Node* context = assembler->Parameter(2);
+
+ // Shared entry for floating point addition.
+ Label do_fadd(assembler);
+ Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
+ var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive, ToString and/or
+ // ToNumber conversions.
+ Variable var_lhs(assembler, MachineRepresentation::kTagged),
+ var_rhs(assembler, MachineRepresentation::kTagged),
+ var_result(assembler, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(assembler, 2, loop_vars), end(assembler),
+ string_add_convert_left(assembler, Label::kDeferred),
+ string_add_convert_right(assembler, Label::kDeferred);
+ var_lhs.Bind(left);
+ var_rhs.Bind(right);
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ Node* lhs = var_lhs.value();
+ Node* rhs = var_rhs.value();
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
+ &if_lhsisnotsmi);
+
+ assembler->Bind(&if_lhsissmi);
+ {
+ // Check if the {rhs} is also a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Try fast Smi addition first.
+ Node* pair = assembler->IntPtrAddWithOverflow(
+ assembler->BitcastTaggedToWord(lhs),
+ assembler->BitcastTaggedToWord(rhs));
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_notoverflow);
+ var_result.Bind(assembler->BitcastWordToTaggedSigned(
+ assembler->Projection(0, pair)));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+
+ // Check if the {rhs} is a String.
+ Label if_rhsisstring(assembler, Label::kDeferred),
+ if_rhsisnotstring(assembler, Label::kDeferred);
+ assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_left);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // Check if {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing no hint.
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert {rhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotsmi);
+ {
+ // Load the map and instance type of {lhs}.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
+ assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
+ &if_lhsisstring, &if_lhsisnotstring);
+
+ assembler->Bind(&if_lhsisstring);
+ {
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_right);
+ }
+
+ assembler->Bind(&if_lhsisnotstring);
+ {
+ // Check if {rhs} is a Smi.
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
+
+ assembler->Bind(&if_rhsissmi);
+ {
+ // Check if {lhs} is a Number.
+ Label if_lhsisnumber(assembler),
+ if_lhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Word32Equal(
+ lhs_instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // The {lhs} is neither a Number nor a String, and the {rhs} is a
+ // Smi.
+ Label if_lhsisreceiver(assembler, Label::kDeferred),
+ if_lhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ assembler->Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing no hint.
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_lhsisnotreceiver);
+ {
+ // Convert {lhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_rhsisnotsmi);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+
+ // Check if {rhs} is a String.
+ Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
+ assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
+ &if_rhsisstring, &if_rhsisnotstring);
+
+ assembler->Bind(&if_rhsisstring);
+ {
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ assembler->Goto(&string_add_convert_left);
+ }
+
+ assembler->Bind(&if_rhsisnotstring);
+ {
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
+ assembler->Branch(assembler->Word32Equal(
+ lhs_instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ assembler->Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(assembler),
+ if_rhsisnotnumber(assembler, Label::kDeferred);
+ assembler->Branch(assembler->Word32Equal(
+ rhs_instance_type,
+ assembler->Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ assembler->Bind(&if_rhsisnumber);
+ {
+ // Perform a floating point addition.
+ var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+ assembler->Goto(&do_fadd);
+ }
+
+ assembler->Bind(&if_rhsisnotnumber);
+ {
+ // Check if {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing no hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert {rhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // Check if {lhs} is a JSReceiver.
+ Label if_lhsisreceiver(assembler, Label::kDeferred),
+ if_lhsisnotreceiver(assembler);
+ assembler->Branch(
+ assembler->IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ assembler->Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing no hint.
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_lhsisnotreceiver);
+ {
+ // Check if {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(assembler, Label::kDeferred),
+ if_rhsisnotreceiver(assembler, Label::kDeferred);
+ assembler->Branch(
+ assembler->IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ assembler->Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing no hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ assembler->isolate());
+ var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+ assembler->Goto(&loop);
+ }
+
+ assembler->Bind(&if_rhsisnotreceiver);
+ {
+ // Convert {lhs} to a Number first.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+ assembler->Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ assembler->Bind(&string_add_convert_left);
+ {
+ // Convert {lhs}, which is a Smi, to a String and concatenate the
+ // resulting string with the String {rhs}.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+ var_rhs.value()));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&string_add_convert_right);
+ {
+ // Convert {lhs}, which is a Smi, to a String and concatenate the
+ // resulting string with the String {rhs}.
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
+ var_rhs.value()));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&do_fadd);
+ {
+ Node* lhs_value = var_fadd_lhs.value();
+ Node* rhs_value = var_fadd_rhs.value();
+ Node* value = assembler->Float64Add(lhs_value, rhs_value);
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
+ var_result.Bind(result);
+ assembler->Goto(&end);
+ }
+ assembler->Bind(&end);
+ assembler->Return(var_result.value());
+}
+
+// Code stub implementing the binary subtraction operation {left} - {right}.
+// Fast paths cover Smi - Smi (with a float64 bailout on overflow) and
+// HeapNumber operands; any other operand is converted through the
+// NonNumberToNumber stub and the dispatch loop is re-entered.
+// Parameters: 0 = left operand, 1 = right operand, 2 = context.
+void Builtins::Generate_Subtract(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry for floating point subtraction.
+  Label do_fsub(assembler), end(assembler);
+  Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
+      var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to loop several times due to ToPrimitive and/or ToNumber
+  // conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_vars);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    // Load the current {lhs} and {rhs} values.
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    // Check if the {lhs} is a Smi or a HeapObject.
+    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
+                      &if_lhsisnotsmi);
+
+    assembler->Bind(&if_lhsissmi);
+    {
+      // Check if the {rhs} is also a Smi.
+      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                        &if_rhsisnotsmi);
+
+      assembler->Bind(&if_rhsissmi);
+      {
+        // Try a fast Smi subtraction first (on the untagged machine words).
+        Node* pair = assembler->IntPtrSubWithOverflow(
+            assembler->BitcastTaggedToWord(lhs),
+            assembler->BitcastTaggedToWord(rhs));
+        Node* overflow = assembler->Projection(1, pair);
+
+        // Check if the Smi subtraction overflowed.
+        Label if_overflow(assembler), if_notoverflow(assembler);
+        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+        assembler->Bind(&if_overflow);
+        {
+          // The result doesn't fit into Smi range.
+          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_notoverflow);
+        // Retag the untagged word difference as the Smi result.
+        var_result.Bind(assembler->BitcastWordToTaggedSigned(
+            assembler->Projection(0, pair)));
+        assembler->Goto(&end);
+      }
+
+      assembler->Bind(&if_rhsisnotsmi);
+      {
+        // Load the map of the {rhs}.
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {rhs} is a HeapNumber.
+        Label if_rhsisnumber(assembler),
+            if_rhsisnotnumber(assembler, Label::kDeferred);
+        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
+                          &if_rhsisnotnumber);
+
+        assembler->Bind(&if_rhsisnumber);
+        {
+          // Perform a floating point subtraction.
+          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_rhsisnotnumber);
+        {
+          // Convert the {rhs} to a Number first.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&if_lhsisnotsmi);
+    {
+      // Load the map of the {lhs}.
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if the {lhs} is a HeapNumber.
+      Label if_lhsisnumber(assembler),
+          if_lhsisnotnumber(assembler, Label::kDeferred);
+      Node* number_map = assembler->HeapNumberMapConstant();
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &if_lhsisnumber, &if_lhsisnotnumber);
+
+      assembler->Bind(&if_lhsisnumber);
+      {
+        // Check if the {rhs} is a Smi.
+        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+                          &if_rhsisnotsmi);
+
+        assembler->Bind(&if_rhsissmi);
+        {
+          // Perform a floating point subtraction.
+          var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fsub);
+        }
+
+        assembler->Bind(&if_rhsisnotsmi);
+        {
+          // Load the map of the {rhs}.
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if the {rhs} is a HeapNumber.
+          Label if_rhsisnumber(assembler),
+              if_rhsisnotnumber(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &if_rhsisnumber, &if_rhsisnotnumber);
+
+          assembler->Bind(&if_rhsisnumber);
+          {
+            // Perform a floating point subtraction.
+            var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fsub);
+          }
+
+          assembler->Bind(&if_rhsisnotnumber);
+          {
+            // Convert the {rhs} to a Number first.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&if_lhsisnotnumber);
+      {
+        // Convert the {lhs} to a Number first.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fsub);
+  {
+    Node* lhs_value = var_fsub_lhs.value();
+    Node* rhs_value = var_fsub_rhs.value();
+    Node* value = assembler->Float64Sub(lhs_value, rhs_value);
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  assembler->Return(var_result.value());
+}
+
+// Code stub implementing the binary multiplication operation {left} * {right}.
+// Smi * Smi is handled by SmiMul (whose result is not necessarily a Smi, in
+// case of overflow); HeapNumber operands use float64 multiplication. A
+// non-number operand is converted via NonNumberToNumber; since multiplication
+// is commutative, a non-number {rhs} is handled by swapping the operands and
+// looping. Parameters: 0 = left operand, 1 = right operand, 2 = context.
+void Builtins::Generate_Multiply(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry point for floating point multiplication.
+  Label do_fmul(assembler), return_result(assembler);
+  Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
+      var_rhs_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_lhs(assembler, MachineRepresentation::kTagged),
+      var_rhs(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_lhs, &var_rhs};
+  Label loop(assembler, 2, loop_variables);
+  var_lhs.Bind(left);
+  var_rhs.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* lhs = var_lhs.value();
+    Node* rhs = var_rhs.value();
+
+    Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi,
+                      &lhs_is_not_smi);
+
+    assembler->Bind(&lhs_is_smi);
+    {
+      Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+                        &rhs_is_not_smi);
+
+      assembler->Bind(&rhs_is_smi);
+      {
+        // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
+        // in case of overflow.
+        var_result.Bind(assembler->SmiMul(lhs, rhs));
+        assembler->Goto(&return_result);
+      }
+
+      assembler->Bind(&rhs_is_not_smi);
+      {
+        Node* rhs_map = assembler->LoadMap(rhs);
+
+        // Check if {rhs} is a HeapNumber.
+        Label rhs_is_number(assembler),
+            rhs_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                          &rhs_is_number, &rhs_is_not_number);
+
+        assembler->Bind(&rhs_is_number);
+        {
+          // Convert {lhs} to a double and multiply it with the value of {rhs}.
+          var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
+          var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+          assembler->Goto(&do_fmul);
+        }
+
+        assembler->Bind(&rhs_is_not_number);
+        {
+          // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+          var_lhs.Bind(rhs);
+          var_rhs.Bind(lhs);
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&lhs_is_not_smi);
+    {
+      Node* lhs_map = assembler->LoadMap(lhs);
+
+      // Check if {lhs} is a HeapNumber.
+      Label lhs_is_number(assembler),
+          lhs_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
+                        &lhs_is_number, &lhs_is_not_number);
+
+      assembler->Bind(&lhs_is_number);
+      {
+        // Check if {rhs} is a Smi.
+        Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+                          &rhs_is_not_smi);
+
+        assembler->Bind(&rhs_is_smi);
+        {
+          // Convert {rhs} to a double and multiply it with the value of {lhs}.
+          var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+          var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
+          assembler->Goto(&do_fmul);
+        }
+
+        assembler->Bind(&rhs_is_not_smi);
+        {
+          Node* rhs_map = assembler->LoadMap(rhs);
+
+          // Check if {rhs} is a HeapNumber.
+          Label rhs_is_number(assembler),
+              rhs_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
+                            &rhs_is_number, &rhs_is_not_number);
+
+          assembler->Bind(&rhs_is_number);
+          {
+            // Both {lhs} and {rhs} are HeapNumbers. Load their values and
+            // multiply them.
+            var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
+            var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
+            assembler->Goto(&do_fmul);
+          }
+
+          assembler->Bind(&rhs_is_not_number);
+          {
+            // Multiplication is commutative, swap {lhs} with {rhs} and loop.
+            var_lhs.Bind(rhs);
+            var_rhs.Bind(lhs);
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&lhs_is_not_number);
+      {
+        // Convert {lhs} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fmul);
+  {
+    Node* value =
+        assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+    Node* result = assembler->AllocateHeapNumberWithValue(value);
+    var_result.Bind(result);
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  assembler->Return(var_result.value());
+}
+
+// Code stub implementing the binary division operation {left} / {right}.
+// Smi / Smi attempts an exact integer division and bails out to float64
+// division when the divisor is zero, when the result cannot be a Smi
+// (-0.0 or an overflowing kMinInt / -1), or when there is a nonzero
+// remainder. Non-number operands are converted via NonNumberToNumber and the
+// loop is re-entered. Parameters: 0 = dividend, 1 = divisor, 2 = context.
+void Builtins::Generate_Divide(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  // Shared entry point for floating point division.
+  Label do_fdiv(assembler), end(assembler);
+  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_dividend(assembler, MachineRepresentation::kTagged),
+      var_divisor(assembler, MachineRepresentation::kTagged),
+      var_result(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_dividend, &var_divisor};
+  Label loop(assembler, 2, loop_variables);
+  var_dividend.Bind(left);
+  var_divisor.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* dividend = var_dividend.value();
+    Node* divisor = var_divisor.value();
+
+    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
+                      &dividend_is_not_smi);
+
+    assembler->Bind(&dividend_is_smi);
+    {
+      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                        &divisor_is_not_smi);
+
+      assembler->Bind(&divisor_is_smi);
+      {
+        Label bailout(assembler);
+
+        // Do floating point division if {divisor} is zero.
+        assembler->GotoIf(
+            assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
+            &bailout);
+
+        // Do floating point division if {dividend} is zero and {divisor} is
+        // negative (0 / -x is -0.0, which is not representable as a Smi).
+        Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
+        assembler->Branch(
+            assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+            &dividend_is_zero, &dividend_is_not_zero);
+
+        assembler->Bind(&dividend_is_zero);
+        {
+          assembler->GotoIf(
+              assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+              &bailout);
+          assembler->Goto(&dividend_is_not_zero);
+        }
+        assembler->Bind(&dividend_is_not_zero);
+
+        Node* untagged_divisor = assembler->SmiUntag(divisor);
+        Node* untagged_dividend = assembler->SmiUntag(dividend);
+
+        // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
+        // if the Smi size is 31) and {divisor} is -1.
+        Label divisor_is_minus_one(assembler),
+            divisor_is_not_minus_one(assembler);
+        assembler->Branch(assembler->Word32Equal(untagged_divisor,
+                                                 assembler->Int32Constant(-1)),
+                          &divisor_is_minus_one, &divisor_is_not_minus_one);
+
+        assembler->Bind(&divisor_is_minus_one);
+        {
+          assembler->GotoIf(
+              assembler->Word32Equal(
+                  untagged_dividend,
+                  assembler->Int32Constant(
+                      kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+              &bailout);
+          assembler->Goto(&divisor_is_not_minus_one);
+        }
+        assembler->Bind(&divisor_is_not_minus_one);
+
+        // TODO(epertoso): consider adding a machine instruction that returns
+        // both the result and the remainder.
+        Node* untagged_result =
+            assembler->Int32Div(untagged_dividend, untagged_divisor);
+        Node* truncated =
+            assembler->Int32Mul(untagged_result, untagged_divisor);
+        // Do floating point division if the remainder is not 0.
+        assembler->GotoIf(
+            assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
+        var_result.Bind(assembler->SmiTag(untagged_result));
+        assembler->Goto(&end);
+
+        // Bailout: convert {dividend} and {divisor} to double and do double
+        // division.
+        assembler->Bind(&bailout);
+        {
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+      }
+
+      assembler->Bind(&divisor_is_not_smi);
+      {
+        Node* divisor_map = assembler->LoadMap(divisor);
+
+        // Check if {divisor} is a HeapNumber.
+        Label divisor_is_number(assembler),
+            divisor_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                          &divisor_is_number, &divisor_is_not_number);
+
+        assembler->Bind(&divisor_is_number);
+        {
+          // Convert {dividend} to a double and divide it with the value of
+          // {divisor}.
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+
+        assembler->Bind(&divisor_is_not_number);
+        {
+          // Convert {divisor} to a number and loop.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&dividend_is_not_smi);
+    {
+      Node* dividend_map = assembler->LoadMap(dividend);
+
+      // Check if {dividend} is a HeapNumber.
+      Label dividend_is_number(assembler),
+          dividend_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+                        &dividend_is_number, &dividend_is_not_number);
+
+      assembler->Bind(&dividend_is_number);
+      {
+        // Check if {divisor} is a Smi.
+        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                          &divisor_is_not_smi);
+
+        assembler->Bind(&divisor_is_smi);
+        {
+          // Convert {divisor} to a double and use it for a floating point
+          // division.
+          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fdiv);
+        }
+
+        assembler->Bind(&divisor_is_not_smi);
+        {
+          Node* divisor_map = assembler->LoadMap(divisor);
+
+          // Check if {divisor} is a HeapNumber.
+          Label divisor_is_number(assembler),
+              divisor_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                            &divisor_is_number, &divisor_is_not_number);
+
+          assembler->Bind(&divisor_is_number);
+          {
+            // Both {dividend} and {divisor} are HeapNumbers. Load their values
+            // and divide them.
+            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+            assembler->Goto(&do_fdiv);
+          }
+
+          assembler->Bind(&divisor_is_not_number);
+          {
+            // Convert {divisor} to a number and loop.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&dividend_is_not_number);
+      {
+        // Convert {dividend} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fdiv);
+  {
+    Node* value = assembler->Float64Div(var_dividend_float64.value(),
+                                        var_divisor_float64.value());
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
+    assembler->Goto(&end);
+  }
+  assembler->Bind(&end);
+  assembler->Return(var_result.value());
+}
+
+// Code stub implementing the binary modulus operation {left} % {right}.
+// Smi % Smi is computed by SmiMod; HeapNumber operands use Float64Mod.
+// Non-number operands are converted via NonNumberToNumber and the loop is
+// re-entered. Parameters: 0 = dividend, 1 = divisor, 2 = context.
+void Builtins::Generate_Modulus(CodeStubAssembler* assembler) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Variable var_result(assembler, MachineRepresentation::kTagged);
+  Label return_result(assembler, &var_result);
+
+  // Shared entry point for floating point modulus.
+  Label do_fmod(assembler);
+  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
+      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+
+  Node* number_map = assembler->HeapNumberMapConstant();
+
+  // We might need to loop one or two times due to ToNumber conversions.
+  Variable var_dividend(assembler, MachineRepresentation::kTagged),
+      var_divisor(assembler, MachineRepresentation::kTagged);
+  Variable* loop_variables[] = {&var_dividend, &var_divisor};
+  Label loop(assembler, 2, loop_variables);
+  var_dividend.Bind(left);
+  var_divisor.Bind(right);
+  assembler->Goto(&loop);
+  assembler->Bind(&loop);
+  {
+    Node* dividend = var_dividend.value();
+    Node* divisor = var_divisor.value();
+
+    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
+                      &dividend_is_not_smi);
+
+    assembler->Bind(&dividend_is_smi);
+    {
+      // Note: an unused Label dividend_is_not_zero (left over from the
+      // Divide stub above) has been removed here.
+      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                        &divisor_is_not_smi);
+
+      assembler->Bind(&divisor_is_smi);
+      {
+        // Compute the modulus of two Smis.
+        var_result.Bind(assembler->SmiMod(dividend, divisor));
+        assembler->Goto(&return_result);
+      }
+
+      assembler->Bind(&divisor_is_not_smi);
+      {
+        Node* divisor_map = assembler->LoadMap(divisor);
+
+        // Check if {divisor} is a HeapNumber.
+        Label divisor_is_number(assembler),
+            divisor_is_not_number(assembler, Label::kDeferred);
+        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                          &divisor_is_number, &divisor_is_not_number);
+
+        assembler->Bind(&divisor_is_number);
+        {
+          // Convert {dividend} to a double and compute its modulus with the
+          // value of {divisor}.
+          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
+          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+          assembler->Goto(&do_fmod);
+        }
+
+        assembler->Bind(&divisor_is_not_number);
+        {
+          // Convert {divisor} to a number and loop.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+          assembler->Goto(&loop);
+        }
+      }
+    }
+
+    assembler->Bind(&dividend_is_not_smi);
+    {
+      Node* dividend_map = assembler->LoadMap(dividend);
+
+      // Check if {dividend} is a HeapNumber.
+      Label dividend_is_number(assembler),
+          dividend_is_not_number(assembler, Label::kDeferred);
+      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
+                        &dividend_is_number, &dividend_is_not_number);
+
+      assembler->Bind(&dividend_is_number);
+      {
+        // Check if {divisor} is a Smi.
+        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
+        assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
+                          &divisor_is_not_smi);
+
+        assembler->Bind(&divisor_is_smi);
+        {
+          // Convert {divisor} to a double and compute {dividend}'s modulus with
+          // it.
+          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
+          assembler->Goto(&do_fmod);
+        }
+
+        assembler->Bind(&divisor_is_not_smi);
+        {
+          Node* divisor_map = assembler->LoadMap(divisor);
+
+          // Check if {divisor} is a HeapNumber.
+          Label divisor_is_number(assembler),
+              divisor_is_not_number(assembler, Label::kDeferred);
+          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
+                            &divisor_is_number, &divisor_is_not_number);
+
+          assembler->Bind(&divisor_is_number);
+          {
+            // Both {dividend} and {divisor} are HeapNumbers. Load their values
+            // and compute their modulus.
+            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
+            assembler->Goto(&do_fmod);
+          }
+
+          assembler->Bind(&divisor_is_not_number);
+          {
+            // Convert {divisor} to a number and loop.
+            Callable callable =
+                CodeFactory::NonNumberToNumber(assembler->isolate());
+            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
+            assembler->Goto(&loop);
+          }
+        }
+      }
+
+      assembler->Bind(&dividend_is_not_number);
+      {
+        // Convert {dividend} to a Number and loop.
+        Callable callable =
+            CodeFactory::NonNumberToNumber(assembler->isolate());
+        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
+        assembler->Goto(&loop);
+      }
+    }
+  }
+
+  assembler->Bind(&do_fmod);
+  {
+    Node* value = assembler->Float64Mod(var_dividend_float64.value(),
+                                        var_divisor_float64.value());
+    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
+    assembler->Goto(&return_result);
+  }
+
+  assembler->Bind(&return_result);
+  assembler->Return(var_result.value());
+}
+
+// Code stub for the << operator: truncates both operands to word32, masks
+// the shift count to its low five bits, and returns a tagged signed result.
+void Builtins::Generate_ShiftLeft(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_word32 = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_word32 = assembler->TruncateTaggedToWord32(context, rhs);
+  // Only the low five bits of the right operand act as the shift count.
+  Node* masked_count =
+      assembler->Word32And(rhs_word32, assembler->Int32Constant(0x1f));
+  assembler->Return(assembler->ChangeInt32ToTagged(
+      assembler->Word32Shl(lhs_word32, masked_count)));
+}
+
+// Code stub for the >> operator: truncates both operands to word32, masks
+// the shift count to its low five bits, performs an arithmetic (sign
+// propagating) right shift, and returns a tagged signed result.
+void Builtins::Generate_ShiftRight(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_word32 = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_word32 = assembler->TruncateTaggedToWord32(context, rhs);
+  // Only the low five bits of the right operand act as the shift count.
+  Node* masked_count =
+      assembler->Word32And(rhs_word32, assembler->Int32Constant(0x1f));
+  assembler->Return(assembler->ChangeInt32ToTagged(
+      assembler->Word32Sar(lhs_word32, masked_count)));
+}
+
+// Code stub for the >>> operator: truncates both operands to word32, masks
+// the shift count to its low five bits, performs a logical (zero filling)
+// right shift, and tags the result as an unsigned value.
+void Builtins::Generate_ShiftRightLogical(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_word32 = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_word32 = assembler->TruncateTaggedToWord32(context, rhs);
+  // Only the low five bits of the right operand act as the shift count.
+  Node* masked_count =
+      assembler->Word32And(rhs_word32, assembler->Int32Constant(0x1f));
+  assembler->Return(assembler->ChangeUint32ToTagged(
+      assembler->Word32Shr(lhs_word32, masked_count)));
+}
+
+// Code stub for the & operator: truncates both operands to word32, combines
+// them with a bitwise AND, and returns a tagged signed result.
+void Builtins::Generate_BitwiseAnd(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_word32 = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_word32 = assembler->TruncateTaggedToWord32(context, rhs);
+  assembler->Return(assembler->ChangeInt32ToTagged(
+      assembler->Word32And(lhs_word32, rhs_word32)));
+}
+
+// Code stub for the | operator: truncates both operands to word32, combines
+// them with a bitwise OR, and returns a tagged signed result.
+void Builtins::Generate_BitwiseOr(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_word32 = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_word32 = assembler->TruncateTaggedToWord32(context, rhs);
+  assembler->Return(assembler->ChangeInt32ToTagged(
+      assembler->Word32Or(lhs_word32, rhs_word32)));
+}
+
+// Code stub for the ^ operator: truncates both operands to word32, combines
+// them with a bitwise XOR, and returns a tagged signed result.
+void Builtins::Generate_BitwiseXor(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* lhs = assembler->Parameter(0);
+  Node* rhs = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* lhs_word32 = assembler->TruncateTaggedToWord32(context, lhs);
+  Node* rhs_word32 = assembler->TruncateTaggedToWord32(context, rhs);
+  assembler->Return(assembler->ChangeInt32ToTagged(
+      assembler->Word32Xor(lhs_word32, rhs_word32)));
+}
+
+// Code stub for the < operator; delegates to the assembler's shared
+// RelationalComparison helper in kLessThan mode.
+void Builtins::Generate_LessThan(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
+                                                 left, right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the <= operator; delegates to the assembler's shared
+// RelationalComparison helper in kLessThanOrEqual mode.
+void Builtins::Generate_LessThanOrEqual(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->RelationalComparison(
+      CodeStubAssembler::kLessThanOrEqual, left, right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the > operator; delegates to the assembler's shared
+// RelationalComparison helper in kGreaterThan mode.
+void Builtins::Generate_GreaterThan(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->RelationalComparison(
+      CodeStubAssembler::kGreaterThan, left, right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the >= operator; delegates to the assembler's shared
+// RelationalComparison helper in kGreaterThanOrEqual mode.
+void Builtins::Generate_GreaterThanOrEqual(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->RelationalComparison(
+      CodeStubAssembler::kGreaterThanOrEqual, left, right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the == operator; delegates to the assembler's shared Equal
+// helper without negating the result.
+void Builtins::Generate_Equal(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->Equal(CodeStubAssembler::kDontNegateResult, left,
+                                  right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the != operator; delegates to the assembler's shared Equal
+// helper with the result negated.
+void Builtins::Generate_NotEqual(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result =
+      assembler->Equal(CodeStubAssembler::kNegateResult, left, right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the === operator; delegates to the assembler's shared
+// StrictEqual helper without negating the result.
+void Builtins::Generate_StrictEqual(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult,
+                                        left, right, context);
+  assembler->Return(result);
+}
+
+// Code stub for the !== operator; delegates to the assembler's shared
+// StrictEqual helper with the result negated.
+void Builtins::Generate_StrictNotEqual(CodeStubAssembler* assembler) {
+  using compiler::Node;
+
+  Node* left = assembler->Parameter(0);
+  Node* right = assembler->Parameter(1);
+  Node* context = assembler->Parameter(2);
+
+  Node* result = assembler->StrictEqual(CodeStubAssembler::kNegateResult, left,
+                                        right, context);
+  assembler->Return(result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 671397d9ea..abb5c47555 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -28,7 +28,7 @@ void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
// Smi receivers do not have own properties.
Label if_objectisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(object), &return_false,
+ assembler->Branch(assembler->TaggedIsSmi(object), &return_false,
&if_objectisnotsmi);
assembler->Bind(&if_objectisnotsmi);
@@ -228,7 +228,7 @@ void IsString(CodeStubAssembler* assembler, compiler::Node* object,
typedef CodeStubAssembler::Label Label;
Label if_notsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(object), if_notstring, &if_notsmi);
+ assembler->Branch(assembler->TaggedIsSmi(object), if_notstring, &if_notsmi);
assembler->Bind(&if_notsmi);
{
@@ -300,13 +300,13 @@ void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
Node* context = assembler->Parameter(3);
assembler->GotoIf(
- assembler->Word32Equal(receiver, assembler->UndefinedConstant()),
+ assembler->WordEqual(receiver, assembler->UndefinedConstant()),
&return_undefined);
- assembler->GotoIf(assembler->Word32Equal(receiver, assembler->NullConstant()),
+ assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
&return_null);
- assembler->GotoIf(assembler->WordIsSmi(receiver), &return_number);
+ assembler->GotoIf(assembler->TaggedIsSmi(receiver), &return_number);
Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
@@ -431,7 +431,7 @@ void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
assembler->Bind(&return_jsvalue);
{
Node* value = assembler->LoadJSValueValue(receiver);
- assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
+ assembler->GotoIf(assembler->TaggedIsSmi(value), &return_number);
ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
&return_string, &return_boolean, &return_number);
@@ -447,13 +447,8 @@ void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
Node* map = assembler->LoadMap(receiver);
// Return object if the proxy {receiver} is not callable.
- assembler->Branch(
- assembler->Word32Equal(
- assembler->Word32And(
- assembler->LoadMapBitField(map),
- assembler->Int32Constant(1 << Map::kIsCallable)),
- assembler->Int32Constant(0)),
- &return_object, &return_function);
+ assembler->Branch(assembler->IsCallableMap(map), &return_function,
+ &return_object);
}
// Default
@@ -463,57 +458,95 @@ void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
}
}
-// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
-// TODO(verwaest): Support the common cases with precached map directly in
-// an Object.create stub.
-BUILTIN(ObjectCreate) {
- HandleScope scope(isolate);
- Handle<Object> prototype = args.atOrUndefined(isolate, 1);
- if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* prototype = a->Parameter(1);
+ Node* properties = a->Parameter(2);
+ Node* context = a->Parameter(3 + 2);
+
+ Label call_runtime(a, Label::kDeferred), prototype_valid(a), no_properties(a);
+ {
+ a->Comment("Argument 1 check: prototype");
+ a->GotoIf(a->WordEqual(prototype, a->NullConstant()), &prototype_valid);
+ a->BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
}
- // Generate the map with the specified {prototype} based on the Object
- // function's initial map from the current native context.
- // TODO(bmeurer): Use a dedicated cache for Object.create; think about
- // slack tracking for Object.create.
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
- isolate);
- if (map->prototype() != *prototype) {
- if (prototype->IsNull(isolate)) {
- map = isolate->object_with_null_prototype_map();
- } else if (prototype->IsJSObject()) {
- Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) {
- JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
- }
- Handle<PrototypeInfo> info =
- Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
- // TODO(verwaest): Use inobject slack tracking for this map.
- if (info->HasObjectCreateMap()) {
- map = handle(info->ObjectCreateMap(), isolate);
- } else {
- map = Map::CopyInitialMap(map);
- Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
- PrototypeInfo::SetObjectCreateMap(info, map);
- }
- } else {
- map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
- }
+ a->Bind(&prototype_valid);
+ {
+ a->Comment("Argument 2 check: properties");
+ // Check that we have a simple object
+ a->GotoIf(a->TaggedIsSmi(properties), &call_runtime);
+ // Undefined implies no properties.
+ a->GotoIf(a->WordEqual(properties, a->UndefinedConstant()), &no_properties);
+ Node* properties_map = a->LoadMap(properties);
+ a->GotoIf(a->IsSpecialReceiverMap(properties_map), &call_runtime);
+ // Stay on the fast path only if there are no elements.
+ a->GotoUnless(a->WordEqual(a->LoadElements(properties),
+ a->LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+ &call_runtime);
+ // Handle dictionary objects or fast objects with properties in runtime.
+ Node* bit_field3 = a->LoadMapBitField3(properties_map);
+ a->GotoIf(a->IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+ a->Branch(a->IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
+ &call_runtime, &no_properties);
}
- // Actually allocate the object.
- Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+ // Create a new object with the given prototype.
+ a->Bind(&no_properties);
+ {
+ Variable map(a, MachineRepresentation::kTagged);
+ Variable properties(a, MachineRepresentation::kTagged);
+ Label non_null_proto(a), instantiate_map(a), good(a);
+
+ a->Branch(a->WordEqual(prototype, a->NullConstant()), &good,
+ &non_null_proto);
+
+ a->Bind(&good);
+ {
+ map.Bind(a->LoadContextElement(
+ context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
+ properties.Bind(
+ a->AllocateNameDictionary(NameDictionary::kInitialCapacity));
+ a->Goto(&instantiate_map);
+ }
+
+ a->Bind(&non_null_proto);
+ {
+ properties.Bind(a->EmptyFixedArrayConstant());
+ Node* object_function =
+ a->LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
+ Node* object_function_map = a->LoadObjectField(
+ object_function, JSFunction::kPrototypeOrInitialMapOffset);
+ map.Bind(object_function_map);
+ a->GotoIf(a->WordEqual(prototype, a->LoadMapPrototype(map.value())),
+ &instantiate_map);
+ // Try loading the prototype info.
+ Node* prototype_info =
+ a->LoadMapPrototypeInfo(a->LoadMap(prototype), &call_runtime);
+ a->Comment("Load ObjectCreateMap from PrototypeInfo");
+ Node* weak_cell =
+ a->LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
+ a->GotoIf(a->WordEqual(weak_cell, a->UndefinedConstant()), &call_runtime);
+ map.Bind(a->LoadWeakCellValue(weak_cell, &call_runtime));
+ a->Goto(&instantiate_map);
+ }
- // Define the properties if properties was specified and is not undefined.
- Handle<Object> properties = args.atOrUndefined(isolate, 2);
- if (!properties->IsUndefined(isolate)) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, JSReceiver::DefineProperties(isolate, object, properties));
+ a->Bind(&instantiate_map);
+ {
+ Node* instance =
+ a->AllocateJSObjectFromMap(map.value(), properties.value());
+ a->Return(instance);
+ }
}
- return *object;
+ a->Bind(&call_runtime);
+ {
+ a->Return(
+ a->CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
+ }
}
// ES6 section 19.1.2.3 Object.defineProperties
@@ -692,6 +725,85 @@ BUILTIN(ObjectGetPrototypeOf) {
JSReceiver::GetPrototype(isolate, receiver));
}
+// ES6 section 19.1.2.21 Object.setPrototypeOf ( O, proto )
+BUILTIN(ObjectSetPrototypeOf) {
+ HandleScope scope(isolate);
+
+ // 1. Let O be ? RequireObjectCoercible(O).
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Object.setPrototypeOf")));
+ }
+
+ // 2. If Type(proto) is neither Object nor Null, throw a TypeError exception.
+ Handle<Object> proto = args.atOrUndefined(isolate, 2);
+ if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, proto));
+ }
+
+ // 3. If Type(O) is not Object, return O.
+ if (!object->IsJSReceiver()) return *object;
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+ // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+ // 5. If status is false, throw a TypeError exception.
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(receiver, proto, true, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+
+ // 6. Return O.
+ return *receiver;
+}
+
+// ES6 section B.2.2.1.1 get Object.prototype.__proto__
+BUILTIN(ObjectPrototypeGetProto) {
+ HandleScope scope(isolate);
+ // 1. Let O be ? ToObject(this value).
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args.receiver()));
+
+ // 2. Return ? O.[[GetPrototypeOf]]().
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSReceiver::GetPrototype(isolate, receiver));
+}
+
+// ES6 section B.2.2.1.2 set Object.prototype.__proto__
+BUILTIN(ObjectPrototypeSetProto) {
+ HandleScope scope(isolate);
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ Handle<Object> object = args.receiver();
+ if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "set Object.prototype.__proto__")));
+ }
+
+ // 2. If Type(proto) is neither Object nor Null, return undefined.
+ Handle<Object> proto = args.at<Object>(1);
+ if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
+ return isolate->heap()->undefined_value();
+ }
+
+ // 3. If Type(O) is not Object, return undefined.
+ if (!object->IsJSReceiver()) return isolate->heap()->undefined_value();
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+ // 4. Let status be ? O.[[SetPrototypeOf]](proto).
+ // 5. If status is false, throw a TypeError exception.
+ MAYBE_RETURN(
+ JSReceiver::SetPrototype(receiver, proto, true, Object::THROW_ON_ERROR),
+ isolate->heap()->exception());
+
+ // Return undefined.
+ return isolate->heap()->undefined_value();
+}
+
// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
BUILTIN(ObjectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
@@ -910,6 +1022,39 @@ BUILTIN(ObjectSeal) {
return *object;
}
+void Builtins::Generate_HasProperty(CodeStubAssembler* assembler) {
+ typedef HasPropertyDescriptor Descriptor;
+ typedef compiler::Node Node;
+
+ Node* key = assembler->Parameter(Descriptor::kKey);
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(
+ assembler->HasProperty(object, key, context, Runtime::kHasProperty));
+}
+
+void Builtins::Generate_ForInFilter(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef ForInFilterDescriptor Descriptor;
+
+ Node* key = assembler->Parameter(Descriptor::kKey);
+ Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(assembler->ForInFilter(key, object, context));
+}
+
+void Builtins::Generate_InstanceOf(CodeStubAssembler* assembler) {
+ typedef compiler::Node Node;
+ typedef CompareDescriptor Descriptor;
+ Node* object = assembler->Parameter(Descriptor::kLeft);
+ Node* callable = assembler->Parameter(Descriptor::kRight);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ assembler->Return(assembler->InstanceOf(object, callable, context));
+}
+
// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
typedef compiler::Node Node;
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
new file mode 100644
index 0000000000..9f5d7c88d7
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+
+#include "src/promise-utils.h"
+
+namespace v8 {
+namespace internal {
+
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions
+BUILTIN(PromiseResolveClosure) {
+ HandleScope scope(isolate);
+
+ Handle<Context> context(isolate->context(), isolate);
+
+ if (PromiseUtils::HasAlreadyVisited(context)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ PromiseUtils::SetAlreadyVisited(context);
+ Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+
+ MaybeHandle<Object> maybe_result;
+ Handle<Object> argv[] = {promise, value};
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Execution::Call(isolate, isolate->promise_resolve(),
+ isolate->factory()->undefined_value(),
+ arraysize(argv), argv));
+ return isolate->heap()->undefined_value();
+}
+
+// ES#sec-promise-reject-functions
+// Promise Reject Functions
+BUILTIN(PromiseRejectClosure) {
+ HandleScope scope(isolate);
+
+ Handle<Context> context(isolate->context(), isolate);
+
+ if (PromiseUtils::HasAlreadyVisited(context)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ PromiseUtils::SetAlreadyVisited(context);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
+ Handle<Object> debug_event =
+ handle(PromiseUtils::GetDebugEvent(context), isolate);
+ MaybeHandle<Object> maybe_result;
+ Handle<Object> argv[] = {promise, value, debug_event};
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, Execution::Call(isolate, isolate->promise_internal_reject(),
+ isolate->factory()->undefined_value(),
+ arraysize(argv), argv));
+ return isolate->heap()->undefined_value();
+}
+
+// ES#sec-createresolvingfunctions
+// CreateResolvingFunctions ( promise )
+BUILTIN(CreateResolvingFunctions) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+
+ Handle<JSObject> promise = args.at<JSObject>(1);
+ Handle<Object> debug_event = args.at<Object>(2);
+ Handle<JSFunction> resolve, reject;
+
+ PromiseUtils::CreateResolvingFunctions(isolate, promise, debug_event,
+ &resolve, &reject);
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(2);
+ result->set(0, *resolve);
+ result->set(1, *reject);
+
+ return *isolate->factory()->NewJSArrayWithElements(result, FAST_ELEMENTS, 2,
+ NOT_TENURED);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 371221fa70..5f8d18be43 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -7,6 +7,8 @@
#include "src/code-factory.h"
#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-utils.h"
+#include "src/string-builder.h"
namespace v8 {
namespace internal {
@@ -16,27 +18,6 @@ namespace internal {
namespace {
-// ES#sec-isregexp IsRegExp ( argument )
-Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object) {
- if (!object->IsJSReceiver()) return Just(false);
-
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
- if (isolate->regexp_function()->initial_map() == receiver->map()) {
- // Fast-path for unmodified JSRegExp instances.
- return Just(true);
- }
-
- Handle<Object> match;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, match,
- JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
- Nothing<bool>());
-
- if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
- return Just(object->IsJSRegExp());
-}
-
Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
static const int kMaxFlagsLength = 5 + 1; // 5 flags and '\0';
char flags_string[kMaxFlagsLength];
@@ -58,10 +39,10 @@ Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
// ES#sec-regexpinitialize
// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
- Handle<JSRegExp> regexp,
- Handle<Object> pattern,
- Handle<Object> flags) {
+MUST_USE_RESULT MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
+ Handle<JSRegExp> regexp,
+ Handle<Object> pattern,
+ Handle<Object> flags) {
Handle<String> pattern_string;
if (pattern->IsUndefined(isolate)) {
pattern_string = isolate->factory()->empty_string();
@@ -79,9 +60,7 @@ MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
}
// TODO(jgruber): We could avoid the flags back and forth conversions.
- RETURN_RESULT(isolate,
- JSRegExp::Initialize(regexp, pattern_string, flags_string),
- JSRegExp);
+ return JSRegExp::Initialize(regexp, pattern_string, flags_string);
}
} // namespace
@@ -99,7 +78,8 @@ BUILTIN(RegExpConstructor) {
bool pattern_is_regexp;
{
- Maybe<bool> maybe_pattern_is_regexp = IsRegExp(isolate, pattern);
+ Maybe<bool> maybe_pattern_is_regexp =
+ RegExpUtils::IsRegExp(isolate, pattern);
if (maybe_pattern_is_regexp.IsNothing()) {
DCHECK(isolate->has_pending_exception());
return isolate->heap()->exception();
@@ -158,36 +138,77 @@ BUILTIN(RegExpConstructor) {
RegExpInitialize(isolate, regexp, pattern, flags));
}
+BUILTIN(RegExpPrototypeCompile) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSRegExp, regexp, "RegExp.prototype.compile");
+
+ Handle<Object> pattern = args.atOrUndefined(isolate, 1);
+ Handle<Object> flags = args.atOrUndefined(isolate, 2);
+
+ if (pattern->IsJSRegExp()) {
+ Handle<JSRegExp> pattern_regexp = Handle<JSRegExp>::cast(pattern);
+
+ if (!flags->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kRegExpFlags));
+ }
+
+ flags = PatternFlags(isolate, pattern_regexp);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, pattern,
+ Object::GetProperty(pattern, isolate->factory()->source_string()));
+ }
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, regexp, RegExpInitialize(isolate, regexp, pattern, flags));
+
+ // Return undefined for compatibility with JSC.
+ // See http://crbug.com/585775 for web compat details.
+
+ return isolate->heap()->undefined_value();
+}
+
namespace {
+compiler::Node* FastLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* regexp) {
+ // Load the in-object field.
+ static const int field_offset =
+ JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ return a->LoadObjectField(regexp, field_offset);
+}
+
+compiler::Node* SlowLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* regexp) {
+ // Load through the GetProperty stub.
+ typedef compiler::Node Node;
+
+ Node* const name =
+ a->HeapConstant(a->isolate()->factory()->lastIndex_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+ return a->CallStub(getproperty_callable, context, regexp, name);
+}
+
compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
compiler::Node* has_initialmap,
compiler::Node* regexp) {
typedef CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
Variable var_value(a, MachineRepresentation::kTagged);
- Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+ Label out(a), if_unmodified(a), if_modified(a);
a->Branch(has_initialmap, &if_unmodified, &if_modified);
a->Bind(&if_unmodified);
{
- // Load the in-object field.
- static const int field_offset =
- JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
- var_value.Bind(a->LoadObjectField(regexp, field_offset));
+ var_value.Bind(FastLoadLastIndex(a, context, regexp));
a->Goto(&out);
}
a->Bind(&if_modified);
{
- // Load through the GetProperty stub.
- Node* const name =
- a->HeapConstant(a->isolate()->factory()->last_index_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- var_value.Bind(a->CallStub(getproperty_callable, context, regexp, name));
+ var_value.Bind(SlowLoadLastIndex(a, context, regexp));
a->Goto(&out);
}
@@ -195,33 +216,46 @@ compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
return var_value.value();
}
+// The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified
+// JSRegExp instance.
+void FastStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* regexp, compiler::Node* value) {
+ // Store the in-object field.
+ static const int field_offset =
+ JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ a->StoreObjectField(regexp, field_offset, value);
+}
+
+void SlowStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* regexp, compiler::Node* value) {
+ // Store through runtime.
+ // TODO(ishell): Use SetPropertyStub here once available.
+ typedef compiler::Node Node;
+
+ Node* const name =
+ a->HeapConstant(a->isolate()->factory()->lastIndex_string());
+ Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
+ a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+ language_mode);
+}
+
void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
compiler::Node* has_initialmap, compiler::Node* regexp,
compiler::Node* value) {
typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- Label out(a), if_unmodified(a), if_modified(a, Label::kDeferred);
+ Label out(a), if_unmodified(a), if_modified(a);
a->Branch(has_initialmap, &if_unmodified, &if_modified);
a->Bind(&if_unmodified);
{
- // Store the in-object field.
- static const int field_offset =
- JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
- a->StoreObjectField(regexp, field_offset, value);
+ FastStoreLastIndex(a, context, regexp, value);
a->Goto(&out);
}
a->Bind(&if_modified);
{
- // Store through runtime.
- // TODO(ishell): Use SetPropertyStub here once available.
- Node* const name =
- a->HeapConstant(a->isolate()->factory()->last_index_string());
- Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
- a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
- language_mode);
+ SlowStoreLastIndex(a, context, regexp, value);
a->Goto(&out);
}
@@ -231,7 +265,7 @@ void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
CodeStubAssembler* a,
compiler::Node* context,
- compiler::Node* match_elements,
+ compiler::Node* match_info,
compiler::Node* string) {
typedef CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
@@ -241,13 +275,14 @@ compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
- match_elements, a->IntPtrConstant(RegExpImpl::kLastCaptureCount), 0,
+ match_info, a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0,
mode));
Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
Node* const start = a->LoadFixedArrayElement(
- match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture), 0, mode);
+ match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), 0,
+ mode);
Node* const end = a->LoadFixedArrayElement(
- match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1), 0,
+ match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0,
mode);
// Calculate the substring of the first match before creating the result array
@@ -264,13 +299,14 @@ compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
// Store all remaining captures.
- Node* const limit =
- a->IntPtrAdd(a->IntPtrConstant(RegExpImpl::kFirstCapture), num_indices);
+ Node* const limit = a->IntPtrAdd(
+ a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
Variable var_from_cursor(a, MachineType::PointerRepresentation());
Variable var_to_cursor(a, MachineType::PointerRepresentation());
- var_from_cursor.Bind(a->IntPtrConstant(RegExpImpl::kFirstCapture + 2));
+ var_from_cursor.Bind(
+ a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
var_to_cursor.Bind(a->IntPtrConstant(1));
Variable* vars[] = {&var_from_cursor, &var_to_cursor};
@@ -281,15 +317,14 @@ compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
{
Node* const from_cursor = var_from_cursor.value();
Node* const to_cursor = var_to_cursor.value();
- Node* const start = a->LoadFixedArrayElement(match_elements, from_cursor);
+ Node* const start = a->LoadFixedArrayElement(match_info, from_cursor);
Label next_iter(a);
a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
Node* const from_cursor_plus1 =
a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
- Node* const end =
- a->LoadFixedArrayElement(match_elements, from_cursor_plus1);
+ Node* const end = a->LoadFixedArrayElement(match_info, from_cursor_plus1);
Node* const capture = a->SubString(context, string, start, end);
a->StoreFixedArrayElement(result_elements, to_cursor, capture);
@@ -305,29 +340,29 @@ compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
return result;
}
-} // namespace
-
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
+compiler::Node* RegExpPrototypeExecInternal(CodeStubAssembler* a,
+ compiler::Node* context,
+ compiler::Node* maybe_receiver,
+ compiler::Node* maybe_string) {
typedef CodeStubAssembler::Variable Variable;
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
Isolate* const isolate = a->isolate();
- Node* const receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
-
Node* const null = a->NullConstant();
Node* const int_zero = a->IntPtrConstant(0);
- Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+ Variable var_result(a, MachineRepresentation::kTagged);
+ Label out(a);
- // Ensure {receiver} is a JSRegExp.
+ // Ensure {maybe_receiver} is a JSRegExp.
Node* const regexp_map = a->ThrowIfNotInstanceType(
- context, receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
- Node* const regexp = receiver;
+ context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+ Node* const regexp = maybe_receiver;
// Check whether the regexp instance is unmodified.
Node* const native_context = a->LoadNativeContext(context);
@@ -369,14 +404,15 @@ void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
var_lastindex.Bind(lastindex);
Label if_isoob(a, Label::kDeferred);
- a->GotoUnless(a->WordIsSmi(lastindex), &if_isoob);
+ a->GotoUnless(a->TaggedIsSmi(lastindex), &if_isoob);
a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
a->Goto(&run_exec);
a->Bind(&if_isoob);
{
StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
- a->Return(null);
+ var_result.Bind(null);
+ a->Goto(&out);
}
}
@@ -400,7 +436,7 @@ void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
match_indices = a->CallStub(exec_callable, context, regexp, string,
var_lastindex.value(), last_match_info);
- // {match_indices} is either null or the RegExpLastMatchInfo array.
+ // {match_indices} is either null or the RegExpMatchInfo array.
// Return early if exec failed, possibly updating last index.
a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
@@ -411,19 +447,19 @@ void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
a->Goto(&return_null);
a->Bind(&return_null);
- a->Return(null);
+ var_result.Bind(null);
+ a->Goto(&out);
}
Label construct_result(a);
a->Bind(&successful_match);
{
- Node* const match_elements = a->LoadElements(match_indices);
-
a->GotoUnless(should_update_last_index, &construct_result);
// Update the new last index from {match_indices}.
Node* const new_lastindex = a->LoadFixedArrayElement(
- match_elements, a->IntPtrConstant(RegExpImpl::kFirstCapture + 1));
+ match_indices,
+ a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1));
StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
a->Goto(&construct_result);
@@ -431,10 +467,1638 @@ void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
a->Bind(&construct_result);
{
Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
- match_elements, string);
+ match_indices, string);
+ var_result.Bind(result);
+ a->Goto(&out);
+ }
+ }
+
+ a->Bind(&out);
+ return var_result.value();
+}
+
+} // namespace
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
+ typedef compiler::Node Node;
+
+ Node* const maybe_receiver = a->Parameter(0);
+ Node* const maybe_string = a->Parameter(1);
+ Node* const context = a->Parameter(4);
+
+ Node* const result =
+ RegExpPrototypeExecInternal(a, context, maybe_receiver, maybe_string);
+ a->Return(result);
+}
+
+namespace {
+
+compiler::Node* ThrowIfNotJSReceiver(CodeStubAssembler* a, Isolate* isolate,
+ compiler::Node* context,
+ compiler::Node* value,
+ MessageTemplate::Template msg_template,
+ char const* method_name) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label out(a), throw_exception(a, Label::kDeferred);
+ Variable var_value_map(a, MachineRepresentation::kTagged);
+
+ a->GotoIf(a->TaggedIsSmi(value), &throw_exception);
+
+ // Load the instance type of the {value}.
+ var_value_map.Bind(a->LoadMap(value));
+ Node* const value_instance_type =
+ a->LoadMapInstanceType(var_value_map.value());
+
+ a->Branch(a->IsJSReceiverInstanceType(value_instance_type), &out,
+ &throw_exception);
+
+ // The {value} is not a compatible receiver for this method.
+ a->Bind(&throw_exception);
+ {
+ Node* const message_id = a->SmiConstant(Smi::FromInt(msg_template));
+ Node* const method_name_str = a->HeapConstant(
+ isolate->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+
+ Callable callable = CodeFactory::ToString(isolate);
+ Node* const value_str = a->CallStub(callable, context, value);
+
+ a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
+ method_name_str, value_str);
+ var_value_map.Bind(a->UndefinedConstant());
+ a->Goto(&out); // Never reached.
+ }
+
+ a->Bind(&out);
+ return var_value_map.value();
+}
+
+compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
+ compiler::Node* context,
+ compiler::Node* map) {
+ typedef compiler::Node Node;
+
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const regexp_fun =
+ a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = a->WordEqual(map, initial_map);
+
+ return has_initialmap;
+}
+
+// RegExp fast path implementations rely on unmodified JSRegExp instances.
+// We use a fairly coarse granularity for this and simply check whether both
+// the regexp itself is unmodified (i.e. its map has not changed) and its
+// prototype is unmodified.
+void BranchIfFastPath(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* map,
+ CodeStubAssembler::Label* if_isunmodified,
+ CodeStubAssembler::Label* if_ismodified) {
+ typedef compiler::Node Node;
+
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const regexp_fun =
+ a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = a->WordEqual(map, initial_map);
+
+ a->GotoUnless(has_initialmap, if_ismodified);
+
+ Node* const initial_proto_initial_map = a->LoadContextElement(
+ native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
+ Node* const proto_map = a->LoadMap(a->LoadMapPrototype(map));
+ Node* const proto_has_initialmap =
+ a->WordEqual(proto_map, initial_proto_initial_map);
+
+ // TODO(ishell): Update this check once map changes for constant field
+ // tracking are landing.
+
+ a->Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+}
+
+} // namespace
+
+void Builtins::Generate_RegExpPrototypeFlagsGetter(CodeStubAssembler* a) {
+ typedef CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* const receiver = a->Parameter(0);
+ Node* const context = a->Parameter(3);
+
+ Isolate* isolate = a->isolate();
+ Node* const int_zero = a->IntPtrConstant(0);
+ Node* const int_one = a->IntPtrConstant(1);
+
+ Node* const map = ThrowIfNotJSReceiver(a, isolate, context, receiver,
+ MessageTemplate::kRegExpNonObject,
+ "RegExp.prototype.flags");
+
+ Variable var_length(a, MachineType::PointerRepresentation());
+ Variable var_flags(a, MachineType::PointerRepresentation());
+
+ // First, count the number of characters we will need and check which flags
+ // are set.
+
+ var_length.Bind(int_zero);
+
+ Label if_isunmodifiedjsregexp(a),
+ if_isnotunmodifiedjsregexp(a, Label::kDeferred);
+ a->Branch(IsInitialRegExpMap(a, context, map), &if_isunmodifiedjsregexp,
+ &if_isnotunmodifiedjsregexp);
+
+ Label construct_string(a);
+ a->Bind(&if_isunmodifiedjsregexp);
+ {
+ // Refer to JSRegExp's flag property on the fast-path.
+ Node* const flags_smi =
+ a->LoadObjectField(receiver, JSRegExp::kFlagsOffset);
+ Node* const flags_intptr = a->SmiUntag(flags_smi);
+ var_flags.Bind(flags_intptr);
+
+ Label label_global(a), label_ignorecase(a), label_multiline(a),
+ label_unicode(a), label_sticky(a);
+
+#define CASE_FOR_FLAG(FLAG, LABEL, NEXT_LABEL) \
+ do { \
+ a->Bind(&LABEL); \
+ Node* const mask = a->IntPtrConstant(FLAG); \
+ a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
+ &NEXT_LABEL); \
+ var_length.Bind(a->IntPtrAdd(var_length.value(), int_one)); \
+ a->Goto(&NEXT_LABEL); \
+ } while (false)
+
+ a->Goto(&label_global);
+ CASE_FOR_FLAG(JSRegExp::kGlobal, label_global, label_ignorecase);
+ CASE_FOR_FLAG(JSRegExp::kIgnoreCase, label_ignorecase, label_multiline);
+ CASE_FOR_FLAG(JSRegExp::kMultiline, label_multiline, label_unicode);
+ CASE_FOR_FLAG(JSRegExp::kUnicode, label_unicode, label_sticky);
+ CASE_FOR_FLAG(JSRegExp::kSticky, label_sticky, construct_string);
+#undef CASE_FOR_FLAG
+ }
+
+ a->Bind(&if_isnotunmodifiedjsregexp);
+ {
+ // Fall back to GetProperty stub on the slow-path.
+ var_flags.Bind(int_zero);
+
+ Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+ Label label_global(a), label_ignorecase(a), label_multiline(a),
+ label_unicode(a), label_sticky(a);
+
+#define CASE_FOR_FLAG(NAME, FLAG, LABEL, NEXT_LABEL) \
+ do { \
+ a->Bind(&LABEL); \
+ Node* const name = \
+ a->HeapConstant(isolate->factory()->NewStringFromAsciiChecked(NAME)); \
+ Node* const flag = \
+ a->CallStub(getproperty_callable, context, receiver, name); \
+ Label if_isflagset(a); \
+ a->BranchIfToBooleanIsTrue(flag, &if_isflagset, &NEXT_LABEL); \
+ a->Bind(&if_isflagset); \
+ var_length.Bind(a->IntPtrAdd(var_length.value(), int_one)); \
+ var_flags.Bind(a->WordOr(var_flags.value(), a->IntPtrConstant(FLAG))); \
+ a->Goto(&NEXT_LABEL); \
+ } while (false)
+
+ a->Goto(&label_global);
+ CASE_FOR_FLAG("global", JSRegExp::kGlobal, label_global, label_ignorecase);
+ CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase, label_ignorecase,
+ label_multiline);
+ CASE_FOR_FLAG("multiline", JSRegExp::kMultiline, label_multiline,
+ label_unicode);
+ CASE_FOR_FLAG("unicode", JSRegExp::kUnicode, label_unicode, label_sticky);
+ CASE_FOR_FLAG("sticky", JSRegExp::kSticky, label_sticky, construct_string);
+#undef CASE_FOR_FLAG
+ }
+
+ // Allocate a string of the required length and fill it with the corresponding
+ // char for each set flag.
+
+ a->Bind(&construct_string);
+ {
+ Node* const result =
+ a->AllocateSeqOneByteString(context, var_length.value());
+ Node* const flags_intptr = var_flags.value();
+
+ Variable var_offset(a, MachineType::PointerRepresentation());
+ var_offset.Bind(
+ a->IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+ Label label_global(a), label_ignorecase(a), label_multiline(a),
+ label_unicode(a), label_sticky(a), out(a);
+
+#define CASE_FOR_FLAG(FLAG, CHAR, LABEL, NEXT_LABEL) \
+ do { \
+ a->Bind(&LABEL); \
+ Node* const mask = a->IntPtrConstant(FLAG); \
+ a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
+ &NEXT_LABEL); \
+ Node* const value = a->IntPtrConstant(CHAR); \
+ a->StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
+ var_offset.value(), value); \
+ var_offset.Bind(a->IntPtrAdd(var_offset.value(), int_one)); \
+ a->Goto(&NEXT_LABEL); \
+ } while (false)
+
+ a->Goto(&label_global);
+ CASE_FOR_FLAG(JSRegExp::kGlobal, 'g', label_global, label_ignorecase);
+ CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i', label_ignorecase,
+ label_multiline);
+ CASE_FOR_FLAG(JSRegExp::kMultiline, 'm', label_multiline, label_unicode);
+ CASE_FOR_FLAG(JSRegExp::kUnicode, 'u', label_unicode, label_sticky);
+ CASE_FOR_FLAG(JSRegExp::kSticky, 'y', label_sticky, out);
+#undef CASE_FOR_FLAG
+
+ a->Bind(&out);
+ a->Return(result);
+ }
+}
+
+// ES6 21.2.5.10.
+BUILTIN(RegExpPrototypeSourceGetter) {
+ HandleScope scope(isolate);
+
+ Handle<Object> recv = args.receiver();
+ if (!recv->IsJSRegExp()) {
+ Handle<JSFunction> regexp_fun = isolate->regexp_function();
+ if (*recv == regexp_fun->prototype()) {
+ isolate->CountUsage(v8::Isolate::kRegExpPrototypeSourceGetter);
+ return *isolate->factory()->NewStringFromAsciiChecked("(?:)");
+ }
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kRegExpNonRegExp,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "RegExp.prototype.source")));
+ }
+
+ Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(recv);
+ return regexp->source();
+}
+
+BUILTIN(RegExpPrototypeToString) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.toString");
+
+ if (*recv == isolate->regexp_function()->prototype()) {
+ isolate->CountUsage(v8::Isolate::kRegExpPrototypeToString);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+
+ builder.AppendCharacter('/');
+ {
+ Handle<Object> source;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, source,
+ JSReceiver::GetProperty(recv, isolate->factory()->source_string()));
+ Handle<String> source_str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source_str,
+ Object::ToString(isolate, source));
+ builder.AppendString(source_str);
+ }
+
+ builder.AppendCharacter('/');
+ {
+ Handle<Object> flags;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, flags,
+ JSReceiver::GetProperty(recv, isolate->factory()->flags_string()));
+ Handle<String> flags_str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags_str,
+ Object::ToString(isolate, flags));
+ builder.AppendString(flags_str);
+ }
+
+ RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
+}
+
+// ES6 21.2.4.2.
+BUILTIN(RegExpPrototypeSpeciesGetter) {
+ HandleScope scope(isolate);
+ return *args.receiver();
+}
+
+namespace {
+
+// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
+compiler::Node* FastFlagGetter(CodeStubAssembler* a,
+ compiler::Node* const regexp,
+ JSRegExp::Flag flag) {
+ typedef compiler::Node Node;
+
+ Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const mask = a->SmiConstant(Smi::FromInt(flag));
+ Node* const is_flag_set = a->WordNotEqual(a->WordAnd(flags, mask), smi_zero);
+
+ return is_flag_set;
+}
+
+void Generate_FlagGetter(CodeStubAssembler* a, JSRegExp::Flag flag,
+ v8::Isolate::UseCounterFeature counter,
+ const char* method_name) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* const receiver = a->Parameter(0);
+ Node* const context = a->Parameter(3);
+
+ Isolate* isolate = a->isolate();
+
+ // Check whether we have an unmodified regexp instance.
+ Label if_isunmodifiedjsregexp(a),
+ if_isnotunmodifiedjsregexp(a, Label::kDeferred);
+
+ a->GotoIf(a->TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
+
+ Node* const receiver_map = a->LoadMap(receiver);
+ Node* const instance_type = a->LoadMapInstanceType(receiver_map);
+
+ a->Branch(a->Word32Equal(instance_type, a->Int32Constant(JS_REGEXP_TYPE)),
+ &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
+
+ a->Bind(&if_isunmodifiedjsregexp);
+ {
+ // Refer to JSRegExp's flag property on the fast-path.
+ Node* const is_flag_set = FastFlagGetter(a, receiver, flag);
+ a->Return(a->Select(is_flag_set, a->TrueConstant(), a->FalseConstant()));
+ }
+
+ a->Bind(&if_isnotunmodifiedjsregexp);
+ {
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const regexp_fun =
+ a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map = a->LoadObjectField(
+ regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const initial_prototype = a->LoadMapPrototype(initial_map);
+
+ Label if_isprototype(a), if_isnotprototype(a);
+ a->Branch(a->WordEqual(receiver, initial_prototype), &if_isprototype,
+ &if_isnotprototype);
+
+ a->Bind(&if_isprototype);
+ {
+ Node* const counter_smi = a->SmiConstant(Smi::FromInt(counter));
+ a->CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+ a->Return(a->UndefinedConstant());
+ }
+
+ a->Bind(&if_isnotprototype);
+ {
+ Node* const message_id =
+ a->SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const method_name_str = a->HeapConstant(
+ isolate->factory()->NewStringFromAsciiChecked(method_name));
+ a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
+ method_name_str);
+ a->Return(a->UndefinedConstant()); // Never reached.
+ }
+ }
+}
+
+} // namespace
+
+// ES6 21.2.5.4.
+void Builtins::Generate_RegExpPrototypeGlobalGetter(CodeStubAssembler* a) {
+ Generate_FlagGetter(a, JSRegExp::kGlobal,
+ v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.global");
+}
+
+// ES6 21.2.5.5.
+void Builtins::Generate_RegExpPrototypeIgnoreCaseGetter(CodeStubAssembler* a) {
+ Generate_FlagGetter(a, JSRegExp::kIgnoreCase,
+ v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.ignoreCase");
+}
+
+// ES6 21.2.5.7.
+void Builtins::Generate_RegExpPrototypeMultilineGetter(CodeStubAssembler* a) {
+ Generate_FlagGetter(a, JSRegExp::kMultiline,
+ v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.multiline");
+}
+
+// ES6 21.2.5.12.
+void Builtins::Generate_RegExpPrototypeStickyGetter(CodeStubAssembler* a) {
+ Generate_FlagGetter(a, JSRegExp::kSticky,
+ v8::Isolate::kRegExpPrototypeStickyGetter,
+ "RegExp.prototype.sticky");
+}
+
+// ES6 21.2.5.15.
+void Builtins::Generate_RegExpPrototypeUnicodeGetter(CodeStubAssembler* a) {
+ Generate_FlagGetter(a, JSRegExp::kUnicode,
+ v8::Isolate::kRegExpPrototypeUnicodeGetter,
+ "RegExp.prototype.unicode");
+}
+
+// The properties $1..$9 are the first nine capturing substrings of the last
+// successful match, or ''. The function RegExpMakeCaptureGetter will be
+// called with indices from 1 to 9.
+#define DEFINE_CAPTURE_GETTER(i) \
+ BUILTIN(RegExpCapture##i##Getter) { \
+ HandleScope scope(isolate); \
+ return *RegExpUtils::GenericCaptureGetter( \
+ isolate, isolate->regexp_last_match_info(), i); \
+ }
+DEFINE_CAPTURE_GETTER(1)
+DEFINE_CAPTURE_GETTER(2)
+DEFINE_CAPTURE_GETTER(3)
+DEFINE_CAPTURE_GETTER(4)
+DEFINE_CAPTURE_GETTER(5)
+DEFINE_CAPTURE_GETTER(6)
+DEFINE_CAPTURE_GETTER(7)
+DEFINE_CAPTURE_GETTER(8)
+DEFINE_CAPTURE_GETTER(9)
+#undef DEFINE_CAPTURE_GETTER
+
+// The properties `input` and `$_` are aliases for each other. When this
+// value is set, the value it is set to is coerced to a string.
+// Getter and setter for the input.
+
+BUILTIN(RegExpInputGetter) {
+ HandleScope scope(isolate);
+ Handle<Object> obj(isolate->regexp_last_match_info()->LastInput(), isolate);
+ return obj->IsUndefined(isolate) ? isolate->heap()->empty_string()
+ : String::cast(*obj);
+}
+
+BUILTIN(RegExpInputSetter) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<String> str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str,
+ Object::ToString(isolate, value));
+ isolate->regexp_last_match_info()->SetLastInput(*str);
+ return isolate->heap()->undefined_value();
+}
+
+// Getters for the static properties lastMatch, lastParen, leftContext, and
+// rightContext of the RegExp constructor. The properties are computed based
+// on the captures array of the last successful match and the subject string
+// of the last successful match.
+BUILTIN(RegExpLastMatchGetter) {
+ HandleScope scope(isolate);
+ return *RegExpUtils::GenericCaptureGetter(
+ isolate, isolate->regexp_last_match_info(), 0);
+}
+
+BUILTIN(RegExpLastParenGetter) {
+ HandleScope scope(isolate);
+ Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
+ const int length = match_info->NumberOfCaptureRegisters();
+ if (length <= 2) return isolate->heap()->empty_string(); // No captures.
+
+ DCHECK_EQ(0, length % 2);
+ const int last_capture = (length / 2) - 1;
+
+ // We match the SpiderMonkey behavior: return the substring defined by the
+ // last pair (after the first pair) of elements of the capture array even if
+ // it is empty.
+ return *RegExpUtils::GenericCaptureGetter(isolate, match_info, last_capture);
+}
+
+BUILTIN(RegExpLeftContextGetter) {
+ HandleScope scope(isolate);
+ Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
+ const int start_index = match_info->Capture(0);
+ Handle<String> last_subject(match_info->LastSubject());
+ return *isolate->factory()->NewSubString(last_subject, 0, start_index);
+}
+
+BUILTIN(RegExpRightContextGetter) {
+ HandleScope scope(isolate);
+ Handle<RegExpMatchInfo> match_info = isolate->regexp_last_match_info();
+ const int start_index = match_info->Capture(1);
+ Handle<String> last_subject(match_info->LastSubject());
+ const int len = last_subject->length();
+ return *isolate->factory()->NewSubString(last_subject, start_index, len);
+}
+
+namespace {
+
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+compiler::Node* RegExpExec(CodeStubAssembler* a, compiler::Node* context,
+ compiler::Node* recv, compiler::Node* string) {
+ typedef CodeStubAssembler::Variable Variable;
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Isolate* isolate = a->isolate();
+
+ Node* const null = a->NullConstant();
+
+ Variable var_result(a, MachineRepresentation::kTagged);
+ Label out(a), call_builtin_exec(a), slow_path(a, Label::kDeferred);
+
+ Node* const map = a->LoadMap(recv);
+ BranchIfFastPath(a, context, map, &call_builtin_exec, &slow_path);
+
+ a->Bind(&call_builtin_exec);
+ {
+ Node* const result = RegExpPrototypeExecInternal(a, context, recv, string);
+ var_result.Bind(result);
+ a->Goto(&out);
+ }
+
+ a->Bind(&slow_path);
+ {
+ // Take the slow path of fetching the exec property, calling it, and
+ // verifying its return value.
+
+ // Get the exec property.
+ Node* const name = a->HeapConstant(isolate->factory()->exec_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+ Node* const exec = a->CallStub(getproperty_callable, context, recv, name);
+
+ // Is {exec} callable?
+ Label if_iscallable(a), if_isnotcallable(a);
+
+ a->GotoIf(a->TaggedIsSmi(exec), &if_isnotcallable);
+
+ Node* const exec_map = a->LoadMap(exec);
+ a->Branch(a->IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
+
+ a->Bind(&if_iscallable);
+ {
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const result =
+ a->CallJS(call_callable, context, exec, recv, string);
+
+ var_result.Bind(result);
+ a->GotoIf(a->WordEqual(result, null), &out);
+
+ ThrowIfNotJSReceiver(a, isolate, context, result,
+ MessageTemplate::kInvalidRegExpExecResult, "unused");
+
+ a->Goto(&out);
+ }
+
+ a->Bind(&if_isnotcallable);
+ {
+ a->ThrowIfNotInstanceType(context, recv, JS_REGEXP_TYPE,
+ "RegExp.prototype.exec");
+ a->Goto(&call_builtin_exec);
+ }
+ }
+
+ a->Bind(&out);
+ return var_result.value();
+}
+
+} // namespace
+
+// ES#sec-regexp.prototype.test
+// RegExp.prototype.test ( S )
+void Builtins::Generate_RegExpPrototypeTest(CodeStubAssembler* a) {
+ typedef compiler::Node Node;
+
+ Isolate* const isolate = a->isolate();
+
+ Node* const maybe_receiver = a->Parameter(0);
+ Node* const maybe_string = a->Parameter(1);
+ Node* const context = a->Parameter(4);
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.test");
+ Node* const receiver = maybe_receiver;
+
+ // Convert {maybe_string} to a String.
+ Node* const string = a->ToString(context, maybe_string);
+
+ // Call exec.
+ Node* const match_indices = RegExpExec(a, context, receiver, string);
+
+ // Return true iff exec matched successfully.
+ Node* const result = a->Select(a->WordEqual(match_indices, a->NullConstant()),
+ a->FalseConstant(), a->TrueConstant());
+ a->Return(result);
+}
+
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
+BUILTIN(RegExpPrototypeMatch) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@match");
+
+ Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+
+ Handle<String> string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+ Object::ToString(isolate, string_obj));
+
+ Handle<Object> global_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, global_obj,
+ JSReceiver::GetProperty(recv, isolate->factory()->global_string()));
+ const bool global = global_obj->BooleanValue();
+
+ if (!global) {
+ RETURN_RESULT_OR_FAILURE(
+ isolate,
+ RegExpUtils::RegExpExec(isolate, recv, string,
+ isolate->factory()->undefined_value()));
+ }
+
+ Handle<Object> unicode_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, unicode_obj,
+ JSReceiver::GetProperty(recv, isolate->factory()->unicode_string()));
+ const bool unicode = unicode_obj->BooleanValue();
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ RegExpUtils::SetLastIndex(isolate, recv, 0));
+
+ static const int kInitialArraySize = 8;
+ Handle<FixedArray> elems =
+ isolate->factory()->NewFixedArrayWithHoles(kInitialArraySize);
+
+ int n = 0;
+ for (;; n++) {
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ RegExpUtils::RegExpExec(isolate, recv, string,
+ isolate->factory()->undefined_value()));
+
+ if (result->IsNull(isolate)) {
+ if (n == 0) return isolate->heap()->null_value();
+ break;
+ }
+
+ Handle<Object> match_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
+ Object::GetElement(isolate, result, 0));
+
+ Handle<String> match;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
+ Object::ToString(isolate, match_obj));
+
+ elems = FixedArray::SetAndGrow(elems, n, match);
+
+ if (match->length() == 0) {
+ RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
+ isolate, recv, string, unicode));
+ }
+ }
+
+ elems->Shrink(n);
+ return *isolate->factory()->NewJSArrayWithElements(elems);
+}
+
+namespace {
+
+void Generate_RegExpPrototypeSearchBody(CodeStubAssembler* a,
+ compiler::Node* const receiver,
+ compiler::Node* const string,
+ compiler::Node* const context,
+ bool is_fastpath) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Isolate* const isolate = a->isolate();
+
+ Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+ // Grab the initial value of last index.
+ Node* const previous_last_index =
+ is_fastpath ? FastLoadLastIndex(a, context, receiver)
+ : SlowLoadLastIndex(a, context, receiver);
+
+ // Ensure last index is 0.
+ if (is_fastpath) {
+ FastStoreLastIndex(a, context, receiver, smi_zero);
+ } else {
+ Label next(a);
+ a->GotoIf(a->SameValue(previous_last_index, smi_zero, context), &next);
+
+ SlowStoreLastIndex(a, context, receiver, smi_zero);
+ a->Goto(&next);
+ a->Bind(&next);
+ }
+
+ // Call exec.
+ Node* const match_indices =
+ is_fastpath ? RegExpPrototypeExecInternal(a, context, receiver, string)
+ : RegExpExec(a, context, receiver, string);
+
+ // Reset last index if necessary.
+ if (is_fastpath) {
+ FastStoreLastIndex(a, context, receiver, previous_last_index);
+ } else {
+ Label next(a);
+ Node* const current_last_index = SlowLoadLastIndex(a, context, receiver);
+
+ a->GotoIf(a->SameValue(current_last_index, previous_last_index, context),
+ &next);
+
+ SlowStoreLastIndex(a, context, receiver, previous_last_index);
+ a->Goto(&next);
+ a->Bind(&next);
+ }
+
+ // Return -1 if no match was found.
+ {
+ Label next(a);
+ a->GotoUnless(a->WordEqual(match_indices, a->NullConstant()), &next);
+ a->Return(a->SmiConstant(-1));
+ a->Bind(&next);
+ }
+
+ // Return the index of the match.
+ {
+ Label fast_result(a), slow_result(a, Label::kDeferred);
+
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const initial_regexp_result_map =
+ a->LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+ Node* const match_indices_map = a->LoadMap(match_indices);
+
+ a->Branch(a->WordEqual(match_indices_map, initial_regexp_result_map),
+ &fast_result, &slow_result);
+
+ a->Bind(&fast_result);
+ {
+ Node* const index =
+ a->LoadObjectField(match_indices, JSRegExpResult::kIndexOffset,
+ MachineType::AnyTagged());
+ a->Return(index);
+ }
+
+ a->Bind(&slow_result);
+ {
+ Node* const name = a->HeapConstant(isolate->factory()->index_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+ Node* const index =
+ a->CallStub(getproperty_callable, context, match_indices, name);
+ a->Return(index);
+ }
+ }
+}
+
+} // namespace
+
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
+void Builtins::Generate_RegExpPrototypeSearch(CodeStubAssembler* a) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Isolate* const isolate = a->isolate();
+
+ Node* const maybe_receiver = a->Parameter(0);
+ Node* const maybe_string = a->Parameter(1);
+ Node* const context = a->Parameter(4);
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map =
+ ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+ MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search");
+ Node* const receiver = maybe_receiver;
+
+ // Convert {maybe_string} to a String.
+ Node* const string = a->ToString(context, maybe_string);
+
+ Label fast_path(a), slow_path(a);
+ BranchIfFastPath(a, context, map, &fast_path, &slow_path);
+
+ a->Bind(&fast_path);
+ Generate_RegExpPrototypeSearchBody(a, receiver, string, context, true);
+
+ a->Bind(&slow_path);
+ Generate_RegExpPrototypeSearchBody(a, receiver, string, context, false);
+}
+
+namespace {
+
+MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t* out) {
+ if (object->IsUndefined(isolate)) {
+ *out = kMaxUInt32;
+ return object;
+ }
+
+ Handle<Object> number;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
+ *out = NumberToUint32(*number);
+ return object;
+}
+
+bool AtSurrogatePair(Isolate* isolate, Handle<String> string, int index) {
+ if (index + 1 >= string->length()) return false;
+ const uint16_t first = string->Get(index);
+ if (first < 0xD800 || first > 0xDBFF) return false;
+ const uint16_t second = string->Get(index + 1);
+ return (second >= 0xDC00 && second <= 0xDFFF);
+}
+
+Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
+ Handle<FixedArray> elems,
+ int num_elems) {
+ elems->Shrink(num_elems);
+ return isolate->factory()->NewJSArrayWithElements(elems);
+}
+
+MaybeHandle<JSArray> RegExpSplit(Isolate* isolate, Handle<JSRegExp> regexp,
+ Handle<String> string,
+ Handle<Object> limit_obj) {
+ Factory* factory = isolate->factory();
+
+ uint32_t limit;
+ RETURN_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit), JSArray);
+
+ const int length = string->length();
+
+ if (limit == 0) return factory->NewJSArray(0);
+
+ Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+
+ if (length == 0) {
+ Handle<Object> match_indices;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, match_indices,
+ RegExpImpl::Exec(regexp, string, 0, last_match_info), JSArray);
+
+ if (!match_indices->IsNull(isolate)) return factory->NewJSArray(0);
+
+ Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+ elems->set(0, *string);
+ return factory->NewJSArrayWithElements(elems);
+ }
+
+ int current_index = 0;
+ int start_index = 0;
+ int start_match = 0;
+
+ static const int kInitialArraySize = 8;
+ Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
+ int num_elems = 0;
+
+ while (true) {
+ if (start_index == length) {
+ Handle<String> substr =
+ factory->NewSubString(string, current_index, length);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ break;
+ }
+
+ Handle<Object> match_indices_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, match_indices_obj,
+ RegExpImpl::Exec(regexp, string, start_index,
+ isolate->regexp_last_match_info()),
+ JSArray);
+
+ if (match_indices_obj->IsNull(isolate)) {
+ Handle<String> substr =
+ factory->NewSubString(string, current_index, length);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ break;
+ }
+
+ auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
+
+ start_match = match_indices->Capture(0);
+
+ if (start_match == length) {
+ Handle<String> substr =
+ factory->NewSubString(string, current_index, length);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ break;
+ }
+
+ const int end_index = match_indices->Capture(1);
+
+ if (start_index == end_index && end_index == current_index) {
+ const bool unicode = (regexp->GetFlags() & JSRegExp::kUnicode) != 0;
+ if (unicode && AtSurrogatePair(isolate, string, start_index)) {
+ start_index += 2;
+ } else {
+ start_index += 1;
+ }
+ continue;
+ }
+
+ {
+ Handle<String> substr =
+ factory->NewSubString(string, current_index, start_match);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ }
+
+ if (static_cast<uint32_t>(num_elems) == limit) break;
+
+ for (int i = 2; i < match_indices->NumberOfCaptureRegisters(); i += 2) {
+ const int start = match_indices->Capture(i);
+ const int end = match_indices->Capture(i + 1);
+
+ if (end != -1) {
+ Handle<String> substr = factory->NewSubString(string, start, end);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ } else {
+ elems = FixedArray::SetAndGrow(elems, num_elems++,
+ factory->undefined_value());
+ }
+
+ if (static_cast<uint32_t>(num_elems) == limit) {
+ return NewJSArrayWithElements(isolate, elems, num_elems);
+ }
+ }
+
+ start_index = current_index = end_index;
+ }
+
+ return NewJSArrayWithElements(isolate, elems, num_elems);
+}
+
+// ES##sec-speciesconstructor
+// SpeciesConstructor ( O, defaultConstructor )
+MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
+ Isolate* isolate, Handle<JSReceiver> recv,
+ Handle<JSFunction> default_ctor) {
+ Handle<Object> ctor_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, ctor_obj,
+ JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
+ Object);
+
+ if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+
+ if (!ctor_obj->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kConstructorNotReceiver),
+ Object);
+ }
+
+ Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+
+ Handle<Object> species;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, species,
+ JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
+ Object);
+
+ if (species->IsNull(isolate) || species->IsUndefined(isolate)) {
+ return default_ctor;
+ }
+
+ if (species->IsConstructor()) return species;
+
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+}
+
+} // namespace
+
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+//
+// Takes the direct fast path for unmodified JSRegExp receivers; otherwise
+// constructs a sticky "splitter" via SpeciesConstructor and drives it through
+// RegExpExec, as the spec's generic algorithm requires.
+BUILTIN(RegExpPrototypeSplit) {
+  HandleScope scope(isolate);
+  CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@split");
+
+  Factory* factory = isolate->factory();
+
+  Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+  Handle<Object> limit_obj = args.atOrUndefined(isolate, 2);
+
+  Handle<String> string;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
+                                     Object::ToString(isolate, string_obj));
+
+  // Fast path: an unmodified JSRegExp can be split without observable
+  // constructor/species/exec lookups.
+  if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
+    RETURN_RESULT_OR_FAILURE(
+        isolate,
+        RegExpSplit(isolate, Handle<JSRegExp>::cast(recv), string, limit_obj));
+  }
+
+  Handle<JSFunction> regexp_fun = isolate->regexp_function();
+  Handle<Object> ctor;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
+
+  Handle<Object> flags_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+
+  Handle<String> flags;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
+                                     Object::ToString(isolate, flags_obj));
+
+  // Scan the flags string for 'u' (unicode) and 'y' (sticky).
+  Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
+  const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
+
+  Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
+  const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
+
+  // The splitter is always constructed sticky, so lastIndex tracks the split
+  // position across exec calls.
+  Handle<String> new_flags = flags;
+  if (!sticky) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
+                                       factory->NewConsString(flags, y_str));
+  }
+
+  // splitter = new ctor(recv, new_flags).
+  Handle<JSReceiver> splitter;
+  {
+    const int argc = 2;
+
+    ScopedVector<Handle<Object>> argv(argc);
+    argv[0] = recv;
+    argv[1] = new_flags;
+
+    Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
+    Handle<Object> splitter_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+
+    splitter = Handle<JSReceiver>::cast(splitter_obj);
+  }
+
+  uint32_t limit;
+  RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
+
+  const int length = string->length();
+
+  if (limit == 0) return *factory->NewJSArray(0);
+
+  // Empty subject: a single exec decides between [] (match) and [""] (no
+  // match).
+  if (length == 0) {
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+                                                 factory->undefined_value()));
+
+    if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
+
+    Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+    elems->set(0, *string);
+    return *factory->NewJSArrayWithElements(elems);
+  }
+
+  // TODO(jgruber): Wrap this in a helper class.
+  static const int kInitialArraySize = 8;
+  Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
+  int num_elems = 0;
+
+  int string_index = 0;
+  int prev_string_index = 0;
+  while (string_index < length) {
+    RETURN_FAILURE_ON_EXCEPTION(
+        isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
+
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+                                                 factory->undefined_value()));
+
+    // No match at this position: advance by one code unit (or code point in
+    // unicode mode) and retry.
+    if (result->IsNull(isolate)) {
+      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+                                                     string_index, unicode);
+      continue;
+    }
+
+    // TODO(jgruber): Extract toLength of some property into function.
+    Handle<Object> last_index_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
+    const int last_index = Handle<Smi>::cast(last_index_obj)->value();
+
+    // An empty match at the previous split point produces no substring; skip
+    // ahead instead.
+    const int end = std::min(last_index, length);
+    if (end == prev_string_index) {
+      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+                                                     string_index, unicode);
+      continue;
+    }
+
+    // Append the substring preceding this match, honoring {limit}.
+    {
+      Handle<String> substr =
+          factory->NewSubString(string, prev_string_index, string_index);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return *NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    prev_string_index = end;
+
+    // Append each capture group of the match, also honoring {limit}.
+    Handle<Object> num_captures_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num_captures_obj,
+        Object::GetProperty(result, isolate->factory()->length_string()));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
+    const int num_captures =
+        std::max(Handle<Smi>::cast(num_captures_obj)->value(), 0);
+
+    for (int i = 1; i < num_captures; i++) {
+      Handle<Object> capture;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, capture, Object::GetElement(isolate, result, i));
+      elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return *NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    // Resume searching right after this match.
+    string_index = prev_string_index;
+  }
+
+  // Trailing substring after the final match.
+  {
+    Handle<String> substr =
+        factory->NewSubString(string, prev_string_index, length);
+    elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+  }
+
+  return *NewJSArrayWithElements(isolate, elems, num_elems);
+}
+
+namespace {
+
+// Collects all matches via %RegExpExecMultiple, maps each match through
+// {replace_callable}, and concatenates the pieces with %StringBuilderConcat.
+// Returns the resulting string node, or {subject_string} if nothing matched.
+compiler::Node* ReplaceGlobalCallableFastPath(
+    CodeStubAssembler* a, compiler::Node* context, compiler::Node* regexp,
+    compiler::Node* subject_string, compiler::Node* replace_callable) {
+  // The fast path is reached only if {receiver} is a global unmodified
+  // JSRegExp instance and {replace_callable} is callable.
+
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const null = a->NullConstant();
+  Node* const undefined = a->UndefinedConstant();
+  Node* const int_zero = a->IntPtrConstant(0);
+  Node* const int_one = a->IntPtrConstant(1);
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+  Node* const native_context = a->LoadNativeContext(context);
+
+  Label out(a);
+  Variable var_result(a, MachineRepresentation::kTagged);
+
+  // Set last index to 0.
+  FastStoreLastIndex(a, context, regexp, smi_zero);
+
+  // Allocate {result_array}.
+  Node* result_array;
+  {
+    ElementsKind kind = FAST_ELEMENTS;
+    Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
+    Node* const capacity = a->IntPtrConstant(16);
+    Node* const length = smi_zero;
+    Node* const allocation_site = nullptr;
+    CodeStubAssembler::ParameterMode capacity_mode =
+        CodeStubAssembler::INTPTR_PARAMETERS;
+
+    result_array = a->AllocateJSArray(kind, array_map, capacity, length,
+                                      allocation_site, capacity_mode);
+  }
+
+  // Call into runtime for RegExpExecMultiple.
+  Node* last_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+  Node* const res =
+      a->CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
+                     subject_string, last_match_info, result_array);
+
+  // Reset last index to 0.
+  FastStoreLastIndex(a, context, regexp, smi_zero);
+
+  // If no matches, return the subject string.
+  var_result.Bind(subject_string);
+  a->GotoIf(a->WordEqual(res, null), &out);
+
+  // Reload last match info since it might have changed.
+  last_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+  Node* const res_length = a->LoadJSArrayLength(res);
+  Node* const res_elems = a->LoadElements(res);
+  CSA_ASSERT(a, a->HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
+
+  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+  Node* const num_capture_registers = a->LoadFixedArrayElement(
+      last_match_info,
+      a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0, mode);
+
+  Label if_hasexplicitcaptures(a), if_noexplicitcaptures(a), create_result(a);
+  a->Branch(a->SmiEqual(num_capture_registers, a->SmiConstant(Smi::FromInt(2))),
+            &if_noexplicitcaptures, &if_hasexplicitcaptures);
+
+  a->Bind(&if_noexplicitcaptures);
+  {
+    // If the number of captures is two then there are no explicit captures in
+    // the regexp, just the implicit capture that captures the whole match. In
+    // this case we can simplify quite a bit and end up with something faster.
+    // The builder will consist of some integers that indicate slices of the
+    // input string and some replacements that were returned from the replace
+    // function.
+
+    // {var_match_start} tracks the subject position of the current match so
+    // it can be passed as the position argument to the replace callback.
+    Variable var_match_start(a, MachineRepresentation::kTagged);
+    var_match_start.Bind(smi_zero);
+
+    Node* const end = a->SmiUntag(res_length);
+    Variable var_i(a, MachineType::PointerRepresentation());
+    var_i.Bind(int_zero);
+
+    Variable* vars[] = {&var_i, &var_match_start};
+    Label loop(a, 2, vars);
+    a->Goto(&loop);
+    a->Bind(&loop);
+    {
+      Node* const i = var_i.value();
+      a->GotoUnless(a->IntPtrLessThan(i, end), &create_result);
+
+      CodeStubAssembler::ParameterMode mode =
+          CodeStubAssembler::INTPTR_PARAMETERS;
+      Node* const elem = a->LoadFixedArrayElement(res_elems, i, 0, mode);
+
+      Label if_issmi(a), if_isstring(a), loop_epilogue(a);
+      a->Branch(a->TaggedIsSmi(elem), &if_issmi, &if_isstring);
+
+      a->Bind(&if_issmi);
+      {
+        // Integers represent slices of the original string.
+        Label if_isnegativeorzero(a), if_ispositive(a);
+        a->BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
+                                      &if_ispositive);
+
+        a->Bind(&if_ispositive);
+        {
+          // Positive smi: decoded as (elem >> 11) + (elem & 0x7ff) —
+          // presumably slice start plus slice length packed by
+          // %RegExpExecMultiple; TODO confirm against the runtime encoding.
+          Node* const int_elem = a->SmiUntag(elem);
+          Node* const new_match_start =
+              a->IntPtrAdd(a->WordShr(int_elem, a->IntPtrConstant(11)),
+                           a->WordAnd(int_elem, a->IntPtrConstant(0x7ff)));
+          var_match_start.Bind(a->SmiTag(new_match_start));
+          a->Goto(&loop_epilogue);
+        }
+
+        a->Bind(&if_isnegativeorzero);
+        {
+          // Non-positive smi: the slice spans two consecutive elements; both
+          // are consumed here (encoding defined by %RegExpExecMultiple —
+          // TODO confirm).
+          Node* const next_i = a->IntPtrAdd(i, int_one);
+          var_i.Bind(next_i);
+
+          Node* const next_elem =
+              a->LoadFixedArrayElement(res_elems, next_i, 0, mode);
+
+          Node* const new_match_start = a->SmiSub(next_elem, elem);
+          var_match_start.Bind(new_match_start);
+          a->Goto(&loop_epilogue);
+        }
+      }
+
+      a->Bind(&if_isstring);
+      {
+        // String elements are matched substrings: invoke the replace
+        // callback (match, position, subject) and store its stringified
+        // result back into the array in-place.
+        CSA_ASSERT(a, a->IsStringInstanceType(a->LoadInstanceType(elem)));
+
+        Callable call_callable = CodeFactory::Call(isolate);
+        Node* const replacement_obj =
+            a->CallJS(call_callable, context, replace_callable, undefined, elem,
+                      var_match_start.value(), subject_string);
+
+        Node* const replacement_str = a->ToString(context, replacement_obj);
+        a->StoreFixedArrayElement(res_elems, i, replacement_str);
+
+        Node* const elem_length = a->LoadStringLength(elem);
+        Node* const new_match_start =
+            a->SmiAdd(var_match_start.value(), elem_length);
+        var_match_start.Bind(new_match_start);
+
+        a->Goto(&loop_epilogue);
+      }
+
+      a->Bind(&loop_epilogue);
+      {
+        var_i.Bind(a->IntPtrAdd(var_i.value(), int_one));
+        a->Goto(&loop);
+      }
+    }
+  }
+
+  a->Bind(&if_hasexplicitcaptures);
+  {
+    CodeStubAssembler::ParameterMode mode =
+        CodeStubAssembler::INTPTR_PARAMETERS;
+
+    Node* const from = int_zero;
+    Node* const to = a->SmiUntag(res_length);
+    const int increment = 1;
+
+    a->BuildFastLoop(
+        MachineType::PointerRepresentation(), from, to,
+        [res_elems, isolate, native_context, context, undefined,
+         replace_callable, mode](CodeStubAssembler* a, Node* index) {
+          Node* const elem =
+              a->LoadFixedArrayElement(res_elems, index, 0, mode);
+
+          Label do_continue(a);
+          // Smi elements are slice markers; leave them untouched.
+          a->GotoIf(a->TaggedIsSmi(elem), &do_continue);
+
+          // elem must be an Array.
+          // Use the apply argument as backing for global RegExp properties.
+
+          CSA_ASSERT(a, a->HasInstanceType(elem, JS_ARRAY_TYPE));
+
+          // TODO(jgruber): Remove indirection through Call->ReflectApply.
+          Callable call_callable = CodeFactory::Call(isolate);
+          Node* const reflect_apply = a->LoadContextElement(
+              native_context, Context::REFLECT_APPLY_INDEX);
+
+          Node* const replacement_obj =
+              a->CallJS(call_callable, context, reflect_apply, undefined,
+                        replace_callable, undefined, elem);
+
+          // Overwrite the i'th element in the results with the string we got
+          // back from the callback function.
+
+          Node* const replacement_str = a->ToString(context, replacement_obj);
+          a->StoreFixedArrayElement(res_elems, index, replacement_str,
+                                    UPDATE_WRITE_BARRIER, mode);
+
+          a->Goto(&do_continue);
+          a->Bind(&do_continue);
+        },
+        increment, CodeStubAssembler::IndexAdvanceMode::kPost);
+
+    a->Goto(&create_result);
+  }
+
+  a->Bind(&create_result);
+  {
+    // Join slices and replacement strings into the final result.
+    Node* const result = a->CallRuntime(Runtime::kStringBuilderConcat, context,
+                                        res, res_length, subject_string);
+    var_result.Bind(result);
+    a->Goto(&out);
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+// Global regexps are handed off to the runtime; non-global ones run exec once
+// and splice {replace_string} between the surrounding substrings.
+compiler::Node* ReplaceSimpleStringFastPath(CodeStubAssembler* a,
+                                            compiler::Node* context,
+                                            compiler::Node* regexp,
+                                            compiler::Node* subject_string,
+                                            compiler::Node* replace_string) {
+  // The fast path is reached only if {receiver} is an unmodified
+  // JSRegExp instance, {replace_value} is non-callable, and
+  // ToString({replace_value}) does not contain '$', i.e. we're doing a simple
+  // string replacement.
+
+  typedef CodeStubAssembler::Variable Variable;
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const null = a->NullConstant();
+  Node* const int_zero = a->IntPtrConstant(0);
+  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
+  Label out(a);
+  Variable var_result(a, MachineRepresentation::kTagged);
+
+  // Load the last match info.
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const last_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+  // Is {regexp} global? Decided by testing the kGlobal bit in the flags smi.
+  Label if_isglobal(a), if_isnonglobal(a);
+  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Node* const is_global =
+      a->WordAnd(a->SmiUntag(flags), a->IntPtrConstant(JSRegExp::kGlobal));
+  a->Branch(a->WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
+
+  a->Bind(&if_isglobal);
+  {
+    // Hand off global regexps to runtime.
+    FastStoreLastIndex(a, context, regexp, smi_zero);
+    Node* const result =
+        a->CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
+                       subject_string, regexp, replace_string, last_match_info);
+    var_result.Bind(result);
+    a->Goto(&out);
+  }
+
+  a->Bind(&if_isnonglobal);
+  {
+    // Run exec, then manually construct the resulting string.
+    Callable exec_callable = CodeFactory::RegExpExec(isolate);
+    Node* const match_indices =
+        a->CallStub(exec_callable, context, regexp, subject_string, smi_zero,
+                    last_match_info);
+
+    Label if_matched(a), if_didnotmatch(a);
+    a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+
+    a->Bind(&if_didnotmatch);
+    {
+      // No match: reset lastIndex and return the subject unchanged.
+      FastStoreLastIndex(a, context, regexp, smi_zero);
+      var_result.Bind(subject_string);
+      a->Goto(&out);
+    }
+
+    a->Bind(&if_matched);
+    {
+      CodeStubAssembler::ParameterMode mode =
+          CodeStubAssembler::INTPTR_PARAMETERS;
+
+      // Bounds of the match within the subject, read from the match info.
+      Node* const subject_start = smi_zero;
+      Node* const match_start = a->LoadFixedArrayElement(
+          match_indices, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex),
+          0, mode);
+      Node* const match_end = a->LoadFixedArrayElement(
+          match_indices,
+          a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0, mode);
+      Node* const subject_end = a->LoadStringLength(subject_string);
+
+      Label if_replaceisempty(a), if_replaceisnotempty(a);
+      Node* const replace_length = a->LoadStringLength(replace_string);
+      a->Branch(a->SmiEqual(replace_length, smi_zero), &if_replaceisempty,
+                &if_replaceisnotempty);
+
+      a->Bind(&if_replaceisempty);
+      {
+        // TODO(jgruber): We could skip many of the checks that using SubString
+        // here entails.
+
+        // Empty replacement: result is prefix + suffix.
+        Node* const first_part =
+            a->SubString(context, subject_string, subject_start, match_start);
+        Node* const second_part =
+            a->SubString(context, subject_string, match_end, subject_end);
+
+        Node* const result = a->StringAdd(context, first_part, second_part);
+        var_result.Bind(result);
+        a->Goto(&out);
+      }
+
+      a->Bind(&if_replaceisnotempty);
+      {
+        // Non-empty replacement: result is prefix + replacement + suffix.
+        Node* const first_part =
+            a->SubString(context, subject_string, subject_start, match_start);
+        Node* const second_part = replace_string;
+        Node* const third_part =
+            a->SubString(context, subject_string, match_end, subject_end);
+
+        Node* result = a->StringAdd(context, first_part, second_part);
+        result = a->StringAdd(context, result, third_part);
+
+        var_result.Bind(result);
+        a->Goto(&out);
+      }
+    }
+  }
+
+  a->Bind(&out);
+  return var_result.value();
+}
+
+} // namespace
+
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+//
+// Dispatches between three paths: a callable replacer on an unmodified
+// regexp, a simple (no-'$') string replacement on an unmodified regexp, and
+// the generic %RegExpReplace runtime fallback.
+void Builtins::Generate_RegExpPrototypeReplace(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const maybe_receiver = a->Parameter(0);
+  Node* const maybe_string = a->Parameter(1);
+  Node* const replace_value = a->Parameter(2);
+  Node* const context = a->Parameter(5);
+
+  Node* const int_zero = a->IntPtrConstant(0);
+
+  // Ensure {maybe_receiver} is a JSReceiver.
+  Node* const map =
+      ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+                           MessageTemplate::kIncompatibleMethodReceiver,
+                           "RegExp.prototype.@@replace");
+  Node* const receiver = maybe_receiver;
+
+  // Convert {maybe_string} to a String.
+  Callable tostring_callable = CodeFactory::ToString(isolate);
+  Node* const string = a->CallStub(tostring_callable, context, maybe_string);
+
+  // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
+  Label checkreplacecallable(a), runtime(a, Label::kDeferred), fastpath(a);
+  BranchIfFastPath(a, context, map, &checkreplacecallable, &runtime);
+
+  a->Bind(&checkreplacecallable);
+  Node* const regexp = receiver;
+
+  // 2. Is {replace_value} callable?
+  Label checkreplacestring(a), if_iscallable(a);
+  a->GotoIf(a->TaggedIsSmi(replace_value), &checkreplacestring);
+
+  Node* const replace_value_map = a->LoadMap(replace_value);
+  a->Branch(a->IsCallableMap(replace_value_map), &if_iscallable,
+            &checkreplacestring);
+
+  // 3. Does ToString({replace_value}) contain '$'?
+  a->Bind(&checkreplacestring);
+  {
+    Node* const replace_string =
+        a->CallStub(tostring_callable, context, replace_value);
+
+    // A '$' in the pattern requires full substitution semantics -> runtime.
+    Node* const dollar_char = a->IntPtrConstant('$');
+    Node* const smi_minusone = a->SmiConstant(Smi::FromInt(-1));
+    a->GotoUnless(a->SmiEqual(a->StringIndexOfChar(context, replace_string,
+                                                   dollar_char, int_zero),
+                              smi_minusone),
+                  &runtime);
+
+    a->Return(ReplaceSimpleStringFastPath(a, context, regexp, string,
+                                          replace_string));
+  }
+
+  // {regexp} is unmodified and {replace_value} is callable.
+  a->Bind(&if_iscallable);
+  {
+    Node* const replace_callable = replace_value;
+
+    // Check if the {regexp} is global.
+    Label if_isglobal(a), if_isnotglobal(a);
+    Node* const is_global = FastFlagGetter(a, regexp, JSRegExp::kGlobal);
+    a->Branch(is_global, &if_isglobal, &if_isnotglobal);
+
+    a->Bind(&if_isglobal);
+    {
+      Node* const result = ReplaceGlobalCallableFastPath(
+          a, context, regexp, string, replace_callable);
+      a->Return(result);
+    }
+
+    a->Bind(&if_isnotglobal);
+    {
+      Node* const result =
+          a->CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
+                         context, string, regexp, replace_callable);
       a->Return(result);
     }
   }
+
+  a->Bind(&runtime);
+  {
+    // Generic path for modified receivers/subclasses.
+    Node* const result = a->CallRuntime(Runtime::kRegExpReplace, context,
+                                        receiver, string, replace_value);
+    a->Return(result);
+  }
+}
+
+// Simple string matching functionality for internal use which does not modify
+// the last match info.
+// Returns null on no match, otherwise a fresh match result object built from
+// the dedicated internal match info.
+void Builtins::Generate_RegExpInternalMatch(CodeStubAssembler* a) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Isolate* const isolate = a->isolate();
+
+  Node* const regexp = a->Parameter(1);
+  Node* const string = a->Parameter(2);
+  Node* const context = a->Parameter(5);
+
+  Node* const null = a->NullConstant();
+  Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+
+  // Use REGEXP_INTERNAL_MATCH_INFO so the user-visible last match info is
+  // left untouched by this exec.
+  Node* const native_context = a->LoadNativeContext(context);
+  Node* const internal_match_info = a->LoadContextElement(
+      native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
+
+  Callable exec_callable = CodeFactory::RegExpExec(isolate);
+  Node* const match_indices = a->CallStub(
+      exec_callable, context, regexp, string, smi_zero, internal_match_info);
+
+  Label if_matched(a), if_didnotmatch(a);
+  a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+
+  a->Bind(&if_didnotmatch);
+  a->Return(null);
+
+  a->Bind(&if_matched);
+  {
+    Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
+                                                   match_indices, string);
+    a->Return(result);
+  }
 }
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 6aad4daeef..2b5bf498a5 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -37,7 +37,7 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
not_float_or_clamped(a), invalid(a);
// Fail if it is not a heap object.
- a->Branch(a->WordIsSmi(tagged), &is_smi, &not_smi);
+ a->Branch(a->TaggedIsSmi(tagged), &is_smi, &not_smi);
a->Bind(&is_smi);
a->Goto(&invalid);
@@ -52,8 +52,9 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
// Fail if the array's JSArrayBuffer is not shared.
a->Bind(&is_typed_array);
Node* array_buffer = a->LoadObjectField(tagged, JSTypedArray::kBufferOffset);
- Node* is_buffer_shared = a->BitFieldDecode<JSArrayBuffer::IsShared>(
- a->LoadObjectField(array_buffer, JSArrayBuffer::kBitFieldSlot));
+ Node* is_buffer_shared =
+ a->IsSetWord32<JSArrayBuffer::IsShared>(a->LoadObjectField(
+ array_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32()));
a->Branch(is_buffer_shared, &is_shared, &not_shared);
a->Bind(&not_shared);
a->Goto(&invalid);
@@ -102,7 +103,7 @@ compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
CodeStubAssembler::Label done(a, &var_result);
CodeStubAssembler::Label if_numberissmi(a), if_numberisnotsmi(a);
- a->Branch(a->WordIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
+ a->Branch(a->TaggedIsSmi(number_index), &if_numberissmi, &if_numberisnotsmi);
a->Bind(&if_numberissmi);
{
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 68d2bd0c97..4ccccbc859 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -6,13 +6,15 @@
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
+#include "src/regexp/regexp-utils.h"
namespace v8 {
namespace internal {
-namespace {
+typedef CodeStubAssembler::ResultMode ResultMode;
+typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
-enum ResultMode { kDontNegateResult, kNegateResult };
+namespace {
void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
@@ -168,9 +170,10 @@ void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
{
// TODO(bmeurer): Add fast case support for flattened cons strings;
// also add support for two byte string equality checks.
- Runtime::FunctionId function_id = (mode == kDontNegateResult)
- ? Runtime::kStringEqual
- : Runtime::kStringNotEqual;
+ Runtime::FunctionId function_id =
+ (mode == ResultMode::kDontNegateResult)
+ ? Runtime::kStringEqual
+ : Runtime::kStringNotEqual;
assembler->TailCallRuntime(function_id, context, lhs, rhs);
}
}
@@ -184,18 +187,14 @@ void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
}
assembler->Bind(&if_equal);
- assembler->Return(assembler->BooleanConstant(mode == kDontNegateResult));
+ assembler->Return(
+ assembler->BooleanConstant(mode == ResultMode::kDontNegateResult));
assembler->Bind(&if_notequal);
- assembler->Return(assembler->BooleanConstant(mode == kNegateResult));
+ assembler->Return(
+ assembler->BooleanConstant(mode == ResultMode::kNegateResult));
}
-enum RelationalComparisonMode {
- kLessThan,
- kLessThanOrEqual,
- kGreaterThan,
- kGreaterThanOrEqual
-};
void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
RelationalComparisonMode mode) {
@@ -293,8 +292,8 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
assembler->Goto(&loop);
assembler->Bind(&if_valueisnotsame);
- assembler->BranchIf(assembler->Uint32LessThan(lhs_value, rhs_value),
- &if_less, &if_greater);
+ assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
+ &if_less, &if_greater);
}
assembler->Bind(&if_done);
@@ -320,19 +319,19 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
// TODO(bmeurer): Add fast case support for flattened cons strings;
// also add support for two byte string relational comparisons.
switch (mode) {
- case kLessThan:
+ case RelationalComparisonMode::kLessThan:
assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
rhs);
break;
- case kLessThanOrEqual:
+ case RelationalComparisonMode::kLessThanOrEqual:
assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
lhs, rhs);
break;
- case kGreaterThan:
+ case RelationalComparisonMode::kGreaterThan:
assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
rhs);
break;
- case kGreaterThanOrEqual:
+ case RelationalComparisonMode::kGreaterThanOrEqual:
assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
context, lhs, rhs);
break;
@@ -342,39 +341,39 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
assembler->Bind(&if_less);
switch (mode) {
- case kLessThan:
- case kLessThanOrEqual:
+ case RelationalComparisonMode::kLessThan:
+ case RelationalComparisonMode::kLessThanOrEqual:
assembler->Return(assembler->BooleanConstant(true));
break;
- case kGreaterThan:
- case kGreaterThanOrEqual:
+ case RelationalComparisonMode::kGreaterThan:
+ case RelationalComparisonMode::kGreaterThanOrEqual:
assembler->Return(assembler->BooleanConstant(false));
break;
}
assembler->Bind(&if_equal);
switch (mode) {
- case kLessThan:
- case kGreaterThan:
+ case RelationalComparisonMode::kLessThan:
+ case RelationalComparisonMode::kGreaterThan:
assembler->Return(assembler->BooleanConstant(false));
break;
- case kLessThanOrEqual:
- case kGreaterThanOrEqual:
+ case RelationalComparisonMode::kLessThanOrEqual:
+ case RelationalComparisonMode::kGreaterThanOrEqual:
assembler->Return(assembler->BooleanConstant(true));
break;
}
assembler->Bind(&if_greater);
switch (mode) {
- case kLessThan:
- case kLessThanOrEqual:
+ case RelationalComparisonMode::kLessThan:
+ case RelationalComparisonMode::kLessThanOrEqual:
assembler->Return(assembler->BooleanConstant(false));
break;
- case kGreaterThan:
- case kGreaterThanOrEqual:
+ case RelationalComparisonMode::kGreaterThan:
+ case RelationalComparisonMode::kGreaterThanOrEqual:
assembler->Return(assembler->BooleanConstant(true));
break;
}
@@ -384,32 +383,36 @@ void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
// static
void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
- GenerateStringEqual(assembler, kDontNegateResult);
+ GenerateStringEqual(assembler, ResultMode::kDontNegateResult);
}
// static
void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
- GenerateStringEqual(assembler, kNegateResult);
+ GenerateStringEqual(assembler, ResultMode::kNegateResult);
}
// static
void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler, kLessThan);
+ GenerateStringRelationalComparison(assembler,
+ RelationalComparisonMode::kLessThan);
}
// static
void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler, kLessThanOrEqual);
+ GenerateStringRelationalComparison(
+ assembler, RelationalComparisonMode::kLessThanOrEqual);
}
// static
void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler, kGreaterThan);
+ GenerateStringRelationalComparison(assembler,
+ RelationalComparisonMode::kGreaterThan);
}
// static
void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler, kGreaterThanOrEqual);
+ GenerateStringRelationalComparison(
+ assembler, RelationalComparisonMode::kGreaterThanOrEqual);
}
// -----------------------------------------------------------------------------
@@ -421,181 +424,117 @@ void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
- Node* code = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+ Node* argc = assembler->ChangeInt32ToIntPtr(
+ assembler->Parameter(BuiltinDescriptor::kArgumentsCount));
+ Node* context = assembler->Parameter(BuiltinDescriptor::kContext);
+
+ CodeStubArguments arguments(assembler, argc);
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(assembler), if_notoneargument(assembler);
- Node* parent_frame_pointer = assembler->LoadParentFramePointer();
- Node* parent_frame_type =
- assembler->Load(MachineType::Pointer(), parent_frame_pointer,
- assembler->IntPtrConstant(
- CommonFrameConstants::kContextOrFrameTypeOffset));
- assembler->Branch(
- assembler->WordEqual(
- parent_frame_type,
- assembler->SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
- &if_notoneargument, &if_oneargument);
+ assembler->Branch(assembler->WordEqual(argc, assembler->IntPtrConstant(1)),
+ &if_oneargument, &if_notoneargument);
assembler->Bind(&if_oneargument);
{
// Single argument case, perform fast single character string cache lookup
// for one-byte code units, or fall back to creating a single character
// string on the fly otherwise.
+ Node* code = arguments.AtIndex(0);
Node* code32 = assembler->TruncateTaggedToWord32(context, code);
Node* code16 = assembler->Word32And(
code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
Node* result = assembler->StringFromCharCode(code16);
- assembler->Return(result);
+ arguments.PopAndReturn(result);
}
+ Node* code16 = nullptr;
assembler->Bind(&if_notoneargument);
{
- // Determine the resulting string length.
- Node* length = assembler->LoadAndUntagSmi(
- parent_frame_pointer, ArgumentsAdaptorFrameConstants::kLengthOffset);
-
+ Label two_byte(assembler);
// Assume that the resulting string contains only one-byte characters.
- Node* result = assembler->AllocateSeqOneByteString(context, length);
-
- // Truncate all input parameters and append them to the resulting string.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset), done_loop(assembler);
- var_offset.Bind(assembler->IntPtrConstant(0));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {offset}.
- Node* offset = var_offset.value();
-
- // Check if we're done with the string.
- assembler->GotoIf(assembler->WordEqual(offset, length), &done_loop);
-
- // Load the next code point and truncate it to a 16-bit value.
- Node* code = assembler->Load(
- MachineType::AnyTagged(), parent_frame_pointer,
- assembler->IntPtrAdd(
- assembler->WordShl(assembler->IntPtrSub(length, offset),
- assembler->IntPtrConstant(kPointerSizeLog2)),
- assembler->IntPtrConstant(
- CommonFrameConstants::kFixedFrameSizeAboveFp -
- kPointerSize)));
- Node* code32 = assembler->TruncateTaggedToWord32(context, code);
- Node* code16 = assembler->Word32And(
+ Node* one_byte_result = assembler->AllocateSeqOneByteString(context, argc);
+
+ Variable max_index(assembler, MachineType::PointerRepresentation());
+ max_index.Bind(assembler->IntPtrConstant(0));
+
+ // Iterate over the incoming arguments, converting them to 8-bit character
+ // codes. Stop if any of the conversions generates a code that doesn't fit
+ // in 8 bits.
+ CodeStubAssembler::VariableList vars({&max_index}, assembler->zone());
+ arguments.ForEach(vars, [context, &two_byte, &max_index, &code16,
+ one_byte_result](CodeStubAssembler* assembler,
+ Node* arg) {
+ Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
+ code16 = assembler->Word32And(
code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
- // Check if {code16} fits into a one-byte string.
- Label if_codeisonebyte(assembler), if_codeistwobyte(assembler);
- assembler->Branch(
- assembler->Int32LessThanOrEqual(
+ assembler->GotoIf(
+ assembler->Int32GreaterThan(
code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
- &if_codeisonebyte, &if_codeistwobyte);
-
- assembler->Bind(&if_codeisonebyte);
- {
- // The {code16} fits into the SeqOneByteString {result}.
- assembler->StoreNoWriteBarrier(
- MachineRepresentation::kWord8, result,
- assembler->IntPtrAdd(
- assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag),
- offset),
- code16);
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_codeistwobyte);
- {
- // Allocate a SeqTwoByteString to hold the resulting string.
- Node* cresult = assembler->AllocateSeqTwoByteString(context, length);
-
- // Copy all characters that were previously written to the
- // SeqOneByteString in {result} over to the new {cresult}.
- Variable var_coffset(assembler, MachineType::PointerRepresentation());
- Label cloop(assembler, &var_coffset), done_cloop(assembler);
- var_coffset.Bind(assembler->IntPtrConstant(0));
- assembler->Goto(&cloop);
- assembler->Bind(&cloop);
- {
- Node* coffset = var_coffset.value();
- assembler->GotoIf(assembler->WordEqual(coffset, offset), &done_cloop);
- Node* ccode = assembler->Load(
- MachineType::Uint8(), result,
- assembler->IntPtrAdd(
- assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag),
- coffset));
- assembler->StoreNoWriteBarrier(
- MachineRepresentation::kWord16, cresult,
- assembler->IntPtrAdd(
- assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- assembler->WordShl(coffset, 1)),
- ccode);
- var_coffset.Bind(
- assembler->IntPtrAdd(coffset, assembler->IntPtrConstant(1)));
- assembler->Goto(&cloop);
- }
+ &two_byte);
+
+ // The {code16} fits into the SeqOneByteString {one_byte_result}.
+ Node* offset = assembler->ElementOffsetFromIndex(
+ max_index.value(), UINT8_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord8,
+ one_byte_result, offset, code16);
+ max_index.Bind(assembler->IntPtrAdd(max_index.value(),
+ assembler->IntPtrConstant(1)));
+ });
+ arguments.PopAndReturn(one_byte_result);
+
+ assembler->Bind(&two_byte);
+
+ // At least one of the characters in the string requires a 16-bit
+ // representation. Allocate a SeqTwoByteString to hold the resulting
+ // string.
+ Node* two_byte_result = assembler->AllocateSeqTwoByteString(context, argc);
+
+ // Copy the characters that have already been put in the 8-bit string into
+ // their corresponding positions in the new 16-bit string.
+ Node* zero = assembler->IntPtrConstant(0);
+ assembler->CopyStringCharacters(
+ one_byte_result, two_byte_result, zero, zero, max_index.value(),
+ String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::INTPTR_PARAMETERS);
- // Write the pending {code16} to {offset}.
- assembler->Bind(&done_cloop);
- assembler->StoreNoWriteBarrier(
- MachineRepresentation::kWord16, cresult,
- assembler->IntPtrAdd(
- assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- assembler->WordShl(offset, 1)),
- code16);
-
- // Copy the remaining parameters to the SeqTwoByteString {cresult}.
- Label floop(assembler, &var_offset), done_floop(assembler);
- assembler->Goto(&floop);
- assembler->Bind(&floop);
- {
- // Compute the next {offset}.
- Node* offset = assembler->IntPtrAdd(var_offset.value(),
- assembler->IntPtrConstant(1));
-
- // Check if we're done with the string.
- assembler->GotoIf(assembler->WordEqual(offset, length), &done_floop);
-
- // Load the next code point and truncate it to a 16-bit value.
- Node* code = assembler->Load(
- MachineType::AnyTagged(), parent_frame_pointer,
- assembler->IntPtrAdd(
- assembler->WordShl(
- assembler->IntPtrSub(length, offset),
- assembler->IntPtrConstant(kPointerSizeLog2)),
- assembler->IntPtrConstant(
- CommonFrameConstants::kFixedFrameSizeAboveFp -
- kPointerSize)));
- Node* code32 = assembler->TruncateTaggedToWord32(context, code);
+ // Write the character that caused the 8-bit to 16-bit fault.
+ Node* max_index_offset = assembler->ElementOffsetFromIndex(
+ max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
+ two_byte_result, max_index_offset, code16);
+ max_index.Bind(
+ assembler->IntPtrAdd(max_index.value(), assembler->IntPtrConstant(1)));
+
+ // Resume copying the passed-in arguments from the same place where the
+ // 8-bit copy stopped, but this time copying over all of the characters
+ // using a 16-bit representation.
+ arguments.ForEach(
+ vars,
+ [context, two_byte_result, &max_index](CodeStubAssembler* assembler,
+ Node* arg) {
+ Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
Node* code16 = assembler->Word32And(
code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
- // Store the truncated {code} point at the next offset.
- assembler->StoreNoWriteBarrier(
- MachineRepresentation::kWord16, cresult,
- assembler->IntPtrAdd(
- assembler->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- assembler->WordShl(offset, 1)),
- code16);
- var_offset.Bind(offset);
- assembler->Goto(&floop);
- }
-
- // Return the SeqTwoByteString.
- assembler->Bind(&done_floop);
- assembler->Return(cresult);
- }
- }
-
- assembler->Bind(&done_loop);
- assembler->Return(result);
+ Node* offset = assembler->ElementOffsetFromIndex(
+ max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
+ two_byte_result, offset, code16);
+ max_index.Bind(assembler->IntPtrAdd(max_index.value(),
+ assembler->IntPtrConstant(1)));
+ },
+ max_index.value());
+
+ arguments.PopAndReturn(two_byte_result);
}
}
@@ -662,7 +601,7 @@ BUILTIN(StringFromCodePoint) {
List<uc16> two_byte_buffer(length - index);
while (true) {
- if (code <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ if (code <= static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
two_byte_buffer.Add(code);
} else {
two_byte_buffer.Add(unibrow::Utf16::LeadSurrogate(code));
@@ -711,7 +650,8 @@ void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
Label return_emptystring(assembler, Label::kDeferred);
position = assembler->ToInteger(context, position,
CodeStubAssembler::kTruncateMinusZero);
- assembler->GotoUnless(assembler->WordIsSmi(position), &return_emptystring);
+ assembler->GotoUnless(assembler->TaggedIsSmi(position),
+ &return_emptystring);
// Determine the actual length of the {receiver} String.
Node* receiver_length =
@@ -756,7 +696,7 @@ void Builtins::Generate_StringPrototypeCharCodeAt(
Label return_nan(assembler, Label::kDeferred);
position = assembler->ToInteger(context, position,
CodeStubAssembler::kTruncateMinusZero);
- assembler->GotoUnless(assembler->WordIsSmi(position), &return_nan);
+ assembler->GotoUnless(assembler->TaggedIsSmi(position), &return_nan);
// Determine the actual length of the {receiver} String.
Node* receiver_length =
@@ -779,6 +719,100 @@ void Builtins::Generate_StringPrototypeCharCodeAt(
assembler->Return(result);
}
+// ES6 section 21.1.3.6
+// String.prototype.endsWith ( searchString [ , endPosition ] )
+BUILTIN(StringPrototypeEndsWith) {
+ HandleScope handle_scope(isolate);
+ TO_THIS_STRING(str, "String.prototype.endsWith");
+
+ // Check if the search string is a regExp and fail if it is.
+ Handle<Object> search = args.atOrUndefined(isolate, 1);
+ Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
+ if (is_reg_exp.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+ if (is_reg_exp.FromJust()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
+ isolate->factory()->NewStringFromStaticChars(
+ "String.prototype.endsWith")));
+ }
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+
+ Handle<Object> position = args.atOrUndefined(isolate, 2);
+ int end;
+
+ if (position->IsUndefined(isolate)) {
+ end = str->length();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToInteger(isolate, position));
+ double index = std::max(position->Number(), 0.0);
+ index = std::min(index, static_cast<double>(str->length()));
+ end = static_cast<uint32_t>(index);
+ }
+
+ int start = end - search_string->length();
+ if (start < 0) return isolate->heap()->false_value();
+
+ FlatStringReader str_reader(isolate, String::Flatten(str));
+ FlatStringReader search_reader(isolate, String::Flatten(search_string));
+
+ for (int i = 0; i < search_string->length(); i++) {
+ if (str_reader.Get(start + i) != search_reader.Get(i)) {
+ return isolate->heap()->false_value();
+ }
+ }
+ return isolate->heap()->true_value();
+}
+
+// ES6 section 21.1.3.7
+// String.prototype.includes ( searchString [ , position ] )
+BUILTIN(StringPrototypeIncludes) {
+ HandleScope handle_scope(isolate);
+ TO_THIS_STRING(str, "String.prototype.includes");
+
+ // Check if the search string is a regExp and fail if it is.
+ Handle<Object> search = args.atOrUndefined(isolate, 1);
+ Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
+ if (is_reg_exp.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+ if (is_reg_exp.FromJust()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
+ isolate->factory()->NewStringFromStaticChars(
+ "String.prototype.includes")));
+ }
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+ Handle<Object> position;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, position,
+ Object::ToInteger(isolate, args.atOrUndefined(isolate, 2)));
+
+ double index = std::max(position->Number(), 0.0);
+ index = std::min(index, static_cast<double>(str->length()));
+
+ int index_in_str = String::IndexOf(isolate, str, search_string,
+ static_cast<uint32_t>(index));
+ return *isolate->factory()->ToBoolean(index_in_str != -1);
+}
+
+// ES6 section 21.1.3.8 String.prototype.indexOf ( searchString [ , position ] )
+BUILTIN(StringPrototypeIndexOf) {
+ HandleScope handle_scope(isolate);
+
+ return String::IndexOf(isolate, args.receiver(),
+ args.atOrUndefined(isolate, 1),
+ args.atOrUndefined(isolate, 2));
+}
+
// ES6 section 21.1.3.9
// String.prototype.lastIndexOf ( searchString [ , position ] )
BUILTIN(StringPrototypeLastIndexOf) {
@@ -803,13 +837,13 @@ BUILTIN(StringPrototypeLocaleCompare) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
- if (str1.is_identical_to(str2)) return Smi::FromInt(0); // Equal.
+ if (str1.is_identical_to(str2)) return Smi::kZero; // Equal.
int str1_length = str1->length();
int str2_length = str2->length();
// Decide trivial cases without flattening.
if (str1_length == 0) {
- if (str2_length == 0) return Smi::FromInt(0); // Equal.
+ if (str2_length == 0) return Smi::kZero; // Equal.
return Smi::FromInt(-str2_length);
} else {
if (str2_length == 0) return Smi::FromInt(str1_length);
@@ -889,7 +923,7 @@ void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
Node* const length = a->Parameter(2);
Node* const context = a->Parameter(5);
- Node* const zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const zero = a->SmiConstant(Smi::kZero);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
@@ -903,7 +937,7 @@ void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
- a->Branch(a->WordIsSmi(start_int), &if_issmi, &if_isheapnumber);
+ a->Branch(a->TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
a->Bind(&if_issmi);
{
@@ -947,7 +981,7 @@ void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
}
- a->Branch(a->WordIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+ a->Branch(a->TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
// Set {length} to min(max({length}, 0), {string_length} - {start}
a->Bind(&if_issmi);
@@ -967,8 +1001,8 @@ void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- a->Assert(a->WordEqual(a->LoadMap(var_length.value()),
- a->HeapNumberMapConstant()));
+ CSA_ASSERT(a, a->WordEqual(a->LoadMap(var_length.value()),
+ a->HeapNumberMapConstant()));
Label if_isnegative(a), if_ispositive(a);
Node* const float_zero = a->Float64Constant(0.);
@@ -1013,7 +1047,7 @@ compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
- a->Branch(a->WordIsSmi(value_int), &if_issmi, &if_isnotsmi);
+ a->Branch(a->TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
a->Bind(&if_issmi);
{
@@ -1028,7 +1062,7 @@ compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
a->Bind(&if_isoutofbounds);
{
- Node* const zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const zero = a->SmiConstant(Smi::kZero);
var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
a->Goto(&out);
}
@@ -1037,10 +1071,11 @@ compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
a->Bind(&if_isnotsmi);
{
// {value} is a heap number - in this case, it is definitely out of bounds.
- a->Assert(a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
+ CSA_ASSERT(a,
+ a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
Node* const float_zero = a->Float64Constant(0.);
- Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const smi_zero = a->SmiConstant(Smi::kZero);
Node* const value_float = a->LoadHeapNumberValue(value_int);
var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
smi_zero, limit));
@@ -1106,6 +1141,55 @@ void Builtins::Generate_StringPrototypeSubstring(CodeStubAssembler* a) {
}
}
+BUILTIN(StringPrototypeStartsWith) {
+ HandleScope handle_scope(isolate);
+ TO_THIS_STRING(str, "String.prototype.startsWith");
+
+ // Check if the search string is a regExp and fail if it is.
+ Handle<Object> search = args.atOrUndefined(isolate, 1);
+ Maybe<bool> is_reg_exp = RegExpUtils::IsRegExp(isolate, search);
+ if (is_reg_exp.IsNothing()) {
+ DCHECK(isolate->has_pending_exception());
+ return isolate->heap()->exception();
+ }
+ if (is_reg_exp.FromJust()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kFirstArgumentNotRegExp,
+ isolate->factory()->NewStringFromStaticChars(
+ "String.prototype.startsWith")));
+ }
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+
+ Handle<Object> position = args.atOrUndefined(isolate, 2);
+ int start;
+
+ if (position->IsUndefined(isolate)) {
+ start = 0;
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToInteger(isolate, position));
+ double index = std::max(position->Number(), 0.0);
+ index = std::min(index, static_cast<double>(str->length()));
+ start = static_cast<uint32_t>(index);
+ }
+
+ if (start + search_string->length() > str->length()) {
+ return isolate->heap()->false_value();
+ }
+
+ FlatStringReader str_reader(isolate, String::Flatten(str));
+ FlatStringReader search_reader(isolate, String::Flatten(search_string));
+
+ for (int i = 0; i < search_string->length(); i++) {
+ if (str_reader.Get(start + i) != search_reader.Get(i)) {
+ return isolate->heap()->false_value();
+ }
+ }
+ return isolate->heap()->true_value();
+}
+
// ES6 section 21.1.3.25 String.prototype.toString ()
void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
typedef compiler::Node Node;
@@ -1173,7 +1257,7 @@ void Builtins::Generate_StringPrototypeIterator(CodeStubAssembler* assembler) {
Heap::kEmptyFixedArrayRootIndex);
assembler->StoreObjectFieldNoWriteBarrier(
iterator, JSStringIterator::kStringOffset, string);
- Node* index = assembler->SmiConstant(Smi::FromInt(0));
+ Node* index = assembler->SmiConstant(Smi::kZero);
assembler->StoreObjectFieldNoWriteBarrier(
iterator, JSStringIterator::kNextIndexOffset, index);
assembler->Return(iterator);
@@ -1218,17 +1302,16 @@ compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
{
Node* lead = var_result.value();
Node* trail = var_trail.value();
-#ifdef ENABLE_SLOW_DCHECKS
+
// Check that this path is only taken if a surrogate pair is found
- assembler->Assert(assembler->Uint32GreaterThanOrEqual(
- lead, assembler->Int32Constant(0xD800)));
- assembler->Assert(
- assembler->Uint32LessThan(lead, assembler->Int32Constant(0xDC00)));
- assembler->Assert(assembler->Uint32GreaterThanOrEqual(
- trail, assembler->Int32Constant(0xDC00)));
- assembler->Assert(
- assembler->Uint32LessThan(trail, assembler->Int32Constant(0xE000)));
-#endif
+ CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
+ lead, assembler->Int32Constant(0xD800)));
+ CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
+ lead, assembler->Int32Constant(0xDC00)));
+ CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
+ trail, assembler->Int32Constant(0xDC00)));
+ CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
+ trail, assembler->Int32Constant(0xE000)));
switch (encoding) {
case UnicodeEncoding::UTF16:
@@ -1289,7 +1372,7 @@ void Builtins::Generate_StringIteratorPrototypeNext(
Node* iterator = assembler->Parameter(0);
Node* context = assembler->Parameter(3);
- assembler->GotoIf(assembler->WordIsSmi(iterator), &throw_bad_receiver);
+ assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
assembler->GotoUnless(
assembler->WordEqual(assembler->LoadInstanceType(iterator),
assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index ede04f26d8..94173fa613 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -31,7 +31,8 @@ void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
// Check if the {receiver} is actually a JSTypedArray.
Label if_receiverisincompatible(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordIsSmi(receiver), &if_receiverisincompatible);
+ assembler->GotoIf(assembler->TaggedIsSmi(receiver),
+ &if_receiverisincompatible);
Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
assembler->GotoUnless(
assembler->Word32Equal(receiver_instance_type,
@@ -41,16 +42,9 @@ void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Node* receiver_buffer_bit_field = assembler->LoadObjectField(
- receiver_buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
Label if_receiverisneutered(assembler, Label::kDeferred);
- assembler->GotoUnless(
- assembler->Word32Equal(
- assembler->Word32And(
- receiver_buffer_bit_field,
- assembler->Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
- assembler->Int32Constant(0)),
- &if_receiverisneutered);
+ assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
+ &if_receiverisneutered);
assembler->Return(assembler->LoadObjectField(receiver, object_offset));
assembler->Bind(&if_receiverisneutered);
@@ -97,5 +91,79 @@ void Builtins::Generate_TypedArrayPrototypeLength(
JSTypedArray::kLengthOffset);
}
+namespace {
+
+template <IterationKind kIterationKind>
+void Generate_TypedArrayPrototypeIterationMethod(CodeStubAssembler* assembler,
+ const char* method_name) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* receiver = assembler->Parameter(0);
+ Node* context = assembler->Parameter(3);
+
+ Label throw_bad_receiver(assembler, Label::kDeferred);
+ Label throw_typeerror(assembler, Label::kDeferred);
+
+ assembler->GotoIf(assembler->TaggedIsSmi(receiver), &throw_bad_receiver);
+
+ Node* map = assembler->LoadMap(receiver);
+ Node* instance_type = assembler->LoadMapInstanceType(map);
+ assembler->GotoIf(
+ assembler->Word32NotEqual(instance_type,
+ assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &throw_bad_receiver);
+
+ // Check if the {receiver}'s JSArrayBuffer was neutered.
+ Node* receiver_buffer =
+ assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(assembler, Label::kDeferred);
+ assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
+ &if_receiverisneutered);
+
+ assembler->Return(assembler->CreateArrayIterator(receiver, map, instance_type,
+ context, kIterationKind));
+
+ Variable var_message(assembler, MachineRepresentation::kTagged);
+ assembler->Bind(&throw_bad_receiver);
+ var_message.Bind(
+ assembler->SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
+ assembler->Goto(&throw_typeerror);
+
+ assembler->Bind(&if_receiverisneutered);
+ var_message.Bind(assembler->SmiConstant(
+ Smi::FromInt(MessageTemplate::kDetachedOperation)));
+ assembler->Goto(&throw_typeerror);
+
+ assembler->Bind(&throw_typeerror);
+ {
+ Node* arg1 = assembler->HeapConstant(
+ assembler->isolate()->factory()->NewStringFromAsciiChecked(method_name,
+ TENURED));
+ Node* result = assembler->CallRuntime(Runtime::kThrowTypeError, context,
+ var_message.value(), arg1);
+ assembler->Return(result);
+ }
+}
+} // namespace
+
+void Builtins::Generate_TypedArrayPrototypeValues(
+ CodeStubAssembler* assembler) {
+ Generate_TypedArrayPrototypeIterationMethod<IterationKind::kValues>(
+ assembler, "%TypedArray%.prototype.values()");
+}
+
+void Builtins::Generate_TypedArrayPrototypeEntries(
+ CodeStubAssembler* assembler) {
+ Generate_TypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
+ assembler, "%TypedArray%.prototype.entries()");
+}
+
+void Builtins::Generate_TypedArrayPrototypeKeys(CodeStubAssembler* assembler) {
+ Generate_TypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
+ assembler, "%TypedArray%.prototype.keys()");
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index ca1786c4fa..6378fdfad5 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -48,9 +48,8 @@ class BuiltinArguments : public Arguments {
static const int kNumExtraArgs = 3;
static const int kNumExtraArgsWithReceiver = 4;
- template <class S>
- Handle<S> target() {
- return Arguments::at<S>(Arguments::length() - 1 - kTargetOffset);
+ Handle<JSFunction> target() {
+ return Arguments::at<JSFunction>(Arguments::length() - 1 - kTargetOffset);
}
Handle<HeapObject> new_target() {
return Arguments::at<HeapObject>(Arguments::length() - 1 -
@@ -92,8 +91,7 @@ class BuiltinArguments : public Arguments {
MUST_USE_RESULT Object* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
DCHECK(isolate->context() == nullptr || isolate->context()->IsContext()); \
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
- FLAG_runtime_call_stats)) { \
+ if (V8_UNLIKELY(FLAG_runtime_stats)) { \
return Builtin_Impl_Stats_##name(args_length, args_object, isolate); \
} \
BuiltinArguments args(args_length, args_object); \
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index d5a0e17d17..ec981fe01e 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -83,8 +83,10 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
CodeAssemblerGenerator generator, int argc,
Code::Flags flags, const char* name) {
HandleScope scope(isolate);
- Zone zone(isolate->allocator());
- CodeStubAssembler assembler(isolate, &zone, argc, flags, name);
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ const int argc_with_recv =
+ (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
+ CodeStubAssembler assembler(isolate, &zone, argc_with_recv, flags, name);
generator(&assembler);
Handle<Code> code = assembler.GenerateCode();
PostBuildProfileAndTracing(isolate, *code, name);
@@ -97,7 +99,7 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CallDescriptors::Key interface_descriptor,
Code::Flags flags, const char* name) {
HandleScope scope(isolate);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 3579f3c18a..a6b126d106 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -52,6 +52,8 @@ namespace internal {
/* Code aging */ \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
\
+ TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion) \
+ \
/* Calls */ \
ASM(ArgumentsAdaptorTrampoline) \
/* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
@@ -107,7 +109,6 @@ namespace internal {
\
/* Interpreter */ \
ASM(InterpreterEntryTrampoline) \
- ASM(InterpreterMarkBaselineOnReturn) \
ASM(InterpreterPushArgsAndCall) \
ASM(InterpreterPushArgsAndCallFunction) \
ASM(InterpreterPushArgsAndTailCall) \
@@ -115,6 +116,7 @@ namespace internal {
ASM(InterpreterPushArgsAndConstruct) \
ASM(InterpreterPushArgsAndConstructFunction) \
ASM(InterpreterPushArgsAndConstructArray) \
+ ASM(InterpreterEnterBytecodeAdvance) \
ASM(InterpreterEnterBytecodeDispatch) \
ASM(InterpreterOnStackReplacement) \
\
@@ -151,7 +153,6 @@ namespace internal {
TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
GrowArrayElements) \
- TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
\
/* Debugger */ \
DBG(FrameDropper_LiveEdit) \
@@ -173,9 +174,11 @@ namespace internal {
TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
\
/* Handlers */ \
- ASH(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, kNoExtraICState) \
TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
LoadWithVector) \
ASM(KeyedLoadIC_Miss) \
@@ -183,6 +186,10 @@ namespace internal {
ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState) \
ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, \
StoreICState::kStrictModeState) \
+ TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState, \
+ StoreWithVector) \
+ TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState, StoreWithVector) \
ASM(KeyedStoreIC_Miss) \
ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
@@ -210,16 +217,24 @@ namespace internal {
ASM(InternalArrayCode) \
CPP(ArrayConcat) \
/* ES6 section 22.1.2.2 Array.isArray */ \
- TFJ(ArrayIsArray, 2) \
+ TFJ(ArrayIsArray, 1) \
/* ES7 #sec-array.prototype.includes */ \
- TFJ(ArrayIncludes, 3) \
- TFJ(ArrayIndexOf, 3) \
+ TFJ(ArrayIncludes, 2) \
+ TFJ(ArrayIndexOf, 2) \
CPP(ArrayPop) \
CPP(ArrayPush) \
CPP(ArrayShift) \
CPP(ArraySlice) \
CPP(ArraySplice) \
CPP(ArrayUnshift) \
+ /* ES6 #sec-array.prototype.entries */ \
+ TFJ(ArrayPrototypeEntries, 0) \
+ /* ES6 #sec-array.prototype.keys */ \
+ TFJ(ArrayPrototypeKeys, 0) \
+ /* ES6 #sec-array.prototype.values */ \
+ TFJ(ArrayPrototypeValues, 0) \
+ /* ES6 #sec-%arrayiteratorprototype%.next */ \
+ TFJ(ArrayIteratorPrototypeNext, 0) \
\
/* ArrayBuffer */ \
CPP(ArrayBufferConstructor) \
@@ -231,9 +246,9 @@ namespace internal {
CPP(BooleanConstructor) \
CPP(BooleanConstructor_ConstructStub) \
/* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */ \
- TFJ(BooleanPrototypeToString, 1) \
+ TFJ(BooleanPrototypeToString, 0) \
/* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */ \
- TFJ(BooleanPrototypeValueOf, 1) \
+ TFJ(BooleanPrototypeValueOf, 0) \
\
/* CallSite */ \
CPP(CallSitePrototypeGetColumnNumber) \
@@ -280,41 +295,41 @@ namespace internal {
CPP(DateConstructor) \
CPP(DateConstructor_ConstructStub) \
/* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
- TFJ(DatePrototypeGetDate, 1) \
+ TFJ(DatePrototypeGetDate, 0) \
/* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
- TFJ(DatePrototypeGetDay, 1) \
+ TFJ(DatePrototypeGetDay, 0) \
/* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
- TFJ(DatePrototypeGetFullYear, 1) \
+ TFJ(DatePrototypeGetFullYear, 0) \
/* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
- TFJ(DatePrototypeGetHours, 1) \
+ TFJ(DatePrototypeGetHours, 0) \
/* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
- TFJ(DatePrototypeGetMilliseconds, 1) \
+ TFJ(DatePrototypeGetMilliseconds, 0) \
/* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
- TFJ(DatePrototypeGetMinutes, 1) \
+ TFJ(DatePrototypeGetMinutes, 0) \
/* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
- TFJ(DatePrototypeGetMonth, 1) \
+ TFJ(DatePrototypeGetMonth, 0) \
/* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
- TFJ(DatePrototypeGetSeconds, 1) \
+ TFJ(DatePrototypeGetSeconds, 0) \
/* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
- TFJ(DatePrototypeGetTime, 1) \
+ TFJ(DatePrototypeGetTime, 0) \
/* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
- TFJ(DatePrototypeGetTimezoneOffset, 1) \
+ TFJ(DatePrototypeGetTimezoneOffset, 0) \
/* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
- TFJ(DatePrototypeGetUTCDate, 1) \
+ TFJ(DatePrototypeGetUTCDate, 0) \
/* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
- TFJ(DatePrototypeGetUTCDay, 1) \
+ TFJ(DatePrototypeGetUTCDay, 0) \
/* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
- TFJ(DatePrototypeGetUTCFullYear, 1) \
+ TFJ(DatePrototypeGetUTCFullYear, 0) \
/* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
- TFJ(DatePrototypeGetUTCHours, 1) \
+ TFJ(DatePrototypeGetUTCHours, 0) \
/* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 1) \
+ TFJ(DatePrototypeGetUTCMilliseconds, 0) \
/* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
- TFJ(DatePrototypeGetUTCMinutes, 1) \
+ TFJ(DatePrototypeGetUTCMinutes, 0) \
/* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
- TFJ(DatePrototypeGetUTCMonth, 1) \
+ TFJ(DatePrototypeGetUTCMonth, 0) \
/* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
- TFJ(DatePrototypeGetUTCSeconds, 1) \
+ TFJ(DatePrototypeGetUTCSeconds, 0) \
CPP(DatePrototypeGetYear) \
CPP(DatePrototypeSetYear) \
CPP(DateNow) \
@@ -360,17 +375,17 @@ namespace internal {
CPP(FunctionPrototypeBind) \
ASM(FunctionPrototypeCall) \
/* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */ \
- TFJ(FunctionPrototypeHasInstance, 2) \
+ TFJ(FunctionPrototypeHasInstance, 1) \
CPP(FunctionPrototypeToString) \
\
/* Generator and Async */ \
CPP(GeneratorFunctionConstructor) \
/* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */ \
- TFJ(GeneratorPrototypeNext, 2) \
+ TFJ(GeneratorPrototypeNext, 1) \
/* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */ \
- TFJ(GeneratorPrototypeReturn, 2) \
+ TFJ(GeneratorPrototypeReturn, 1) \
/* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */ \
- TFJ(GeneratorPrototypeThrow, 2) \
+ TFJ(GeneratorPrototypeThrow, 1) \
CPP(AsyncFunctionConstructor) \
\
/* Global object */ \
@@ -382,12 +397,12 @@ namespace internal {
CPP(GlobalUnescape) \
CPP(GlobalEval) \
/* ES6 section 18.2.2 isFinite ( number ) */ \
- TFJ(GlobalIsFinite, 2) \
+ TFJ(GlobalIsFinite, 1) \
/* ES6 section 18.2.3 isNaN ( number ) */ \
- TFJ(GlobalIsNaN, 2) \
+ TFJ(GlobalIsNaN, 1) \
\
/* ES6 #sec-%iteratorprototype%-@@iterator */ \
- TFJ(IteratorPrototypeIterator, 1) \
+ TFJ(IteratorPrototypeIterator, 0) \
\
/* JSON */ \
CPP(JsonParse) \
@@ -395,73 +410,75 @@ namespace internal {
\
/* Math */ \
/* ES6 section 20.2.2.1 Math.abs ( x ) */ \
- TFJ(MathAbs, 2) \
+ TFJ(MathAbs, 1) \
/* ES6 section 20.2.2.2 Math.acos ( x ) */ \
- TFJ(MathAcos, 2) \
+ TFJ(MathAcos, 1) \
/* ES6 section 20.2.2.3 Math.acosh ( x ) */ \
- TFJ(MathAcosh, 2) \
+ TFJ(MathAcosh, 1) \
/* ES6 section 20.2.2.4 Math.asin ( x ) */ \
- TFJ(MathAsin, 2) \
+ TFJ(MathAsin, 1) \
/* ES6 section 20.2.2.5 Math.asinh ( x ) */ \
- TFJ(MathAsinh, 2) \
+ TFJ(MathAsinh, 1) \
/* ES6 section 20.2.2.6 Math.atan ( x ) */ \
- TFJ(MathAtan, 2) \
+ TFJ(MathAtan, 1) \
/* ES6 section 20.2.2.7 Math.atanh ( x ) */ \
- TFJ(MathAtanh, 2) \
+ TFJ(MathAtanh, 1) \
/* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */ \
- TFJ(MathAtan2, 3) \
+ TFJ(MathAtan2, 2) \
/* ES6 section 20.2.2.9 Math.cbrt ( x ) */ \
- TFJ(MathCbrt, 2) \
+ TFJ(MathCbrt, 1) \
/* ES6 section 20.2.2.10 Math.ceil ( x ) */ \
- TFJ(MathCeil, 2) \
+ TFJ(MathCeil, 1) \
/* ES6 section 20.2.2.11 Math.clz32 ( x ) */ \
- TFJ(MathClz32, 2) \
+ TFJ(MathClz32, 1) \
/* ES6 section 20.2.2.12 Math.cos ( x ) */ \
- TFJ(MathCos, 2) \
+ TFJ(MathCos, 1) \
/* ES6 section 20.2.2.13 Math.cosh ( x ) */ \
- TFJ(MathCosh, 2) \
+ TFJ(MathCosh, 1) \
/* ES6 section 20.2.2.14 Math.exp ( x ) */ \
- TFJ(MathExp, 2) \
+ TFJ(MathExp, 1) \
/* ES6 section 20.2.2.15 Math.expm1 ( x ) */ \
- TFJ(MathExpm1, 2) \
+ TFJ(MathExpm1, 1) \
/* ES6 section 20.2.2.16 Math.floor ( x ) */ \
- TFJ(MathFloor, 2) \
+ TFJ(MathFloor, 1) \
/* ES6 section 20.2.2.17 Math.fround ( x ) */ \
- TFJ(MathFround, 2) \
+ TFJ(MathFround, 1) \
/* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */ \
CPP(MathHypot) \
/* ES6 section 20.2.2.19 Math.imul ( x, y ) */ \
- TFJ(MathImul, 3) \
+ TFJ(MathImul, 2) \
/* ES6 section 20.2.2.20 Math.log ( x ) */ \
- TFJ(MathLog, 2) \
+ TFJ(MathLog, 1) \
/* ES6 section 20.2.2.21 Math.log1p ( x ) */ \
- TFJ(MathLog1p, 2) \
+ TFJ(MathLog1p, 1) \
/* ES6 section 20.2.2.22 Math.log10 ( x ) */ \
- TFJ(MathLog10, 2) \
+ TFJ(MathLog10, 1) \
/* ES6 section 20.2.2.23 Math.log2 ( x ) */ \
- TFJ(MathLog2, 2) \
+ TFJ(MathLog2, 1) \
/* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */ \
ASM(MathMax) \
/* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */ \
ASM(MathMin) \
/* ES6 section 20.2.2.26 Math.pow ( x, y ) */ \
- TFJ(MathPow, 3) \
+ TFJ(MathPow, 2) \
+ /* ES6 section 20.2.2.27 Math.random */ \
+ TFJ(MathRandom, 0) \
/* ES6 section 20.2.2.28 Math.round ( x ) */ \
- TFJ(MathRound, 2) \
+ TFJ(MathRound, 1) \
/* ES6 section 20.2.2.29 Math.sign ( x ) */ \
- TFJ(MathSign, 2) \
+ TFJ(MathSign, 1) \
/* ES6 section 20.2.2.30 Math.sin ( x ) */ \
- TFJ(MathSin, 2) \
+ TFJ(MathSin, 1) \
/* ES6 section 20.2.2.31 Math.sinh ( x ) */ \
- TFJ(MathSinh, 2) \
+ TFJ(MathSinh, 1) \
/* ES6 section 20.2.2.32 Math.sqrt ( x ) */ \
- TFJ(MathTan, 2) \
+ TFJ(MathTan, 1) \
/* ES6 section 20.2.2.33 Math.tan ( x ) */ \
- TFJ(MathTanh, 2) \
+ TFJ(MathTanh, 1) \
/* ES6 section 20.2.2.34 Math.tanh ( x ) */ \
- TFJ(MathSqrt, 2) \
+ TFJ(MathSqrt, 1) \
/* ES6 section 20.2.2.35 Math.trunc ( x ) */ \
- TFJ(MathTrunc, 2) \
+ TFJ(MathTrunc, 1) \
\
/* Number */ \
/* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
@@ -469,24 +486,47 @@ namespace internal {
/* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
ASM(NumberConstructor_ConstructStub) \
/* ES6 section 20.1.2.2 Number.isFinite ( number ) */ \
- TFJ(NumberIsFinite, 2) \
+ TFJ(NumberIsFinite, 1) \
/* ES6 section 20.1.2.3 Number.isInteger ( number ) */ \
- TFJ(NumberIsInteger, 2) \
+ TFJ(NumberIsInteger, 1) \
/* ES6 section 20.1.2.4 Number.isNaN ( number ) */ \
- TFJ(NumberIsNaN, 2) \
+ TFJ(NumberIsNaN, 1) \
/* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */ \
- TFJ(NumberIsSafeInteger, 2) \
+ TFJ(NumberIsSafeInteger, 1) \
+ /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */ \
+ TFJ(NumberParseFloat, 1) \
+ /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */ \
+ TFJ(NumberParseInt, 2) \
CPP(NumberPrototypeToExponential) \
CPP(NumberPrototypeToFixed) \
CPP(NumberPrototypeToLocaleString) \
CPP(NumberPrototypeToPrecision) \
CPP(NumberPrototypeToString) \
/* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
- TFJ(NumberPrototypeValueOf, 1) \
+ TFJ(NumberPrototypeValueOf, 0) \
+ TFS(Add, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(LessThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(Equal, BUILTIN, kNoExtraICState, Compare) \
+ TFS(NotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare) \
\
/* Object */ \
CPP(ObjectAssign) \
- CPP(ObjectCreate) \
+ TFJ(ObjectCreate, 2) \
CPP(ObjectDefineGetter) \
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
@@ -498,8 +538,9 @@ namespace internal {
CPP(ObjectGetOwnPropertyNames) \
CPP(ObjectGetOwnPropertySymbols) \
CPP(ObjectGetPrototypeOf) \
+ CPP(ObjectSetPrototypeOf) \
/* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */ \
- TFJ(ObjectHasOwnProperty, 2) \
+ TFJ(ObjectHasOwnProperty, 1) \
CPP(ObjectIs) \
CPP(ObjectIsExtensible) \
CPP(ObjectIsFrozen) \
@@ -509,11 +550,23 @@ namespace internal {
CPP(ObjectLookupSetter) \
CPP(ObjectPreventExtensions) \
/* ES6 section 19.1.3.6 Object.prototype.toString () */ \
- TFJ(ObjectProtoToString, 1) \
+ TFJ(ObjectProtoToString, 0) \
CPP(ObjectPrototypePropertyIsEnumerable) \
+ CPP(ObjectPrototypeGetProto) \
+ CPP(ObjectPrototypeSetProto) \
CPP(ObjectSeal) \
CPP(ObjectValues) \
\
+ TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty) \
+ TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare) \
+ TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
+ TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter) \
+ \
+ /* Promise */ \
+ CPP(CreateResolvingFunctions) \
+ CPP(PromiseResolveClosure) \
+ CPP(PromiseRejectClosure) \
+ \
/* Proxy */ \
CPP(ProxyConstructor) \
CPP(ProxyConstructor_ConstructStub) \
@@ -534,24 +587,64 @@ namespace internal {
CPP(ReflectSetPrototypeOf) \
\
/* RegExp */ \
+ CPP(RegExpCapture1Getter) \
+ CPP(RegExpCapture2Getter) \
+ CPP(RegExpCapture3Getter) \
+ CPP(RegExpCapture4Getter) \
+ CPP(RegExpCapture5Getter) \
+ CPP(RegExpCapture6Getter) \
+ CPP(RegExpCapture7Getter) \
+ CPP(RegExpCapture8Getter) \
+ CPP(RegExpCapture9Getter) \
CPP(RegExpConstructor) \
- TFJ(RegExpPrototypeExec, 2) \
+ TFJ(RegExpInternalMatch, 2) \
+ CPP(RegExpInputGetter) \
+ CPP(RegExpInputSetter) \
+ CPP(RegExpLastMatchGetter) \
+ CPP(RegExpLastParenGetter) \
+ CPP(RegExpLeftContextGetter) \
+ CPP(RegExpPrototypeCompile) \
+ TFJ(RegExpPrototypeExec, 1) \
+ TFJ(RegExpPrototypeFlagsGetter, 0) \
+ TFJ(RegExpPrototypeGlobalGetter, 0) \
+ TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
+ CPP(RegExpPrototypeMatch) \
+ TFJ(RegExpPrototypeMultilineGetter, 0) \
+ TFJ(RegExpPrototypeReplace, 2) \
+ TFJ(RegExpPrototypeSearch, 1) \
+ CPP(RegExpPrototypeSourceGetter) \
+ CPP(RegExpPrototypeSpeciesGetter) \
+ CPP(RegExpPrototypeSplit) \
+ TFJ(RegExpPrototypeStickyGetter, 0) \
+ TFJ(RegExpPrototypeTest, 1) \
+ CPP(RegExpPrototypeToString) \
+ TFJ(RegExpPrototypeUnicodeGetter, 0) \
+ CPP(RegExpRightContextGetter) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
- TFJ(AtomicsLoad, 3) \
- TFJ(AtomicsStore, 4) \
+ TFJ(AtomicsLoad, 2) \
+ TFJ(AtomicsStore, 3) \
\
/* String */ \
ASM(StringConstructor) \
ASM(StringConstructor_ConstructStub) \
CPP(StringFromCodePoint) \
/* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */ \
- TFJ(StringFromCharCode, 2) \
+ TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */ \
- TFJ(StringPrototypeCharAt, 2) \
+ TFJ(StringPrototypeCharAt, 1) \
/* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
- TFJ(StringPrototypeCharCodeAt, 2) \
+ TFJ(StringPrototypeCharCodeAt, 1) \
+ /* ES6 section 21.1.3.6 */ \
+ /* String.prototype.endsWith ( searchString [ , endPosition ] ) */ \
+ CPP(StringPrototypeEndsWith) \
+ /* ES6 section 21.1.3.7 */ \
+ /* String.prototype.includes ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeIncludes) \
+ /* ES6 section 21.1.3.8 */ \
+ /* String.prototype.indexOf ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeIndexOf) \
/* ES6 section 21.1.3.9 */ \
/* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \
CPP(StringPrototypeLastIndexOf) \
@@ -560,40 +653,52 @@ namespace internal {
/* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
CPP(StringPrototypeNormalize) \
/* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
- TFJ(StringPrototypeSubstr, 3) \
+ TFJ(StringPrototypeSubstr, 2) \
/* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
- TFJ(StringPrototypeSubstring, 3) \
+ TFJ(StringPrototypeSubstring, 2) \
+ /* ES6 section 21.1.3.20 */ \
+ /* String.prototype.startsWith ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeStartsWith) \
/* ES6 section 21.1.3.25 String.prototype.toString () */ \
- TFJ(StringPrototypeToString, 1) \
+ TFJ(StringPrototypeToString, 0) \
CPP(StringPrototypeTrim) \
CPP(StringPrototypeTrimLeft) \
CPP(StringPrototypeTrimRight) \
/* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
- TFJ(StringPrototypeValueOf, 1) \
+ TFJ(StringPrototypeValueOf, 0) \
/* ES6 #sec-string.prototype-@@iterator */ \
- TFJ(StringPrototypeIterator, 1) \
+ TFJ(StringPrototypeIterator, 0) \
\
/* StringIterator */ \
- TFJ(StringIteratorPrototypeNext, 1) \
+ TFJ(StringIteratorPrototypeNext, 0) \
\
/* Symbol */ \
CPP(SymbolConstructor) \
CPP(SymbolConstructor_ConstructStub) \
/* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */ \
- TFJ(SymbolPrototypeToPrimitive, 2) \
+ TFJ(SymbolPrototypeToPrimitive, 1) \
/* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */ \
- TFJ(SymbolPrototypeToString, 1) \
+ TFJ(SymbolPrototypeToString, 0) \
/* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */ \
- TFJ(SymbolPrototypeValueOf, 1) \
+ TFJ(SymbolPrototypeValueOf, 0) \
\
/* TypedArray */ \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */ \
- TFJ(TypedArrayPrototypeByteLength, 1) \
+ TFJ(TypedArrayPrototypeByteLength, 0) \
/* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 1) \
+ TFJ(TypedArrayPrototypeByteOffset, 0) \
/* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 1)
+ TFJ(TypedArrayPrototypeLength, 0) \
+ /* ES6 #sec-%typedarray%.prototype.entries */ \
+ TFJ(TypedArrayPrototypeEntries, 0) \
+ /* ES6 #sec-%typedarray%.prototype.keys */ \
+ TFJ(TypedArrayPrototypeKeys, 0) \
+ /* ES6 #sec-%typedarray%.prototype.values */ \
+ TFJ(TypedArrayPrototypeValues, 0) \
+ \
+ CPP(ModuleNamespaceIterator) \
+ CPP(FixedArrayIteratorNext)
#define IGNORE_BUILTIN(...)
@@ -637,7 +742,8 @@ class Builtins {
builtin_count
};
-#define DECLARE_BUILTIN_ACCESSOR(Name, ...) Handle<Code> Name();
+#define DECLARE_BUILTIN_ACCESSOR(Name, ...) \
+ V8_EXPORT_PRIVATE Handle<Code> Name();
BUILTIN_LIST_ALL(DECLARE_BUILTIN_ACCESSOR)
#undef DECLARE_BUILTIN_ACCESSOR
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 9dd621fca8..4287333d3f 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -110,15 +110,15 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- esi: context
// -- edi: constructor function
- // -- ebx: allocation site or undefined
// -- edx: new target
// -----------------------------------
@@ -127,10 +127,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(esi);
- __ push(ebx);
__ SmiTag(eax);
+ __ push(esi);
__ push(eax);
if (create_implicit_receiver) {
@@ -197,12 +195,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
+ __ JumpIfSmi(eax, &use_receiver, Label::kNear);
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &exit);
+ __ j(above_equal, &exit, Label::kNear);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -244,6 +242,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ret(0);
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -473,7 +473,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
{
Label done_loop, loop;
- __ Move(ecx, Smi::FromInt(0));
+ __ Move(ecx, Smi::kZero);
__ bind(&loop);
__ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
__ j(equal, &done_loop, Label::kNear);
@@ -685,31 +685,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(ecx);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ mov(kContextRegister,
- Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, ebx, ecx);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(eax);
-
- // Push function as argument and compile for baseline.
- __ push(edi);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(eax);
- }
- __ ret(0);
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow,
@@ -1005,12 +980,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ LoadHeapObject(ebx,
masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
@@ -1047,6 +1022,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ jmp(ebx);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(ebx); // First argument is the bytecode array.
+ __ Push(edx); // Second argument is the bytecode offset.
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ Move(edx, eax); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1055,7 +1055,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1118,15 +1117,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(entry, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, eax);
@@ -1160,24 +1156,16 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
- __ pop(closure);
-
- // Last possibility. Check the context free optimized code map entry.
- __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ jmp(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
+ __ pop(closure);
__ pop(new_target);
__ pop(argument_count);
- // Is the full code valid?
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
+ Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+ __ j(not_zero, &gotta_call_runtime_no_stack);
+ // Is the full code valid?
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
__ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
__ and_(ebx, Code::KindField::kMask);
@@ -1939,7 +1927,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ jmp(&done, Label::kNear);
__ bind(&no_arguments);
- __ Move(ebx, Smi::FromInt(0));
+ __ Move(ebx, Smi::kZero);
__ bind(&done);
}
@@ -2485,8 +2473,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(edi);
__ mov(eax, ecx);
__ Push(esi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(esi);
__ mov(ecx, eax);
__ Pop(edi);
@@ -2837,7 +2825,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::FromInt(0));
+ __ Move(esi, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2852,7 +2840,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ Push(edx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::FromInt(0));
+ __ Move(esi, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2865,7 +2853,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::FromInt(0));
+ __ Move(esi, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index a2b6bea626..b9c4a72dd0 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -266,7 +266,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// 2b. No arguments, return +0.
__ bind(&no_arguments);
- __ Move(v0, Smi::FromInt(0));
+ __ Move(v0, Smi::kZero);
__ DropAndRet(1);
}
@@ -295,7 +295,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ lw(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
- __ Move(a0, Smi::FromInt(0));
+ __ Move(a0, Smi::kZero);
__ bind(&done);
}
@@ -548,14 +548,14 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
- // -- a2 : allocation site or undefined
// -- a3 : new target
// -- cp : context
// -- ra : return address
@@ -569,9 +569,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(cp, a2, a0);
+ __ Push(cp, a0);
if (create_implicit_receiver) {
// Allocate the new receiver object.
@@ -698,6 +697,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -1144,31 +1145,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Jump(t0);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ lw(kContextRegister,
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, t0);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(v0);
-
- // Push function as argument and compile for baseline.
- __ push(a1);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(v0);
- }
- __ Jump(ra);
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow) {
@@ -1320,12 +1296,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
__ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1363,6 +1339,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ Jump(a1);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ lw(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ lw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister, a1, a2);
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ mov(a2, v0); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1371,7 +1370,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1435,15 +1433,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
@@ -1478,24 +1473,18 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
- __ pop(closure);
-
- // Last possibility. Check the context free optimized code map entry.
- __ lw(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
+ __ pop(closure);
__ pop(new_target);
__ pop(argument_count);
- // Is the full code valid?
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ lbu(t1, FieldMemOperand(entry,
+ SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ And(t1, t1,
+ Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+ __ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
+ // Is the full code valid?
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
__ And(t1, t1, Operand(Code::KindField::kMask));
@@ -1858,7 +1847,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
}
// If the code object is null, just return to the caller.
- __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+ __ Ret(eq, v0, Operand(Smi::kZero));
// Drop any potential handler frame that is be sitting on top of the actual
// JavaScript frame. This is the case then OSR is triggered from bytecode.
@@ -2455,8 +2444,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(a0, a1);
__ mov(a0, a3);
__ Push(cp);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(cp);
__ mov(a3, v0);
__ Pop(a0, a1);
@@ -2840,7 +2829,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -----------------------------------
__ SmiTag(a0);
__ Push(a0);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2853,7 +2842,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2864,7 +2853,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -- ra : return address
// -----------------------------------
__ Push(a0);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index f7225f01cd..a6abb55c46 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -264,7 +264,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// 2b. No arguments, return +0.
__ bind(&no_arguments);
- __ Move(v0, Smi::FromInt(0));
+ __ Move(v0, Smi::kZero);
__ DropAndRet(1);
}
@@ -293,7 +293,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ ld(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
- __ Move(a0, Smi::FromInt(0));
+ __ Move(a0, Smi::kZero);
__ bind(&done);
}
@@ -546,14 +546,14 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
- // -- a2 : allocation site or undefined
// -- a3 : new target
// -- cp : context
// -- ra : return address
@@ -567,9 +567,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(a2, t0);
__ SmiTag(a0);
- __ Push(cp, a2, a0);
+ __ Push(cp, a0);
if (create_implicit_receiver) {
__ Push(a1, a3);
@@ -693,6 +692,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -1136,31 +1137,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Jump(a4);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ ld(kContextRegister,
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, t0);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(v0);
-
- // Push function as argument and compile for baseline.
- __ push(a1);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(v0);
- }
- __ Jump(ra);
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow) {
@@ -1312,12 +1288,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ li(t0, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
__ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1355,6 +1331,29 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ Jump(a1);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister, a1, a2);
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ mov(a2, v0); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argument count (preserved for callee)
@@ -1363,7 +1362,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1427,15 +1425,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
@@ -1470,24 +1465,18 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
- __ pop(closure);
-
- // Last possibility. Check the context free optimized code map entry.
- __ ld(entry, FieldMemOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
+ __ pop(closure);
__ pop(new_target);
__ pop(argument_count);
- // Is the full code valid?
__ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ lbu(a5, FieldMemOperand(entry,
+ SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ And(a5, a5,
+ Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+ __ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
+ // Is the full code valid?
__ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
__ And(a5, a5, Operand(Code::KindField::kMask));
@@ -1852,7 +1841,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
}
// If the code object is null, just return to the caller.
- __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+ __ Ret(eq, v0, Operand(Smi::kZero));
// Drop any potential handler frame that is be sitting on top of the actual
// JavaScript frame. This is the case then OSR is triggered from bytecode.
@@ -2450,8 +2439,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(a0, a1);
__ mov(a0, a3);
__ Push(cp);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(cp);
__ mov(a3, v0);
__ Pop(a0, a1);
@@ -2833,7 +2822,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -----------------------------------
__ SmiTag(a0);
__ Push(a0);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2846,7 +2835,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2857,7 +2846,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -- ra : return address
// -----------------------------------
__ Push(a0);
- __ Move(cp, Smi::FromInt(0));
+ __ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 7e2b82c9a3..be1e67cc30 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -267,7 +267,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// 2b. No arguments, return +0.
__ bind(&no_arguments);
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ LoadSmiLiteral(r3, Smi::kZero);
__ Ret(1);
}
@@ -297,7 +297,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ LoadPX(r5, MemOperand(sp, r5));
__ b(&done);
__ bind(&no_arguments);
- __ LoadSmiLiteral(r5, Smi::FromInt(0));
+ __ LoadSmiLiteral(r5, Smi::kZero);
__ bind(&done);
}
@@ -555,14 +555,14 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
- // -- r5 : allocation site or undefined
// -- r6 : new target
// -- cp : context
// -- lr : return address
@@ -576,15 +576,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(r5, r7);
if (!create_implicit_receiver) {
__ SmiTag(r7, r3, SetRC);
- __ Push(cp, r5, r7);
+ __ Push(cp, r7);
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ SmiTag(r3);
- __ Push(cp, r5, r3);
+ __ Push(cp, r3);
// Allocate the new receiver object.
__ Push(r4, r6);
@@ -711,6 +710,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ blr();
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -1172,31 +1173,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ JumpToJSEntry(r7);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(kContextRegister,
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, r5);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(r3);
-
- // Push function as argument and compile for baseline.
- __ push(r4);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(r3);
- }
- __ blr();
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
@@ -1348,12 +1324,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ Move(r5, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ addi(r0, r5, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1390,6 +1366,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ Jump(ip);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ LoadP(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadP(r5,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister, r4, r5);
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ Move(r5, r3); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ StoreP(r5,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : argument count (preserved for callee)
@@ -1398,7 +1399,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1460,14 +1460,11 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
// Store code entry in the closure.
__ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(closure, entry, r8);
@@ -1502,23 +1499,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ b(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
-
- // Last possibility. Check the context free optimized code map entry.
- __ LoadP(entry,
- FieldMemOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ b(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
- // Is the full code valid?
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ lbz(r8, FieldMemOperand(entry,
+ SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+ __ bne(&gotta_call_runtime, cr0);
+ // Is the full code valid?
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
__ DecodeField<Code::KindField>(r8);
@@ -1888,7 +1877,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// If the code object is null, just return to the caller.
Label skip;
- __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r3, Smi::kZero, r0);
__ bne(&skip);
__ Ret();
@@ -2507,8 +2496,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(r3, r4);
__ mr(r3, r6);
__ Push(cp);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(cp);
__ mr(r6, r3);
__ Pop(r3, r4);
@@ -2854,7 +2843,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -----------------------------------
__ SmiTag(r4);
__ Push(r4);
- __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ LoadSmiLiteral(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2867,7 +2856,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ SmiTag(r4);
__ LoadSmiLiteral(r5, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r4, r5);
- __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ LoadSmiLiteral(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2878,7 +2867,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
__ push(r4);
- __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ LoadSmiLiteral(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 91ae2c006b..8655ab8d79 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -267,7 +267,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// 2b. No arguments, return +0.
__ bind(&no_arguments);
- __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ LoadSmiLiteral(r2, Smi::kZero);
__ Ret(1);
}
@@ -296,7 +296,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(sp, r4));
__ b(&done);
__ bind(&no_arguments);
- __ LoadSmiLiteral(r4, Smi::FromInt(0));
+ __ LoadSmiLiteral(r4, Smi::kZero);
__ bind(&done);
}
@@ -551,14 +551,14 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r2 : number of arguments
// -- r3 : constructor function
- // -- r4 : allocation site or undefined
// -- r5 : new target
// -- cp : context
// -- lr : return address
@@ -572,16 +572,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(r4, r6);
if (!create_implicit_receiver) {
__ SmiTag(r6, r2);
__ LoadAndTestP(r6, r6);
- __ Push(cp, r4, r6);
+ __ Push(cp, r6);
__ PushRoot(Heap::kTheHoleValueRootIndex);
} else {
__ SmiTag(r2);
- __ Push(cp, r4, r2);
+ __ Push(cp, r2);
// Allocate the new receiver object.
__ Push(r3, r5);
@@ -710,6 +709,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Ret();
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -1175,31 +1176,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ JumpToJSEntry(r6);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(kContextRegister,
- MemOperand(fp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, r4);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(r2);
-
- // Push function as argument and compile for baseline.
- __ push(r3);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(r2);
- }
- __ Ret();
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch,
Label* stack_overflow) {
@@ -1352,12 +1328,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ Move(r4, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ AddP(r14, r4, Operand(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1393,6 +1369,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ Jump(ip);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ LoadP(r3, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadP(r4,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister, r3, r4);
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ Move(r4, r2); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ StoreP(r4,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : argument count (preserved for callee)
@@ -1401,7 +1402,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1463,14 +1463,11 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
// Store code entry in the closure.
__ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(closure, entry, r7);
@@ -1505,23 +1502,15 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ b(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
-
- // Last possibility. Check the context free optimized code map entry.
- __ LoadP(entry,
- FieldMemOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ b(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
- // Is the full code valid?
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ LoadlB(temp, FieldMemOperand(
+ entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+ __ bne(&gotta_call_runtime);
+ // Is the full code valid?
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
__ DecodeField<Code::KindField>(r7);
@@ -1895,7 +1884,7 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// If the code object is null, just return to the caller.
Label skip;
- __ CmpSmiLiteral(r2, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r2, Smi::kZero, r0);
__ bne(&skip);
__ Ret();
@@ -2516,8 +2505,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(r2, r3);
__ LoadRR(r2, r5);
__ Push(cp);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(cp);
__ LoadRR(r5, r2);
__ Pop(r2, r3);
@@ -2865,7 +2854,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// -----------------------------------
__ SmiTag(r3);
__ Push(r3);
- __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ LoadSmiLiteral(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2878,7 +2867,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ SmiTag(r3);
__ LoadSmiLiteral(r4, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r3, r4);
- __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ LoadSmiLiteral(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2889,7 +2878,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
// -- lr : return address
// -----------------------------------
__ push(r3);
- __ LoadSmiLiteral(cp, Smi::FromInt(0));
+ __ LoadSmiLiteral(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index beae2d29c3..cde02647ac 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -112,15 +112,15 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rsi: context
// -- rdi: constructor function
- // -- rbx: allocation site or undefined
// -- rdx: new target
// -----------------------------------
@@ -129,10 +129,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(rbx);
- __ Push(rsi);
- __ Push(rbx);
__ Integer32ToSmi(rcx, rax);
+ __ Push(rsi);
__ Push(rcx);
if (create_implicit_receiver) {
@@ -197,13 +195,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// on page 74.
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver);
+ __ JumpIfSmi(rax, &use_receiver, Label::kNear);
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(above_equal, &exit);
+ __ j(above_equal, &exit, Label::kNear);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -246,6 +244,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ret(0);
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -658,7 +658,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
- DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
+ DCHECK_EQ(Smi::kZero, DebugInfo::uninitialized());
__ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
Immediate(0));
__ j(not_equal, &load_debug_bytecode_array);
@@ -766,31 +766,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(rcx);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ movp(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ movp(kContextRegister,
- Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, rbx, rcx);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ Push(rax);
-
- // Push function as argument and compile for baseline.
- __ Push(rdi);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ Pop(rax);
- }
- __ ret(0);
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow) {
@@ -981,12 +956,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ Move(rbx, masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ addp(rbx, Immediate(interpreter_entry_return_pc_offset->value() +
Code::kHeaderSize - kHeapObjectTag));
@@ -1023,6 +998,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ jmp(rbx);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ movp(rbx, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ movp(rdx, Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(rbx); // First argument is the bytecode array.
+ __ Push(rdx); // Second argument is the bytecode offset.
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ Move(rdx, rax); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rdx);
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argument count (preserved for callee)
@@ -1031,7 +1031,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1085,13 +1084,10 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ movp(entry, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1124,21 +1120,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
-
- // Last possibility. Check the context free optimized code map entry.
- __ movp(entry, FieldOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
- __ jmp(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
- // Is the full code valid?
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
+ Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+ __ j(not_zero, &gotta_call_runtime);
+ // Is the full code valid?
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
__ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
__ andl(rbx, Immediate(Code::KindField::kMask));
@@ -1904,7 +1892,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ movp(rbx, args.GetArgumentOperand(1));
__ jmp(&done, Label::kNear);
__ bind(&no_arguments);
- __ Move(rbx, Smi::FromInt(0));
+ __ Move(rbx, Smi::kZero);
__ bind(&done);
}
@@ -2157,7 +2145,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::FromInt(0));
+ __ Move(rsi, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2172,7 +2160,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ Push(rdx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::FromInt(0));
+ __ Move(rsi, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2185,7 +2173,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
- __ Move(rsi, Smi::FromInt(0));
+ __ Move(rsi, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
@@ -2583,8 +2571,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(rdi);
__ movp(rax, rcx);
__ Push(rsi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(rsi);
__ movp(rcx, rax);
__ Pop(rdi);
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
index 8e096a3d0b..2187f86f61 100644
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ b/deps/v8/src/builtins/x87/builtins-x87.cc
@@ -110,15 +110,15 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
GenerateTailCallToSharedCode(masm);
}
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+namespace {
+
+void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- esi: context
// -- edi: constructor function
- // -- ebx: allocation site or undefined
// -- edx: new target
// -----------------------------------
@@ -127,10 +127,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- __ AssertUndefinedOrAllocationSite(ebx);
- __ push(esi);
- __ push(ebx);
__ SmiTag(eax);
+ __ push(esi);
__ push(eax);
if (create_implicit_receiver) {
@@ -198,12 +196,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver);
+ __ JumpIfSmi(eax, &use_receiver, Label::kNear);
// If the type of the result (stored in its map) is less than
// FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &exit);
+ __ j(above_equal, &exit, Label::kNear);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
@@ -245,6 +243,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ ret(0);
}
+} // namespace
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Generate_JSConstructStubHelper(masm, false, true, false);
}
@@ -474,7 +474,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
{
Label done_loop, loop;
- __ Move(ecx, Smi::FromInt(0));
+ __ Move(ecx, Smi::kZero);
__ bind(&loop);
__ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
__ j(equal, &done_loop, Label::kNear);
@@ -686,31 +686,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ jmp(ecx);
}
-void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
- // Save the function and context for call to CompileBaseline.
- __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ mov(kContextRegister,
- Operand(ebp, StandardFrameConstants::kContextOffset));
-
- // Leave the frame before recompiling for baseline so that we don't count as
- // an activation on the stack.
- LeaveInterpreterFrame(masm, ebx, ecx);
-
- {
- FrameScope frame_scope(masm, StackFrame::INTERNAL);
- // Push return value.
- __ push(eax);
-
- // Push function as argument and compile for baseline.
- __ push(edi);
- __ CallRuntime(Runtime::kCompileBaseline);
-
- // Restore return value.
- __ pop(eax);
- }
- __ ret(0);
-}
-
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
Register scratch1, Register scratch2,
Label* stack_overflow,
@@ -1006,12 +981,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructArray(
}
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// Set the return address to the correct point in the interpreter entry
// trampoline.
Smi* interpreter_entry_return_pc_offset(
masm->isolate()->heap()->interpreter_entry_return_pc_offset());
- DCHECK_NE(interpreter_entry_return_pc_offset, Smi::FromInt(0));
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);
__ LoadHeapObject(ebx,
masm->isolate()->builtins()->InterpreterEntryTrampoline());
__ add(ebx, Immediate(interpreter_entry_return_pc_offset->value() +
@@ -1048,6 +1023,31 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
__ jmp(ebx);
}
+void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ mov(ebx, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ mov(edx, Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(ebx); // First argument is the bytecode array.
+ __ Push(edx); // Second argument is the bytecode offset.
+ __ CallRuntime(Runtime::kInterpreterAdvanceBytecodeOffset);
+ __ Move(edx, eax); // Result is the new bytecode offset.
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), edx);
+
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argument count (preserved for callee)
@@ -1056,7 +1056,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime, gotta_call_runtime_no_stack;
- Label maybe_call_runtime;
Label try_shared;
Label loop_top, loop_bottom;
@@ -1119,15 +1118,12 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(entry, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousCachedCode));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &maybe_call_runtime);
+ __ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
-
- Label install_optimized_code_and_tailcall;
- __ bind(&install_optimized_code_and_tailcall);
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, eax);
@@ -1161,24 +1157,16 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
- __ bind(&maybe_call_runtime);
- __ pop(closure);
-
- // Last possibility. Check the context free optimized code map entry.
- __ mov(entry, FieldOperand(map, FixedArray::kHeaderSize +
- SharedFunctionInfo::kSharedCodeIndex));
- __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
- __ JumpIfSmi(entry, &try_shared);
-
- // Store code entry in the closure.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ jmp(&install_optimized_code_and_tailcall);
-
__ bind(&try_shared);
+ __ pop(closure);
__ pop(new_target);
__ pop(argument_count);
- // Is the full code valid?
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ // Is the shared function marked for tier up?
+ __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
+ Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
+ __ j(not_zero, &gotta_call_runtime_no_stack);
+ // Is the full code valid?
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
__ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
__ and_(ebx, Code::KindField::kMask);
@@ -1956,7 +1944,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ jmp(&done, Label::kNear);
__ bind(&no_arguments);
- __ Move(ebx, Smi::FromInt(0));
+ __ Move(ebx, Smi::kZero);
__ bind(&done);
}
@@ -2509,8 +2497,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Push(edi);
__ mov(eax, ecx);
__ Push(esi);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(masm->isolate()->builtins()->ToObject(),
+ RelocInfo::CODE_TARGET);
__ Pop(esi);
__ mov(ecx, eax);
__ Pop(edi);
@@ -2861,7 +2849,7 @@ void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::FromInt(0));
+ __ Move(esi, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
@@ -2876,7 +2864,7 @@ void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
__ Push(edx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::FromInt(0));
+ __ Move(esi, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
@@ -2889,7 +2877,7 @@ void Builtins::Generate_Abort(MacroAssembler* masm) {
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
- __ Move(esi, Smi::FromInt(0));
+ __ Move(esi, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index defbb44775..ea351f8908 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -26,13 +26,15 @@ Cancelable::~Cancelable() {
}
}
-CancelableTaskManager::CancelableTaskManager() : task_id_counter_(0) {}
+CancelableTaskManager::CancelableTaskManager()
+ : task_id_counter_(0), canceled_(false) {}
uint32_t CancelableTaskManager::Register(Cancelable* task) {
base::LockGuard<base::Mutex> guard(&mutex_);
uint32_t id = ++task_id_counter_;
// The loop below is just used when task_id_counter_ overflows.
while (cancelable_tasks_.count(id) > 0) ++id;
+ CHECK(!canceled_);
cancelable_tasks_[id] = task;
return id;
}
@@ -42,12 +44,12 @@ void CancelableTaskManager::RemoveFinishedTask(uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
size_t removed = cancelable_tasks_.erase(id);
USE(removed);
- DCHECK_NE(0, removed);
+ DCHECK_NE(0u, removed);
cancelable_tasks_barrier_.NotifyOne();
}
-
-bool CancelableTaskManager::TryAbort(uint32_t id) {
+CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbort(
+ uint32_t id) {
base::LockGuard<base::Mutex> guard(&mutex_);
auto entry = cancelable_tasks_.find(id);
if (entry != cancelable_tasks_.end()) {
@@ -56,10 +58,12 @@ bool CancelableTaskManager::TryAbort(uint32_t id) {
// Cannot call RemoveFinishedTask here because of recursive locking.
cancelable_tasks_.erase(entry);
cancelable_tasks_barrier_.NotifyOne();
- return true;
+ return kTaskAborted;
+ } else {
+ return kTaskRunning;
}
}
- return false;
+ return kTaskRemoved;
}
@@ -69,6 +73,7 @@ void CancelableTaskManager::CancelAndWait() {
// of canceling we wait for the background tasks that have already been
// started.
base::LockGuard<base::Mutex> guard(&mutex_);
+ canceled_ = true;
// Cancelable tasks could be running or could potentially register new
// tasks, requiring a loop here.
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index b1d62aad4b..65f98e7662 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -11,6 +11,7 @@
#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -21,26 +22,27 @@ class Isolate;
// Keeps track of cancelable tasks. It is possible to register and remove tasks
// from any fore- and background task/thread.
-class CancelableTaskManager {
+class V8_EXPORT_PRIVATE CancelableTaskManager {
public:
CancelableTaskManager();
// Registers a new cancelable {task}. Returns the unique {id} of the task that
// can be used to try to abort a task by calling {Abort}.
+ // Must not be called after CancelAndWait.
uint32_t Register(Cancelable* task);
// Try to abort running a task identified by {id}. The possible outcomes are:
- // (1) The task is already finished running and thus has been removed from
- // the manager.
+ // (1) The task is already finished running or was canceled before and
+ // thus has been removed from the manager.
// (2) The task is currently running and cannot be canceled anymore.
// (3) The task is not yet running (or finished) so it is canceled and
// removed.
//
- // Returns {false} for (1) and (2), and {true} for (3).
- bool TryAbort(uint32_t id);
+ enum TryAbortResult { kTaskRemoved, kTaskRunning, kTaskAborted };
+ TryAbortResult TryAbort(uint32_t id);
// Cancels all remaining registered tasks and waits for tasks that are
- // already running.
+ // already running. This disallows subsequent Register calls.
void CancelAndWait();
private:
@@ -59,13 +61,14 @@ class CancelableTaskManager {
base::ConditionVariable cancelable_tasks_barrier_;
base::Mutex mutex_;
+ bool canceled_;
+
friend class Cancelable;
DISALLOW_COPY_AND_ASSIGN(CancelableTaskManager);
};
-
-class Cancelable {
+class V8_EXPORT_PRIVATE Cancelable {
public:
explicit Cancelable(CancelableTaskManager* parent);
virtual ~Cancelable();
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 3161ae4ae9..966b2a5936 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -5,6 +5,7 @@
#ifndef V8_CHAR_PREDICATES_H_
#define V8_CHAR_PREDICATES_H_
+#include "src/globals.h"
#include "src/unicode.h"
namespace v8 {
@@ -25,8 +26,7 @@ inline bool IsBinaryDigit(uc32 c);
inline bool IsRegExpWord(uc32 c);
inline bool IsRegExpNewline(uc32 c);
-
-struct SupplementaryPlanes {
+struct V8_EXPORT_PRIVATE SupplementaryPlanes {
static bool IsIDStart(uc32 c);
static bool IsIDPart(uc32 c);
};
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 7448591856..128c709998 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -23,10 +23,6 @@ Callable make_callable(Stub& stub) {
// static
Callable CodeFactory::LoadIC(Isolate* isolate) {
- if (FLAG_tf_load_ic_stub) {
- LoadICTrampolineTFStub stub(isolate);
- return make_callable(stub);
- }
LoadICTrampolineStub stub(isolate);
return make_callable(stub);
}
@@ -39,10 +35,6 @@ Callable CodeFactory::ApiGetter(Isolate* isolate) {
// static
Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
- if (FLAG_tf_load_ic_stub) {
- LoadICTFStub stub(isolate);
- return make_callable(stub);
- }
LoadICStub stub(isolate);
return make_callable(stub);
}
@@ -62,56 +54,39 @@ Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
// static
Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
- if (FLAG_tf_load_ic_stub) {
- KeyedLoadICTrampolineTFStub stub(isolate);
- return make_callable(stub);
- }
- KeyedLoadICTrampolineStub stub(isolate);
+ KeyedLoadICTrampolineTFStub stub(isolate);
return make_callable(stub);
}
// static
Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
- if (FLAG_tf_load_ic_stub) {
- KeyedLoadICTFStub stub(isolate);
- return make_callable(stub);
- }
- KeyedLoadICStub stub(isolate);
+ KeyedLoadICTFStub stub(isolate);
return make_callable(stub);
}
// static
Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
- if (FLAG_tf_load_ic_stub) {
- return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
- LoadWithVectorDescriptor(isolate));
- }
- return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic(),
+ return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
LoadWithVectorDescriptor(isolate));
}
// static
-Callable CodeFactory::CallIC(Isolate* isolate, int argc,
- ConvertReceiverMode mode,
+Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
- CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+ CallICTrampolineStub stub(isolate, CallICState(mode, tail_call_mode));
return make_callable(stub);
}
// static
-Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
+Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate,
ConvertReceiverMode mode,
TailCallMode tail_call_mode) {
- CallICStub stub(isolate, CallICState(argc, mode, tail_call_mode));
+ CallICStub stub(isolate, CallICState(mode, tail_call_mode));
return make_callable(stub);
}
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
- if (FLAG_tf_store_ic_stub) {
- StoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
- }
StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return make_callable(stub);
}
@@ -119,10 +94,6 @@ Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
// static
Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
LanguageMode language_mode) {
- if (FLAG_tf_store_ic_stub) {
- StoreICTFStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
- }
StoreICStub stub(isolate, StoreICState(language_mode));
return make_callable(stub);
}
@@ -130,6 +101,10 @@ Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
+ if (FLAG_tf_store_ic_stub) {
+ KeyedStoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
+ }
KeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return make_callable(stub);
}
@@ -137,11 +112,31 @@ Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
// static
Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
LanguageMode language_mode) {
+ if (FLAG_tf_store_ic_stub) {
+ KeyedStoreICTFStub stub(isolate, StoreICState(language_mode));
+ return make_callable(stub);
+ }
KeyedStoreICStub stub(isolate, StoreICState(language_mode));
return make_callable(stub);
}
// static
+Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
+ LanguageMode language_mode) {
+ if (FLAG_tf_store_ic_stub) {
+ return Callable(
+ language_mode == STRICT
+ ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
+ : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
+ StoreWithVectorDescriptor(isolate));
+ }
+ return Callable(language_mode == STRICT
+ ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
+ : isolate->builtins()->KeyedStoreIC_Megamorphic(),
+ StoreWithVectorDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
CompareICStub stub(isolate, op);
return make_callable(stub);
@@ -154,12 +149,6 @@ Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
}
// static
-Callable CodeFactory::InstanceOf(Isolate* isolate) {
- InstanceOfStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::GetProperty(Isolate* isolate) {
GetPropertyStub stub(isolate);
return make_callable(stub);
@@ -190,36 +179,12 @@ Callable CodeFactory::StringToNumber(Isolate* isolate) {
}
// static
-Callable CodeFactory::ToString(Isolate* isolate) {
- return Callable(isolate->builtins()->ToString(),
- TypeConversionDescriptor(isolate));
-}
-
-// static
Callable CodeFactory::ToName(Isolate* isolate) {
return Callable(isolate->builtins()->ToName(),
TypeConversionDescriptor(isolate));
}
// static
-Callable CodeFactory::ToInteger(Isolate* isolate) {
- ToIntegerStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ToLength(Isolate* isolate) {
- ToLengthStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ToObject(Isolate* isolate) {
- ToObjectStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::NonPrimitiveToPrimitive(Isolate* isolate,
ToPrimitiveHint hint) {
return Callable(isolate->builtins()->NonPrimitiveToPrimitive(hint),
@@ -240,88 +205,59 @@ Callable CodeFactory::NumberToString(Isolate* isolate) {
}
// static
-Callable CodeFactory::OrdinaryHasInstance(Isolate* isolate) {
- return Callable(isolate->builtins()->OrdinaryHasInstance(),
- CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::RegExpConstructResult(Isolate* isolate) {
- RegExpConstructResultStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::RegExpExec(Isolate* isolate) {
RegExpExecStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::Add(Isolate* isolate) {
- AddStub stub(isolate);
- return make_callable(stub);
+Callable CodeFactory::StringFromCharCode(Isolate* isolate) {
+ Handle<Code> code(isolate->builtins()->StringFromCharCode());
+ return Callable(code, BuiltinDescriptor(isolate));
}
-// static
-Callable CodeFactory::Subtract(Isolate* isolate) {
- SubtractStub stub(isolate);
- return make_callable(stub);
-}
+#define DECLARE_TFS(Name, Kind, Extra, InterfaceDescriptor) \
+ typedef InterfaceDescriptor##Descriptor Name##Descriptor;
+BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TFS,
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#undef DECLARE_TFS
-// static
-Callable CodeFactory::Multiply(Isolate* isolate) {
- MultiplyStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::Divide(Isolate* isolate) {
- DivideStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::Modulus(Isolate* isolate) {
- ModulusStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ShiftRight(Isolate* isolate) {
- ShiftRightStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ShiftRightLogical(Isolate* isolate) {
- ShiftRightLogicalStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ShiftLeft(Isolate* isolate) {
- ShiftLeftStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::BitwiseAnd(Isolate* isolate) {
- BitwiseAndStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::BitwiseOr(Isolate* isolate) {
- BitwiseOrStub stub(isolate);
- return make_callable(stub);
-}
+#define TFS_BUILTIN(Name) \
+ Callable CodeFactory::Name(Isolate* isolate) { \
+ Handle<Code> code(isolate->builtins()->Name()); \
+ return Callable(code, Name##Descriptor(isolate)); \
+ }
-// static
-Callable CodeFactory::BitwiseXor(Isolate* isolate) {
- BitwiseXorStub stub(isolate);
- return make_callable(stub);
-}
+TFS_BUILTIN(ToString)
+TFS_BUILTIN(Add)
+TFS_BUILTIN(Subtract)
+TFS_BUILTIN(Multiply)
+TFS_BUILTIN(Divide)
+TFS_BUILTIN(Modulus)
+TFS_BUILTIN(BitwiseAnd)
+TFS_BUILTIN(BitwiseOr)
+TFS_BUILTIN(BitwiseXor)
+TFS_BUILTIN(ShiftLeft)
+TFS_BUILTIN(ShiftRight)
+TFS_BUILTIN(ShiftRightLogical)
+TFS_BUILTIN(LessThan)
+TFS_BUILTIN(LessThanOrEqual)
+TFS_BUILTIN(GreaterThan)
+TFS_BUILTIN(GreaterThanOrEqual)
+TFS_BUILTIN(Equal)
+TFS_BUILTIN(NotEqual)
+TFS_BUILTIN(StrictEqual)
+TFS_BUILTIN(StrictNotEqual)
+TFS_BUILTIN(HasProperty)
+TFS_BUILTIN(ToInteger)
+TFS_BUILTIN(ToLength)
+TFS_BUILTIN(ToObject)
+TFS_BUILTIN(Typeof)
+TFS_BUILTIN(InstanceOf)
+TFS_BUILTIN(OrdinaryHasInstance)
+TFS_BUILTIN(ForInFilter)
+
+#undef TFS_BUILTIN
// static
Callable CodeFactory::Inc(Isolate* isolate) {
@@ -336,54 +272,6 @@ Callable CodeFactory::Dec(Isolate* isolate) {
}
// static
-Callable CodeFactory::LessThan(Isolate* isolate) {
- LessThanStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::LessThanOrEqual(Isolate* isolate) {
- LessThanOrEqualStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::GreaterThan(Isolate* isolate) {
- GreaterThanStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::GreaterThanOrEqual(Isolate* isolate) {
- GreaterThanOrEqualStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::Equal(Isolate* isolate) {
- EqualStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::NotEqual(Isolate* isolate) {
- NotEqualStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::StrictEqual(Isolate* isolate) {
- StrictEqualStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::StrictNotEqual(Isolate* isolate) {
- StrictNotEqualStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
@@ -463,12 +351,6 @@ Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
}
// static
-Callable CodeFactory::Typeof(Isolate* isolate) {
- TypeofStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
FastCloneRegExpStub stub(isolate);
return make_callable(stub);
@@ -591,18 +473,6 @@ Callable CodeFactory::ConstructFunction(Isolate* isolate) {
}
// static
-Callable CodeFactory::HasProperty(Isolate* isolate) {
- HasPropertyStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::ForInFilter(Isolate* isolate) {
- ForInFilterStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
TailCallMode tail_call_mode,
CallableType function_type) {
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 59f069e8bd..033e5d54fb 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -28,8 +28,7 @@ class Callable final BASE_EMBEDDED {
const CallInterfaceDescriptor descriptor_;
};
-
-class CodeFactory final {
+class V8_EXPORT_PRIVATE CodeFactory final {
public:
// Initial states for ICs.
static Callable LoadIC(Isolate* isolate);
@@ -40,18 +39,18 @@ class CodeFactory final {
static Callable KeyedLoadIC(Isolate* isolate);
static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
static Callable KeyedLoadIC_Megamorphic(Isolate* isolate);
- static Callable CallIC(Isolate* isolate, int argc,
+ static Callable CallIC(Isolate* isolate,
ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable CallICInOptimizedCode(
- Isolate* isolate, int argc,
- ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+ Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
LanguageMode mode);
+ static Callable KeyedStoreIC_Megamorphic(Isolate* isolate, LanguageMode mode);
static Callable ResumeGenerator(Isolate* isolate);
@@ -65,6 +64,9 @@ class CodeFactory final {
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
static Callable InstanceOf(Isolate* isolate);
+ static Callable OrdinaryHasInstance(Isolate* isolate);
+
+ static Callable StringFromCharCode(Isolate* isolate);
static Callable GetProperty(Isolate* isolate);
@@ -84,9 +86,6 @@ class CodeFactory final {
OrdinaryToPrimitiveHint hint);
static Callable NumberToString(Isolate* isolate);
- static Callable OrdinaryHasInstance(Isolate* isolate);
-
- static Callable RegExpConstructResult(Isolate* isolate);
static Callable RegExpExec(Isolate* isolate);
static Callable Add(Isolate* isolate);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 1efa0ccb11..b1ed2f13c7 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -1,7 +1,6 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
#include "src/frames-inl.h"
@@ -26,11 +25,32 @@ CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
const char* name)
: compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
-void CodeStubAssembler::Assert(Node* condition) {
+void CodeStubAssembler::Assert(ConditionBody codition_body, const char* message,
+ const char* file, int line) {
#if defined(DEBUG)
Label ok(this);
- Comment("[ Assert");
- GotoIf(condition, &ok);
+ Label not_ok(this, Label::kDeferred);
+ if (message != nullptr && FLAG_code_comments) {
+ Comment("[ Assert: %s", message);
+ } else {
+ Comment("[ Assert");
+ }
+ Node* condition = codition_body();
+ DCHECK_NOT_NULL(condition);
+ Branch(condition, &ok, &not_ok);
+ Bind(&not_ok);
+ if (message != nullptr) {
+ char chars[1024];
+ Vector<char> buffer(chars);
+ if (file != nullptr) {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+ } else {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+ }
+ CallRuntime(
+ Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
+ HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
+ }
DebugBreak();
Goto(&ok);
Bind(&ok);
@@ -38,9 +58,7 @@ void CodeStubAssembler::Assert(Node* condition) {
#endif
}
-Node* CodeStubAssembler::NoContextConstant() {
- return SmiConstant(Smi::FromInt(0));
-}
+Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
#define HEAP_CONSTANT_ACCESSOR(rootName, name) \
Node* CodeStubAssembler::name##Constant() { \
@@ -73,6 +91,62 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
}
}
+Node* CodeStubAssembler::IntPtrAddFoldConstants(Node* left, Node* right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant + right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return IntPtrAdd(left, right);
+}
+
+Node* CodeStubAssembler::IntPtrSubFoldConstants(Node* left, Node* right) {
+ int32_t left_constant;
+ bool is_left_constant = ToInt32Constant(left, left_constant);
+ int32_t right_constant;
+ bool is_right_constant = ToInt32Constant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant - right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return IntPtrSub(left, right);
+}
+
+Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
+ Comment("IntPtrRoundUpToPowerOfTwo32");
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
+ value = IntPtrSub(value, IntPtrConstant(1));
+ for (int i = 1; i <= 16; i *= 2) {
+ value = WordOr(value, WordShr(value, IntPtrConstant(i)));
+ }
+ return IntPtrAdd(value, IntPtrConstant(1));
+}
+
+Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
+ // value && !(value & (value - 1))
+ return WordEqual(
+ Select(WordEqual(value, IntPtrConstant(0)), IntPtrConstant(1),
+ WordAnd(value, IntPtrSub(value, IntPtrConstant(1))),
+ MachineType::PointerRepresentation()),
+ IntPtrConstant(0));
+}
+
Node* CodeStubAssembler::Float64Round(Node* x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
@@ -324,38 +398,39 @@ Node* CodeStubAssembler::SmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(SmiToWord32(value));
}
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) { return IntPtrAdd(a, b); }
-
-Node* CodeStubAssembler::SmiAddWithOverflow(Node* a, Node* b) {
- return IntPtrAddWithOverflow(a, b);
+Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) {
+ return BitcastWordToTaggedSigned(
+ IntPtrAdd(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
}
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) { return IntPtrSub(a, b); }
-
-Node* CodeStubAssembler::SmiSubWithOverflow(Node* a, Node* b) {
- return IntPtrSubWithOverflow(a, b);
+Node* CodeStubAssembler::SmiSub(Node* a, Node* b) {
+ return BitcastWordToTaggedSigned(
+ IntPtrSub(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
}
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) { return WordEqual(a, b); }
+Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) {
+ return WordEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
+}
Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
- return UintPtrGreaterThan(a, b);
+ return UintPtrGreaterThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
- return UintPtrGreaterThanOrEqual(a, b);
+ return UintPtrGreaterThanOrEqual(BitcastTaggedToWord(a),
+ BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
- return UintPtrLessThan(a, b);
+ return UintPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
- return IntPtrLessThan(a, b);
+ return IntPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
- return IntPtrLessThanOrEqual(a, b);
+ return IntPtrLessThanOrEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
}
Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
@@ -481,7 +556,7 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = ChangeFloat64ToTagged(value);
+ Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
Goto(&return_result);
}
@@ -490,8 +565,9 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
return var_result.value();
}
-Node* CodeStubAssembler::WordIsSmi(Node* a) {
- return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask)), IntPtrConstant(0));
+Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
+ return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(0));
}
Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
@@ -499,6 +575,11 @@ Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
IntPtrConstant(0));
}
+Node* CodeStubAssembler::WordIsWordAligned(Node* word) {
+ return WordEqual(IntPtrConstant(0),
+ WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
+}
+
void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
Node* rhs, Node* rhs_map,
Label* if_equal,
@@ -599,10 +680,28 @@ void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
}
}
+void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
+ Label* if_false) {
+ GotoIf(TaggedIsSmi(object), if_false);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+ if_true, if_false);
+}
+
+void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
+ Label* if_false) {
+ GotoIf(TaggedIsSmi(object), if_false);
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ Branch(Int32GreaterThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_JS_OBJECT_TYPE)),
+ if_true, if_false);
+}
+
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
Label* if_true, Label* if_false) {
// Bailout if receiver is a Smi.
- GotoIf(WordIsSmi(object), if_false);
+ GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
@@ -610,20 +709,14 @@ void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
if_false);
- Node* bit_field2 = LoadMapBitField2(map);
- Node* elements_kind = BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ Node* elements_kind = LoadMapElementsKind(map);
// Bailout if receiver has slow elements.
- GotoIf(
- Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- if_false);
+ GotoUnless(IsFastElementsKind(elements_kind), if_false);
// Check prototype chain if receiver does not have packed elements.
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
- STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
- Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
- GotoIf(Word32Equal(holey_elements, Int32Constant(0)), if_true);
+ GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
+
BranchIfPrototypesHaveNoElements(map, if_true, if_false);
}
@@ -644,19 +737,17 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
&no_runtime_call);
Bind(&runtime_call);
- // AllocateInTargetSpace does not use the context.
- Node* context = SmiConstant(Smi::FromInt(0));
-
Node* runtime_result;
if (flags & kPretenured) {
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
- runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
- SmiTag(size_in_bytes), runtime_flags);
+ runtime_result =
+ CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
} else {
- runtime_result = CallRuntime(Runtime::kAllocateInNewSpace, context,
- SmiTag(size_in_bytes));
+ runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
+ NoContextConstant(), SmiTag(size_in_bytes));
}
result.Bind(runtime_result);
Goto(&merge_runtime);
@@ -730,6 +821,7 @@ Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
}
Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
+ Comment("Allocate");
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
@@ -761,6 +853,11 @@ Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
+Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
+ return UintPtrLessThanOrEqual(size,
+ IntPtrConstant(kMaxRegularHeapObjectSize));
+}
+
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
@@ -771,7 +868,7 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
// Check if {value} is a Smi or a HeapObject.
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
@@ -810,9 +907,8 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
MachineType::Float64());
// Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
- Node* zero = Float64Constant(0.0);
- GotoIf(Float64LessThan(zero, value_value), if_true);
- BranchIfFloat64LessThan(value_value, zero, if_true, if_false);
+ Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
+ if_true, if_false);
}
Bind(&if_valueisother);
@@ -827,8 +923,8 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
// Check if the {value} is undetectable.
- BranchIfWord32Equal(value_map_undetectable, Int32Constant(0), if_true,
- if_false);
+ Branch(Word32Equal(value_map_undetectable, Int32Constant(0)), if_true,
+ if_false);
}
}
}
@@ -926,9 +1022,9 @@ Node* CodeStubAssembler::LoadInstanceType(Node* object) {
return LoadMapInstanceType(LoadMap(object));
}
-void CodeStubAssembler::AssertInstanceType(Node* object,
- InstanceType instance_type) {
- Assert(Word32Equal(LoadInstanceType(object), Int32Constant(instance_type)));
+Node* CodeStubAssembler::HasInstanceType(Node* object,
+ InstanceType instance_type) {
+ return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
}
Node* CodeStubAssembler::LoadProperties(Node* object) {
@@ -939,11 +1035,12 @@ Node* CodeStubAssembler::LoadElements(Node* object) {
return LoadObjectField(object, JSObject::kElementsOffset);
}
-Node* CodeStubAssembler::LoadJSArrayLength(compiler::Node* array) {
+Node* CodeStubAssembler::LoadJSArrayLength(Node* array) {
+ CSA_ASSERT(this, IsJSArray(array));
return LoadObjectField(array, JSArray::kLengthOffset);
}
-Node* CodeStubAssembler::LoadFixedArrayBaseLength(compiler::Node* array) {
+Node* CodeStubAssembler::LoadFixedArrayBaseLength(Node* array) {
return LoadObjectField(array, FixedArrayBase::kLengthOffset);
}
@@ -952,14 +1049,17 @@ Node* CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(Node* array) {
}
Node* CodeStubAssembler::LoadMapBitField(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapBitField2(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8());
}
Node* CodeStubAssembler::LoadMapBitField3(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32());
}
@@ -968,44 +1068,64 @@ Node* CodeStubAssembler::LoadMapInstanceType(Node* map) {
}
Node* CodeStubAssembler::LoadMapElementsKind(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field2 = LoadMapBitField2(map);
- return BitFieldDecode<Map::ElementsKindBits>(bit_field2);
+ return DecodeWord32<Map::ElementsKindBits>(bit_field2);
}
Node* CodeStubAssembler::LoadMapDescriptors(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kDescriptorsOffset);
}
Node* CodeStubAssembler::LoadMapPrototype(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return LoadObjectField(map, Map::kPrototypeOffset);
}
+Node* CodeStubAssembler::LoadMapPrototypeInfo(Node* map,
+ Label* if_no_proto_info) {
+ CSA_ASSERT(this, IsMap(map));
+ Node* prototype_info =
+ LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
+ GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
+ GotoUnless(WordEqual(LoadMap(prototype_info),
+ LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
+ if_no_proto_info);
+ return prototype_info;
+}
+
Node* CodeStubAssembler::LoadMapInstanceSize(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return ChangeUint32ToWord(
LoadObjectField(map, Map::kInstanceSizeOffset, MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapInobjectProperties(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetInObjectProperties() for details.
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- Assert(Int32GreaterThanOrEqual(LoadMapInstanceType(map),
- Int32Constant(FIRST_JS_OBJECT_TYPE)));
+ CSA_ASSERT(this,
+ Int32GreaterThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(FIRST_JS_OBJECT_TYPE)));
return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructorFunctionIndex(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
- Assert(Int32LessThanOrEqual(LoadMapInstanceType(map),
- Int32Constant(LAST_PRIMITIVE_TYPE)));
+ CSA_ASSERT(this, Int32LessThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(LAST_PRIMITIVE_TYPE)));
return ChangeUint32ToWord(LoadObjectField(
map, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
Variable result(this, MachineRepresentation::kTagged);
result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
@@ -1013,7 +1133,7 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
Goto(&loop);
Bind(&loop);
{
- GotoIf(WordIsSmi(result.value()), &done);
+ GotoIf(TaggedIsSmi(result.value()), &done);
Node* is_map_type =
Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
GotoUnless(is_map_type, &done);
@@ -1026,6 +1146,7 @@ Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
}
Node* CodeStubAssembler::LoadNameHashField(Node* name) {
+ CSA_ASSERT(this, IsName(name));
return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
}
@@ -1041,15 +1162,23 @@ Node* CodeStubAssembler::LoadNameHash(Node* name, Label* if_hash_not_computed) {
}
Node* CodeStubAssembler::LoadStringLength(Node* object) {
+ CSA_ASSERT(this, IsString(object));
return LoadObjectField(object, String::kLengthOffset);
}
Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
+ CSA_ASSERT(this, IsJSValue(object));
return LoadObjectField(object, JSValue::kValueOffset);
}
+Node* CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
+ // TODO(ishell): fix callers.
+ return LoadObjectField(weak_cell, WeakCell::kValueOffset);
+}
+
Node* CodeStubAssembler::LoadWeakCellValue(Node* weak_cell, Label* if_cleared) {
- Node* value = LoadObjectField(weak_cell, WeakCell::kValueOffset);
+ CSA_ASSERT(this, IsWeakCell(weak_cell));
+ Node* value = LoadWeakCellValueUnchecked(weak_cell);
if (if_cleared != nullptr) {
GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
}
@@ -1066,6 +1195,44 @@ Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
return Load(MachineType::AnyTagged(), object, offset);
}
+Node* CodeStubAssembler::LoadFixedTypedArrayElement(
+ Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+ ParameterMode parameter_mode) {
+ Node* offset =
+ ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
+ MachineType type;
+ switch (elements_kind) {
+ case UINT8_ELEMENTS: /* fall through */
+ case UINT8_CLAMPED_ELEMENTS:
+ type = MachineType::Uint8();
+ break;
+ case INT8_ELEMENTS:
+ type = MachineType::Int8();
+ break;
+ case UINT16_ELEMENTS:
+ type = MachineType::Uint16();
+ break;
+ case INT16_ELEMENTS:
+ type = MachineType::Int16();
+ break;
+ case UINT32_ELEMENTS:
+ type = MachineType::Uint32();
+ break;
+ case INT32_ELEMENTS:
+ type = MachineType::Int32();
+ break;
+ case FLOAT32_ELEMENTS:
+ type = MachineType::Float32();
+ break;
+ case FLOAT64_ELEMENTS:
+ type = MachineType::Float64();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return Load(type, data_pointer, offset);
+}
+
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
@@ -1088,6 +1255,7 @@ Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
+ CSA_ASSERT(this, IsFixedDoubleArray(object));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_DOUBLE_ELEMENTS,
@@ -1125,12 +1293,35 @@ Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
return Load(MachineType::AnyTagged(), context, IntPtrConstant(offset));
}
+Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+ return Load(MachineType::AnyTagged(), context, offset);
+}
+
+Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
+ Node* value) {
+ int offset = Context::SlotOffset(slot_index);
+ return Store(MachineRepresentation::kTagged, context, IntPtrConstant(offset),
+ value);
+}
+
+Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
+ return Store(MachineRepresentation::kTagged, context, offset, value);
+}
+
Node* CodeStubAssembler::LoadNativeContext(Node* context) {
return LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX);
}
Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
Node* native_context) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
return LoadFixedArrayElement(native_context,
IntPtrConstant(Context::ArrayMapIndex(kind)));
}
@@ -1206,6 +1397,7 @@ Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
+ CSA_ASSERT(this, IsFixedDoubleArray(object));
Node* offset =
ElementOffsetFromIndex(index_node, FAST_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
@@ -1230,8 +1422,11 @@ Node* CodeStubAssembler::AllocateHeapNumberWithValue(Node* value,
return result;
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
- Node* result = Allocate(SeqOneByteString::SizeFor(length));
+Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
+ AllocationFlags flags) {
+ Comment("AllocateSeqOneByteString");
+ Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
@@ -1241,27 +1436,31 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length) {
return result;
}
-Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
+Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
+ ParameterMode mode,
+ AllocationFlags flags) {
+ Comment("AllocateSeqOneByteString");
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
if_join(this);
- Node* size = WordAnd(
- IntPtrAdd(
- IntPtrAdd(length, IntPtrConstant(SeqOneByteString::kHeaderSize)),
- IntPtrConstant(kObjectAlignmentMask)),
- IntPtrConstant(~kObjectAlignmentMask));
+ Node* raw_size = GetArrayAllocationSize(
+ length, UINT8_ELEMENTS, mode,
+ SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
+ Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
{
// Just allocate the SeqOneByteString in new space.
- Node* result = Allocate(size);
+ Node* result = Allocate(size, flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
- StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
- SmiFromWord(length));
+ StoreObjectFieldNoWriteBarrier(
+ result, SeqOneByteString::kLengthOffset,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
IntPtrConstant(String::kEmptyHashField),
MachineRepresentation::kWord32);
@@ -1272,8 +1471,9 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
- SmiFromWord(length));
+ Node* result =
+ CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
var_result.Bind(result);
Goto(&if_join);
}
@@ -1282,8 +1482,11 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
- Node* result = Allocate(SeqTwoByteString::SizeFor(length));
+Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
+ AllocationFlags flags) {
+ Comment("AllocateSeqTwoByteString");
+ Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
@@ -1293,27 +1496,31 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length) {
return result;
}
-Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
+Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
+ ParameterMode mode,
+ AllocationFlags flags) {
+ Comment("AllocateSeqTwoByteString");
Variable var_result(this, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
if_join(this);
- Node* size = WordAnd(
- IntPtrAdd(IntPtrAdd(WordShl(length, 1),
- IntPtrConstant(SeqTwoByteString::kHeaderSize)),
- IntPtrConstant(kObjectAlignmentMask)),
- IntPtrConstant(~kObjectAlignmentMask));
+ Node* raw_size = GetArrayAllocationSize(
+ length, UINT16_ELEMENTS, mode,
+ SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
+ Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
Bind(&if_sizeissmall);
{
// Just allocate the SeqTwoByteString in new space.
- Node* result = Allocate(size);
+ Node* result = Allocate(size, flags);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
- StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
- SmiFromWord(length));
+ StoreObjectFieldNoWriteBarrier(
+ result, SeqTwoByteString::kLengthOffset,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
IntPtrConstant(String::kEmptyHashField),
MachineRepresentation::kWord32);
@@ -1324,8 +1531,9 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result = CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
- SmiFromWord(length));
+ Node* result =
+ CallRuntime(Runtime::kAllocateSeqTwoByteString, context,
+ mode == SMI_PARAMETERS ? length : SmiFromWord(length));
var_result.Bind(result);
Goto(&if_join);
}
@@ -1334,10 +1542,13 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length) {
return var_result.value();
}
-Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
- Node* offset) {
+Node* CodeStubAssembler::AllocateSlicedString(
+ Heap::RootListIndex map_root_index, Node* length, Node* parent,
+ Node* offset) {
+ CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(SlicedString::kSize);
- Node* map = LoadRoot(Heap::kSlicedOneByteStringMapRootIndex);
+ Node* map = LoadRoot(map_root_index);
+ DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
MachineRepresentation::kTagged);
@@ -1351,28 +1562,118 @@ Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
return result;
}
+Node* CodeStubAssembler::AllocateSlicedOneByteString(Node* length, Node* parent,
+ Node* offset) {
+ return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
+ parent, offset);
+}
+
Node* CodeStubAssembler::AllocateSlicedTwoByteString(Node* length, Node* parent,
Node* offset) {
- Node* result = Allocate(SlicedString::kSize);
- Node* map = LoadRoot(Heap::kSlicedStringMapRootIndex);
+ return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
+ offset);
+}
+
+Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
+ Node* length, Node* first,
+ Node* second,
+ AllocationFlags flags) {
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ Node* result = Allocate(ConsString::kSize, flags);
+ Node* map = LoadRoot(map_root_index);
+ DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
Int32Constant(String::kEmptyHashField),
MachineRepresentation::kWord32);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
- MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
- MachineRepresentation::kTagged);
+ bool const new_space = !(flags & kPretenured);
+ if (new_space) {
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
+ MachineRepresentation::kTagged);
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second,
+ MachineRepresentation::kTagged);
+ } else {
+ StoreObjectField(result, ConsString::kFirstOffset, first);
+ StoreObjectField(result, ConsString::kSecondOffset, second);
+ }
return result;
}
+Node* CodeStubAssembler::AllocateOneByteConsString(Node* length, Node* first,
+ Node* second,
+ AllocationFlags flags) {
+ return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
+ second, flags);
+}
+
+Node* CodeStubAssembler::AllocateTwoByteConsString(Node* length, Node* first,
+ Node* second,
+ AllocationFlags flags) {
+ return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
+ second, flags);
+}
+
+Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
+ Node* right, AllocationFlags flags) {
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ // Added string can be a cons string.
+ Comment("Allocating ConsString");
+ Node* left_instance_type = LoadInstanceType(left);
+ Node* right_instance_type = LoadInstanceType(right);
+
+ // Compute intersection and difference of instance types.
+ Node* anded_instance_types = WordAnd(left_instance_type, right_instance_type);
+ Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+
+ // We create a one-byte cons string if
+ // 1. both strings are one-byte, or
+ // 2. at least one of the strings is two-byte, but happens to contain only
+ // one-byte characters.
+ // To do this, we check
+ // 1. if both strings are one-byte, or if the one-byte data hint is set in
+ // both strings, or
+ // 2. if one of the strings has the one-byte data hint set and the other
+ // string is one-byte.
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kOneByteDataHintTag != 0);
+ Label one_byte_map(this);
+ Label two_byte_map(this);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+ GotoIf(WordNotEqual(
+ WordAnd(anded_instance_types,
+ IntPtrConstant(kStringEncodingMask | kOneByteDataHintTag)),
+ IntPtrConstant(0)),
+ &one_byte_map);
+ Branch(WordNotEqual(WordAnd(xored_instance_types,
+ IntPtrConstant(kStringEncodingMask |
+ kOneByteDataHintMask)),
+ IntPtrConstant(kOneByteStringTag | kOneByteDataHintTag)),
+ &two_byte_map, &one_byte_map);
+
+ Bind(&one_byte_map);
+ Comment("One-byte ConsString");
+ result.Bind(AllocateOneByteConsString(length, left, right, flags));
+ Goto(&done);
+
+ Bind(&two_byte_map);
+ Comment("Two-byte ConsString");
+ result.Bind(AllocateTwoByteConsString(length, left, right, flags));
+ Goto(&done);
+
+ Bind(&done);
+
+ return result.value();
+}
+
Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* index, Node* input) {
Node* const max_length =
SmiConstant(Smi::FromInt(JSArray::kInitialMaxFastElementArray));
- Assert(SmiLessThanOrEqual(length, max_length));
+ CSA_ASSERT(this, SmiLessThanOrEqual(length, max_length));
+ USE(max_length);
// Allocate the JSRegExpResult.
// TODO(jgruber): Fold JSArray and FixedArray allocations, then remove
@@ -1412,6 +1713,120 @@ Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
return result;
}
+Node* CodeStubAssembler::AllocateNameDictionary(int at_least_space_for) {
+ return AllocateNameDictionary(IntPtrConstant(at_least_space_for));
+}
+
+Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
+ CSA_ASSERT(this, UintPtrLessThanOrEqual(
+ at_least_space_for,
+ IntPtrConstant(NameDictionary::kMaxCapacity)));
+
+ Node* capacity = HashTableComputeCapacity(at_least_space_for);
+ CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+
+ Node* length = EntryToIndex<NameDictionary>(capacity);
+ Node* store_size =
+ IntPtrAddFoldConstants(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
+ IntPtrConstant(NameDictionary::kHeaderSize));
+
+ Node* result = Allocate(store_size);
+ Comment("Initialize NameDictionary");
+ // Initialize FixedArray fields.
+ StoreObjectFieldRoot(result, FixedArray::kMapOffset,
+ Heap::kHashTableMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
+ SmiFromWord(length));
+ // Initialized HashTable fields.
+ Node* zero = SmiConstant(0);
+ StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
+ zero, SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
+ SmiTag(capacity), SKIP_WRITE_BARRIER);
+ // Initialize Dictionary fields.
+ Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+ StoreFixedArrayElement(result, NameDictionary::kMaxNumberKeyIndex, filler,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
+ SmiConstant(PropertyDetails::kInitialIndex),
+ SKIP_WRITE_BARRIER);
+
+ // Initialize NameDictionary elements.
+ result = BitcastTaggedToWord(result);
+ Node* start_address = IntPtrAdd(
+ result, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+ NameDictionary::kElementsStartIndex) -
+ kHeapObjectTag));
+ Node* end_address = IntPtrAdd(
+ result,
+ IntPtrSubFoldConstants(store_size, IntPtrConstant(kHeapObjectTag)));
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+ return result;
+}
+
+Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
+ Node* elements) {
+ CSA_ASSERT(this, IsMap(map));
+ Node* size =
+ IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
+ CSA_ASSERT(this, IsRegularHeapObjectSize(size));
+ Node* object = Allocate(size);
+ StoreMapNoWriteBarrier(object, map);
+ InitializeJSObjectFromMap(object, map, size, properties, elements);
+ return object;
+}
+
+void CodeStubAssembler::InitializeJSObjectFromMap(Node* object, Node* map,
+ Node* size, Node* properties,
+ Node* elements) {
+ // This helper assumes that the object is in new-space, as guarded by the
+ // check in AllocatedJSObjectFromMap.
+ if (properties == nullptr) {
+ CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap((map))));
+ StoreObjectFieldRoot(object, JSObject::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ } else {
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+ properties);
+ }
+ if (elements == nullptr) {
+ StoreObjectFieldRoot(object, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ } else {
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
+ }
+ InitializeJSObjectBody(object, map, size, JSObject::kHeaderSize);
+}
+
+void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
+ Node* size, int start_offset) {
+ // TODO(cbruni): activate in-object slack tracking machinery.
+ Comment("InitializeJSObjectBody");
+ Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
+ // Calculate the untagged field addresses.
+ Node* start_address =
+ IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
+ Node* end_address =
+ IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+ StoreFieldsNoWriteBarrier(start_address, end_address, filler);
+}
+
+void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
+ Node* end_address,
+ Node* value) {
+ Comment("StoreFieldsNoWriteBarrier");
+ CSA_ASSERT(this, WordIsWordAligned(start_address));
+ CSA_ASSERT(this, WordIsWordAligned(end_address));
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start_address, end_address,
+ [value](CodeStubAssembler* a, Node* current) {
+ a->StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
+ },
+ kPointerSize, IndexAdvanceMode::kPost);
+}
+
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
@@ -1446,7 +1861,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
Node* array = AllocateUninitializedJSArray(kind, array_map, length,
allocation_site, size);
- Node* elements = InnerAllocate(array, elements_offset);
+ // The bitcast here is safe because InnerAllocate doesn't actually allocate.
+ Node* elements = InnerAllocate(BitcastTaggedToWord(array), elements_offset);
StoreObjectField(array, JSObject::kElementsOffset, elements);
return {array, elements};
@@ -1492,8 +1908,10 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
TagParameter(capacity, capacity_mode));
// Fill in the elements with holes.
- FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity,
- Heap::kTheHoleValueRootIndex, capacity_mode);
+ FillFixedArrayWithValue(
+ kind, elements, capacity_mode == SMI_PARAMETERS ? SmiConstant(Smi::kZero)
+ : IntPtrConstant(0),
+ capacity, Heap::kTheHoleValueRootIndex, capacity_mode);
return array;
}
@@ -1502,6 +1920,8 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
AllocationFlags flags) {
+ CSA_ASSERT(this,
+ IntPtrGreaterThan(capacity_node, IntPtrOrSmiConstant(0, mode)));
Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
// Allocate both array and elements object, and initialize the JSArray.
@@ -1532,86 +1952,37 @@ void CodeStubAssembler::FillFixedArrayWithValue(
Is64() ? Int64Constant(kHoleNanInt64) : Int32Constant(kHoleNanLower32);
Node* value = LoadRoot(value_root_index);
- const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag;
- int32_t to;
- bool constant_to = ToInt32Constant(to_node, to);
- int32_t from;
- bool constant_from = ToInt32Constant(from_node, from);
- if (constant_to && constant_from &&
- (to - from) <= kElementLoopUnrollThreshold) {
- for (int i = from; i < to; ++i) {
- Node* index = IntPtrConstant(i);
- if (is_double) {
- Node* offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- first_element_offset);
- // Don't use doubles to store the hole double, since manipulating the
- // signaling NaN used for the hole in C++, e.g. with bit_cast, will
- // change its value on ia32 (the x87 stack is used to return values
- // and stores to the stack silently clear the signalling bit).
- //
- // TODO(danno): When we have a Float32/Float64 wrapper class that
- // preserves double bits during manipulation, remove this code/change
- // this to an indexed Float64 store.
- if (Is64()) {
- StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
- double_hole);
+ BuildFastFixedArrayForEach(
+ array, kind, from_node, to_node,
+ [value, is_double, double_hole](CodeStubAssembler* assembler, Node* array,
+ Node* offset) {
+ if (is_double) {
+ // Don't use doubles to store the hole double, since manipulating the
+ // signaling NaN used for the hole in C++, e.g. with bit_cast, will
+ // change its value on ia32 (the x87 stack is used to return values
+ // and stores to the stack silently clear the signalling bit).
+ //
+ // TODO(danno): When we have a Float32/Float64 wrapper class that
+ // preserves double bits during manipulation, remove this code/change
+ // this to an indexed Float64 store.
+ if (assembler->Is64()) {
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord64,
+ array, offset, double_hole);
+ } else {
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kWord32,
+ array, offset, double_hole);
+ assembler->StoreNoWriteBarrier(
+ MachineRepresentation::kWord32, array,
+ assembler->IntPtrAdd(offset,
+ assembler->IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
} else {
- StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
- double_hole);
- offset = ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
- first_element_offset + kPointerSize);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
- double_hole);
+ assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, array,
+ offset, value);
}
- } else {
- StoreFixedArrayElement(array, index, value, SKIP_WRITE_BARRIER,
- INTPTR_PARAMETERS);
- }
- }
- } else {
- Variable current(this, MachineRepresentation::kTagged);
- Label test(this);
- Label decrement(this, &current);
- Label done(this);
- Node* limit =
- IntPtrAdd(array, ElementOffsetFromIndex(from_node, kind, mode));
- current.Bind(IntPtrAdd(array, ElementOffsetFromIndex(to_node, kind, mode)));
-
- Branch(WordEqual(current.value(), limit), &done, &decrement);
-
- Bind(&decrement);
- current.Bind(IntPtrSub(
- current.value(),
- IntPtrConstant(IsFastDoubleElementsKind(kind) ? kDoubleSize
- : kPointerSize)));
- if (is_double) {
- // Don't use doubles to store the hole double, since manipulating the
- // signaling NaN used for the hole in C++, e.g. with bit_cast, will
- // change its value on ia32 (the x87 stack is used to return values
- // and stores to the stack silently clear the signalling bit).
- //
- // TODO(danno): When we have a Float32/Float64 wrapper class that
- // preserves double bits during manipulation, remove this code/change
- // this to an indexed Float64 store.
- if (Is64()) {
- StoreNoWriteBarrier(MachineRepresentation::kWord64, current.value(),
- Int64Constant(first_element_offset), double_hole);
- } else {
- StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
- Int32Constant(first_element_offset), double_hole);
- StoreNoWriteBarrier(MachineRepresentation::kWord32, current.value(),
- Int32Constant(kPointerSize + first_element_offset),
- double_hole);
- }
- } else {
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), current.value(),
- IntPtrConstant(first_element_offset), value);
- }
- Node* compare = WordNotEqual(current.value(), limit);
- Branch(compare, &decrement, &done);
-
- Bind(&done);
- }
+ },
+ mode);
}
void CodeStubAssembler::CopyFixedArrayElements(
@@ -1710,8 +2081,8 @@ void CodeStubAssembler::CopyFixedArrayElements(
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
value);
} else {
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), to_array,
- to_offset, value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, to_array, to_offset,
+ value);
}
Goto(&next_iter);
@@ -1748,73 +2119,66 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-void CodeStubAssembler::CopyStringCharacters(compiler::Node* from_string,
- compiler::Node* to_string,
- compiler::Node* from_index,
- compiler::Node* character_count,
- String::Encoding encoding) {
- Label out(this);
-
- // Nothing to do for zero characters.
-
- GotoIf(SmiLessThanOrEqual(character_count, SmiConstant(Smi::FromInt(0))),
- &out);
-
- // Calculate offsets into the strings.
-
- Node* from_offset;
- Node* limit_offset;
- Node* to_offset;
-
- {
- Node* byte_count = SmiUntag(character_count);
- Node* from_byte_index = SmiUntag(from_index);
- if (encoding == String::ONE_BYTE_ENCODING) {
- const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
- limit_offset = IntPtrAdd(from_offset, byte_count);
- to_offset = IntPtrConstant(offset);
- } else {
- STATIC_ASSERT(2 == sizeof(uc16));
- byte_count = WordShl(byte_count, 1);
- from_byte_index = WordShl(from_byte_index, 1);
-
- const int offset = SeqTwoByteString::kHeaderSize - kHeapObjectTag;
- from_offset = IntPtrAdd(IntPtrConstant(offset), from_byte_index);
- limit_offset = IntPtrAdd(from_offset, byte_count);
- to_offset = IntPtrConstant(offset);
- }
- }
-
- Variable var_from_offset(this, MachineType::PointerRepresentation());
- Variable var_to_offset(this, MachineType::PointerRepresentation());
-
- var_from_offset.Bind(from_offset);
- var_to_offset.Bind(to_offset);
-
- Variable* vars[] = {&var_from_offset, &var_to_offset};
- Label decrement(this, 2, vars);
-
- Label loop(this, 2, vars);
- Goto(&loop);
- Bind(&loop);
- {
- from_offset = var_from_offset.value();
- to_offset = var_to_offset.value();
-
- // TODO(jgruber): We could make this faster through larger copy unit sizes.
- Node* value = Load(MachineType::Uint8(), from_string, from_offset);
- StoreNoWriteBarrier(MachineRepresentation::kWord8, to_string, to_offset,
- value);
-
- Node* new_from_offset = IntPtrAdd(from_offset, IntPtrConstant(1));
- var_from_offset.Bind(new_from_offset);
- var_to_offset.Bind(IntPtrAdd(to_offset, IntPtrConstant(1)));
-
- Branch(WordNotEqual(new_from_offset, limit_offset), &loop, &out);
- }
-
- Bind(&out);
+void CodeStubAssembler::CopyStringCharacters(
+ compiler::Node* from_string, compiler::Node* to_string,
+ compiler::Node* from_index, compiler::Node* to_index,
+ compiler::Node* character_count, String::Encoding from_encoding,
+ String::Encoding to_encoding, ParameterMode mode) {
+ bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
+ bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
+ DCHECK_IMPLIES(to_one_byte, from_one_byte);
+ Comment("CopyStringCharacters %s -> %s",
+ from_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING",
+ to_one_byte ? "ONE_BYTE_ENCODING" : "TWO_BYTE_ENCODING");
+
+ ElementsKind from_kind = from_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+ ElementsKind to_kind = to_one_byte ? UINT8_ELEMENTS : UINT16_ELEMENTS;
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ int header_size = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ Node* from_offset =
+ ElementOffsetFromIndex(from_index, from_kind, mode, header_size);
+ Node* to_offset =
+ ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
+ Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
+ Node* limit_offset = IntPtrAddFoldConstants(from_offset, byte_count);
+
+ // Prepare the fast loop
+ MachineType type =
+ from_one_byte ? MachineType::Uint8() : MachineType::Uint16();
+ MachineRepresentation rep = to_one_byte ? MachineRepresentation::kWord8
+ : MachineRepresentation::kWord16;
+ int from_increment = 1 << ElementsKindToShiftSize(from_kind);
+ int to_increment = 1 << ElementsKindToShiftSize(to_kind);
+
+ Variable current_to_offset(this, MachineType::PointerRepresentation());
+ VariableList vars({&current_to_offset}, zone());
+ current_to_offset.Bind(to_offset);
+ int to_index_constant = 0, from_index_constant = 0;
+ Smi* to_index_smi = nullptr;
+ Smi* from_index_smi = nullptr;
+ bool index_same = (from_encoding == to_encoding) &&
+ (from_index == to_index ||
+ (ToInt32Constant(from_index, from_index_constant) &&
+ ToInt32Constant(to_index, to_index_constant) &&
+ from_index_constant == to_index_constant) ||
+ (ToSmiConstant(from_index, from_index_smi) &&
+ ToSmiConstant(to_index, to_index_smi) &&
+ to_index_smi == from_index_smi));
+ BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
+ limit_offset,
+ [from_string, to_string, &current_to_offset, to_increment, type,
+ rep, index_same](CodeStubAssembler* assembler, Node* offset) {
+ Node* value = assembler->Load(type, from_string, offset);
+ assembler->StoreNoWriteBarrier(
+ rep, to_string,
+ index_same ? offset : current_to_offset.value(), value);
+ if (!index_same) {
+ current_to_offset.Bind(assembler->IntPtrAdd(
+ current_to_offset.value(),
+ assembler->IntPtrConstant(to_increment)));
+ }
+ },
+ from_increment, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -1831,7 +2195,7 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
return value;
} else {
- Node* value = Load(MachineType::Pointer(), array, offset);
+ Node* value = Load(MachineType::AnyTagged(), array, offset);
if (if_hole) {
GotoIf(WordEqual(value, TheHoleConstant()), if_hole);
}
@@ -1907,10 +2271,6 @@ Node* CodeStubAssembler::GrowElementsCapacity(
// Allocate the new backing store.
Node* new_elements = AllocateFixedArray(to_kind, new_capacity, mode);
- // Fill in the added capacity in the new store with holes.
- FillFixedArrayWithValue(to_kind, new_elements, capacity, new_capacity,
- Heap::kTheHoleValueRootIndex, mode);
-
// Copy the elements from the old elements store to the new.
// The size-check above guarantees that the |new_elements| is allocated
// in new space so we can skip the write barrier.
@@ -1935,13 +2295,47 @@ void CodeStubAssembler::InitializeAllocationMemento(
if (FLAG_allocation_site_pretenuring) {
Node* count = LoadObjectField(allocation_site,
AllocationSite::kPretenureCreateCountOffset);
- Node* incremented_count = IntPtrAdd(count, SmiConstant(Smi::FromInt(1)));
+ Node* incremented_count = SmiAdd(count, SmiConstant(Smi::FromInt(1)));
StoreObjectFieldNoWriteBarrier(allocation_site,
AllocationSite::kPretenureCreateCountOffset,
incremented_count);
}
}
+Node* CodeStubAssembler::TryTaggedToFloat64(Node* value,
+ Label* if_valueisnotnumber) {
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kFloat64);
+
+ // Check if the {value} is a Smi or a HeapObject.
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+
+ Bind(&if_valueissmi);
+ {
+ // Convert the Smi {value}.
+ var_result.Bind(SmiToFloat64(value));
+ Goto(&out);
+ }
+
+ Bind(&if_valueisnotsmi);
+ {
+ // Check if {value} is a HeapNumber.
+ Label if_valueisheapnumber(this);
+ Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
+ if_valueisnotnumber);
+
+ Bind(&if_valueisheapnumber);
+ {
+ // Load the floating point value.
+ var_result.Bind(LoadHeapNumberValue(value));
+ Goto(&out);
+ }
+ }
+ Bind(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
// We might need to loop once due to ToNumber conversion.
Variable var_value(this, MachineRepresentation::kTagged),
@@ -1951,42 +2345,23 @@ Node* CodeStubAssembler::TruncateTaggedToFloat64(Node* context, Node* value) {
Goto(&loop);
Bind(&loop);
{
+ Label if_valueisnotnumber(this, Label::kDeferred);
+
// Load the current {value}.
value = var_value.value();
- // Check if the {value} is a Smi or a HeapObject.
- Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ // Convert {value} to Float64 if it is a number and convert it to a number
+ // otherwise.
+ Node* const result = TryTaggedToFloat64(value, &if_valueisnotnumber);
+ var_result.Bind(result);
+ Goto(&done_loop);
- Bind(&if_valueissmi);
+ Bind(&if_valueisnotnumber);
{
- // Convert the Smi {value}.
- var_result.Bind(SmiToFloat64(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotsmi);
- {
- // Check if {value} is a HeapNumber.
- Label if_valueisheapnumber(this),
- if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
-
- Bind(&if_valueisheapnumber);
- {
- // Load the floating point value.
- var_result.Bind(LoadHeapNumberValue(value));
- Goto(&done_loop);
- }
-
- Bind(&if_valueisnotheapnumber);
- {
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- Goto(&loop);
- }
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ Goto(&loop);
}
}
Bind(&done_loop);
@@ -2007,7 +2382,7 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
@@ -2060,8 +2435,8 @@ Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
Bind(&if_valueisequal);
{
GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
- BranchIfInt32LessThan(Float64ExtractHighWord32(value), Int32Constant(0),
- &if_valueisheapnumber, &if_valueisint32);
+ Branch(Int32LessThan(Float64ExtractHighWord32(value), Int32Constant(0)),
+ &if_valueisheapnumber, &if_valueisint32);
}
Bind(&if_valueisnotequal);
Goto(&if_valueisheapnumber);
@@ -2169,7 +2544,7 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
if_valueisstring(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueisnotsmi);
{
// Load the instance type of the {value}.
@@ -2237,9 +2612,9 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
value = var_value.value();
// Check if the {value} is a Smi or a HeapObject.
- GotoIf(WordIsSmi(value), (primitive_type == PrimitiveType::kNumber)
- ? &done_loop
- : &done_throw);
+ GotoIf(TaggedIsSmi(value), (primitive_type == PrimitiveType::kNumber)
+ ? &done_loop
+ : &done_throw);
// Load the mape of the {value}.
Node* value_map = LoadMap(value);
@@ -2301,7 +2676,7 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
Label out(this), throw_exception(this, Label::kDeferred);
Variable var_value_map(this, MachineRepresentation::kTagged);
- GotoIf(WordIsSmi(value), &throw_exception);
+ GotoIf(TaggedIsSmi(value), &throw_exception);
// Load the instance type of the {value}.
var_value_map.Bind(LoadMap(value));
@@ -2323,6 +2698,37 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
return var_value_map.value();
}
+Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
+ Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
+ uint32_t mask =
+ 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ USE(mask);
+ // Interceptors or access checks imply special receiver.
+ CSA_ASSERT(this, Select(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
+ return is_special;
+}
+
+Node* CodeStubAssembler::IsDictionaryMap(Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ Node* bit_field3 = LoadMapBitField3(map);
+ return Word32NotEqual(IsSetWord32<Map::DictionaryMap>(bit_field3),
+ Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsCallableMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return Word32NotEqual(
+ Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsCallable)),
+ Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
+ STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
+ return Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_SPECIAL_RECEIVER_TYPE));
+}
+
Node* CodeStubAssembler::IsStringInstanceType(Node* instance_type) {
STATIC_ASSERT(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
return Int32LessThan(instance_type, Int32Constant(FIRST_NONSTRING_TYPE));
@@ -2334,7 +2740,71 @@ Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
+Node* CodeStubAssembler::IsJSReceiver(Node* object) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return IsJSReceiverInstanceType(LoadInstanceType(object));
+}
+
+Node* CodeStubAssembler::IsJSObject(Node* object) {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return Int32GreaterThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_JS_RECEIVER_TYPE));
+}
+
+Node* CodeStubAssembler::IsJSGlobalProxy(Node* object) {
+ return Word32Equal(LoadInstanceType(object),
+ Int32Constant(JS_GLOBAL_PROXY_TYPE));
+}
+
+Node* CodeStubAssembler::IsMap(Node* map) {
+ return HasInstanceType(map, MAP_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSValue(Node* map) {
+ return HasInstanceType(map, JS_VALUE_TYPE);
+}
+
+Node* CodeStubAssembler::IsJSArray(Node* object) {
+ return HasInstanceType(object, JS_ARRAY_TYPE);
+}
+
+Node* CodeStubAssembler::IsWeakCell(Node* object) {
+ return HasInstanceType(object, WEAK_CELL_TYPE);
+}
+
+Node* CodeStubAssembler::IsName(Node* object) {
+ return Int32LessThanOrEqual(LoadInstanceType(object),
+ Int32Constant(LAST_NAME_TYPE));
+}
+
+Node* CodeStubAssembler::IsString(Node* object) {
+ return Int32LessThanOrEqual(LoadInstanceType(object),
+ Int32Constant(FIRST_NONSTRING_TYPE));
+}
+
+Node* CodeStubAssembler::IsNativeContext(Node* object) {
+ return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex));
+}
+
+Node* CodeStubAssembler::IsFixedDoubleArray(Node* object) {
+ return WordEqual(LoadMap(object), FixedDoubleArrayMapConstant());
+}
+
+Node* CodeStubAssembler::IsHashTable(Node* object) {
+ return WordEqual(LoadMap(object), LoadRoot(Heap::kHashTableMapRootIndex));
+}
+
+Node* CodeStubAssembler::IsDictionary(Node* object) {
+ return WordOr(IsHashTable(object), IsUnseededNumberDictionary(object));
+}
+
+Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
+ return WordEqual(LoadMap(object),
+ LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
+}
+
Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+ CSA_ASSERT(this, IsString(string));
// Translate the {index} into a Word.
index = SmiToWord(index);
@@ -2580,6 +3050,8 @@ Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
Label end(a), two_byte_sequential(a);
Variable var_result(a, MachineRepresentation::kTagged);
+ Node* const smi_zero = a->SmiConstant(Smi::kZero);
+
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
a->GotoIf(a->Word32Equal(a->Word32And(from_instance_type,
a->Int32Constant(kStringEncodingMask)),
@@ -2590,8 +3062,10 @@ Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
{
Node* result =
a->AllocateSeqOneByteString(context, a->SmiToWord(character_count));
- a->CopyStringCharacters(from, result, from_index, character_count,
- String::ONE_BYTE_ENCODING);
+ a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+ String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
var_result.Bind(result);
a->Goto(&end);
@@ -2602,8 +3076,10 @@ Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
{
Node* result =
a->AllocateSeqTwoByteString(context, a->SmiToWord(character_count));
- a->CopyStringCharacters(from, result, from_index, character_count,
- String::TWO_BYTE_ENCODING);
+ a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+ String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
var_result.Bind(result);
a->Goto(&end);
@@ -2632,7 +3108,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Make sure first argument is a string.
// Bailout if receiver is a Smi.
- GotoIf(WordIsSmi(string), &runtime);
+ GotoIf(TaggedIsSmi(string), &runtime);
// Load the instance type of the {string}.
Node* const instance_type = LoadInstanceType(string);
@@ -2814,7 +3290,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
GotoIf(SmiAbove(substr_length, string_length), &runtime);
// Equal length - check if {from, to} == {0, str.length}.
- GotoIf(SmiAbove(from, SmiConstant(Smi::FromInt(0))), &runtime);
+ GotoIf(SmiAbove(from, SmiConstant(Smi::kZero)), &runtime);
// Return the original string (substr_length == string_length).
@@ -2837,6 +3313,178 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
return var_result.value();
}
+Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
+ AllocationFlags flags) {
+ Label check_right(this);
+ Label runtime(this, Label::kDeferred);
+ Label cons(this);
+ Label non_cons(this);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+ Label done_native(this, &result);
+ Counters* counters = isolate()->counters();
+
+ Node* left_length = LoadStringLength(left);
+ GotoIf(WordNotEqual(IntPtrConstant(0), left_length), &check_right);
+ result.Bind(right);
+ Goto(&done_native);
+
+ Bind(&check_right);
+ Node* right_length = LoadStringLength(right);
+ GotoIf(WordNotEqual(IntPtrConstant(0), right_length), &cons);
+ result.Bind(left);
+ Goto(&done_native);
+
+ Bind(&cons);
+ CSA_ASSERT(this, TaggedIsSmi(left_length));
+ CSA_ASSERT(this, TaggedIsSmi(right_length));
+ Node* new_length = SmiAdd(left_length, right_length);
+ GotoIf(UintPtrGreaterThanOrEqual(
+ new_length, SmiConstant(Smi::FromInt(String::kMaxLength))),
+ &runtime);
+
+ GotoIf(IntPtrLessThan(new_length,
+ SmiConstant(Smi::FromInt(ConsString::kMinLength))),
+ &non_cons);
+
+ result.Bind(NewConsString(context, new_length, left, right, flags));
+ Goto(&done_native);
+
+ Bind(&non_cons);
+
+ Comment("Full string concatenate");
+ Node* left_instance_type = LoadInstanceType(left);
+ Node* right_instance_type = LoadInstanceType(right);
+ // Compute intersection and difference of instance types.
+
+ Node* ored_instance_types = WordOr(left_instance_type, right_instance_type);
+ Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+
+ // Check if both strings have the same encoding and both are sequential.
+ GotoIf(WordNotEqual(
+ WordAnd(xored_instance_types, IntPtrConstant(kStringEncodingMask)),
+ IntPtrConstant(0)),
+ &runtime);
+ GotoIf(WordNotEqual(WordAnd(ored_instance_types,
+ IntPtrConstant(kStringRepresentationMask)),
+ IntPtrConstant(0)),
+ &runtime);
+
+ Label two_byte(this);
+ GotoIf(WordEqual(
+ WordAnd(ored_instance_types, IntPtrConstant(kStringEncodingMask)),
+ IntPtrConstant(kTwoByteStringTag)),
+ &two_byte);
+ // One-byte sequential string case
+ Node* new_string =
+ AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
+ CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+ SmiConstant(Smi::kZero), left_length,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
+ SMI_PARAMETERS);
+ CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero), left_length,
+ right_length, String::ONE_BYTE_ENCODING,
+ String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
+ result.Bind(new_string);
+ Goto(&done_native);
+
+ Bind(&two_byte);
+ {
+ // Two-byte sequential string case
+ new_string = AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
+ CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+ SmiConstant(Smi::kZero), left_length,
+ String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ SMI_PARAMETERS);
+ CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero),
+ left_length, right_length, String::TWO_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+ result.Bind(new_string);
+ Goto(&done_native);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
+ Goto(&done);
+ }
+
+ Bind(&done_native);
+ {
+ IncrementCounter(counters->string_add_native(), 1);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
+Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
+ Node* needle_char, Node* from) {
+ CSA_ASSERT(this, IsString(string));
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ Label out(this), runtime(this, Label::kDeferred);
+
+ // Let runtime handle non-one-byte {needle_char}.
+
+ Node* const one_byte_char_mask = IntPtrConstant(0xFF);
+ GotoUnless(WordEqual(WordAnd(needle_char, one_byte_char_mask), needle_char),
+ &runtime);
+
+ // TODO(jgruber): Handle external and two-byte strings.
+
+ Node* const one_byte_seq_mask = Int32Constant(
+ kIsIndirectStringMask | kExternalStringTag | kStringEncodingMask);
+ Node* const expected_masked = Int32Constant(kOneByteStringTag);
+
+ Node* const string_instance_type = LoadInstanceType(string);
+ GotoUnless(Word32Equal(Word32And(string_instance_type, one_byte_seq_mask),
+ expected_masked),
+ &runtime);
+
+ // If we reach this, {string} is a non-indirect, non-external one-byte string.
+
+ Node* const length = LoadStringLength(string);
+ Node* const search_range_length = SmiUntag(SmiSub(length, from));
+
+ const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ Node* const begin = IntPtrConstant(offset);
+ Node* const cursor = IntPtrAdd(begin, SmiUntag(from));
+ Node* const end = IntPtrAdd(cursor, search_range_length);
+
+ var_result.Bind(SmiConstant(Smi::FromInt(-1)));
+
+ BuildFastLoop(MachineType::PointerRepresentation(), cursor, end,
+ [string, needle_char, begin, &var_result, &out](
+ CodeStubAssembler* csa, Node* cursor) {
+ Label next(csa);
+ Node* value = csa->Load(MachineType::Uint8(), string, cursor);
+ csa->GotoUnless(csa->WordEqual(value, needle_char), &next);
+
+ // Found a match.
+ Node* index = csa->SmiTag(csa->IntPtrSub(cursor, begin));
+ var_result.Bind(index);
+ csa->Goto(&out);
+
+ csa->Bind(&next);
+ },
+ 1, IndexAdvanceMode::kPost);
+ Goto(&out);
+
+ Bind(&runtime);
+ {
+ Node* const pattern = StringFromCharCode(needle_char);
+ Node* const result =
+ CallRuntime(Runtime::kStringIndexOf, context, string, pattern, from);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
UnicodeEncoding encoding) {
Variable var_result(this, MachineRepresentation::kTagged);
@@ -2901,7 +3549,8 @@ Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
Word32And(hash, Int32Constant(String::kContainsCachedArrayIndexMask));
GotoIf(Word32NotEqual(bit, Int32Constant(0)), &runtime);
- var_result.Bind(SmiTag(BitFieldDecode<String::ArrayIndexValueBits>(hash)));
+ var_result.Bind(
+ SmiTag(DecodeWordFromWord32<String::ArrayIndexValueBits>(hash)));
Goto(&end);
Bind(&runtime);
@@ -2914,6 +3563,85 @@ Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
return var_result.value();
}
+Node* CodeStubAssembler::NumberToString(compiler::Node* context,
+ compiler::Node* argument) {
+ Variable result(this, MachineRepresentation::kTagged);
+ Label runtime(this, Label::kDeferred);
+ Label smi(this);
+ Label done(this, &result);
+
+ // Load the number string cache.
+ Node* number_string_cache = LoadRoot(Heap::kNumberStringCacheRootIndex);
+
+ // Make the hash mask from the length of the number string cache. It
+ // contains two elements (number and string) for each cache entry.
+ Node* mask = LoadFixedArrayBaseLength(number_string_cache);
+ Node* one = IntPtrConstant(1);
+ mask = IntPtrSub(mask, one);
+
+ GotoIf(TaggedIsSmi(argument), &smi);
+
+ // Argument isn't smi, check to see if it's a heap-number.
+ Node* map = LoadMap(argument);
+ GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+
+ // Make a hash from the two 32-bit values of the double.
+ Node* low =
+ LoadObjectField(argument, HeapNumber::kValueOffset, MachineType::Int32());
+ Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
+ MachineType::Int32());
+ Node* hash = Word32Xor(low, high);
+ if (Is64()) hash = ChangeInt32ToInt64(hash);
+ hash = WordShl(hash, one);
+ Node* index = WordAnd(hash, SmiToWord(mask));
+
+  // Cache entry's key must be a heap number.
+ Node* number_key =
+ LoadFixedArrayElement(number_string_cache, index, 0, INTPTR_PARAMETERS);
+ GotoIf(TaggedIsSmi(number_key), &runtime);
+ map = LoadMap(number_key);
+ GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+
+ // Cache entry's key must match the heap number value we're looking for.
+ Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
+ MachineType::Int32());
+ Node* high_compare = LoadObjectField(
+ number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
+ GotoUnless(WordEqual(low, low_compare), &runtime);
+ GotoUnless(WordEqual(high, high_compare), &runtime);
+
+  // Heap number match, return value from the cache entry.
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+ result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize,
+ INTPTR_PARAMETERS));
+ Goto(&done);
+
+ Bind(&runtime);
+ {
+ // No cache entry, go to the runtime.
+ result.Bind(CallRuntime(Runtime::kNumberToString, context, argument));
+ }
+ Goto(&done);
+
+ Bind(&smi);
+ {
+ // Load the smi key, make sure it matches the smi we're looking for.
+ Node* smi_index = WordAnd(WordShl(argument, one), mask);
+ Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
+ SMI_PARAMETERS);
+ GotoIf(WordNotEqual(smi_key, argument), &runtime);
+
+ // Smi match, return value from cache entry.
+ IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
+ result.Bind(LoadFixedArrayElement(number_string_cache, smi_index,
+ kPointerSize, SMI_PARAMETERS));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
Node* CodeStubAssembler::ToName(Node* context, Node* value) {
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
@@ -2922,7 +3650,7 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
Variable var_result(this, MachineRepresentation::kTagged);
Label is_number(this);
- GotoIf(WordIsSmi(value), &is_number);
+ GotoIf(TaggedIsSmi(value), &is_number);
Label not_name(this);
Node* value_instance_type = LoadInstanceType(value);
@@ -2965,8 +3693,8 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Assert input is a HeapObject (not smi or heap number)
- Assert(Word32BinaryNot(WordIsSmi(input)));
- Assert(Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
+ CSA_ASSERT(this, Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
// We might need to loop once here due to ToPrimitive conversions.
Variable var_input(this, MachineRepresentation::kTagged);
@@ -3015,7 +3743,7 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Check if the {result} is already a Number.
Label if_resultisnumber(this), if_resultisnotnumber(this);
- GotoIf(WordIsSmi(result), &if_resultisnumber);
+ GotoIf(TaggedIsSmi(result), &if_resultisnumber);
Node* result_map = LoadMap(result);
Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
&if_resultisnotnumber);
@@ -3057,7 +3785,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
Label end(this);
Label not_smi(this, Label::kDeferred);
- GotoUnless(WordIsSmi(input), &not_smi);
+ GotoUnless(TaggedIsSmi(input), &not_smi);
var_result.Bind(input);
Goto(&end);
@@ -3082,6 +3810,110 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
return var_result.value();
}
+Node* CodeStubAssembler::ToString(Node* context, Node* input) {
+ Label is_number(this);
+ Label runtime(this, Label::kDeferred);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+
+ GotoIf(TaggedIsSmi(input), &is_number);
+
+ Node* input_map = LoadMap(input);
+ Node* input_instance_type = LoadMapInstanceType(input_map);
+
+ result.Bind(input);
+ GotoIf(IsStringInstanceType(input_instance_type), &done);
+
+ Label not_heap_number(this);
+ Branch(WordNotEqual(input_map, HeapNumberMapConstant()), &not_heap_number,
+ &is_number);
+
+ Bind(&is_number);
+ result.Bind(NumberToString(context, input));
+ Goto(&done);
+
+ Bind(&not_heap_number);
+ {
+ GotoIf(Word32NotEqual(input_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &runtime);
+ result.Bind(LoadObjectField(input, Oddball::kToStringOffset));
+ Goto(&done);
+ }
+
+ Bind(&runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kToString, context, input));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
+Node* CodeStubAssembler::FlattenString(Node* string) {
+ CSA_ASSERT(this, IsString(string));
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(string);
+
+ Node* instance_type = LoadInstanceType(string);
+
+ // Check if the {string} is not a ConsString (i.e. already flat).
+ Label is_cons(this, Label::kDeferred), is_flat_in_cons(this), end(this);
+ {
+ GotoUnless(Word32Equal(Word32And(instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kConsStringTag)),
+ &end);
+
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string).
+ Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
+ Branch(WordEqual(rhs, EmptyStringConstant()), &is_flat_in_cons, &is_cons);
+ }
+
+ // Bail out to the runtime.
+ Bind(&is_cons);
+ {
+ var_result.Bind(
+ CallRuntime(Runtime::kFlattenString, NoContextConstant(), string));
+ Goto(&end);
+ }
+
+ Bind(&is_flat_in_cons);
+ {
+ var_result.Bind(LoadObjectField(string, ConsString::kFirstOffset));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
+ Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
+ Variable result(this, MachineRepresentation::kTagged);
+ Label done(this, &result);
+
+ BranchIfJSReceiver(input, &if_isreceiver, &if_isnotreceiver);
+
+ Bind(&if_isreceiver);
+ {
+ // Convert {input} to a primitive first passing Number hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate());
+ result.Bind(CallStub(callable, context, input));
+ Goto(&done);
+ }
+
+ Bind(&if_isnotreceiver);
+ {
+ result.Bind(input);
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
ToIntegerTruncationMode mode) {
// We might need to loop once for ToNumber conversion.
@@ -3098,7 +3930,7 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
Node* arg = var_arg.value();
// Check if {arg} is a Smi.
- GotoIf(WordIsSmi(arg), &out);
+ GotoIf(TaggedIsSmi(arg), &out);
// Check if {arg} is a HeapNumber.
Label if_argisheapnumber(this),
@@ -3135,7 +3967,7 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
}
Bind(&return_zero);
- var_arg.Bind(SmiConstant(Smi::FromInt(0)));
+ var_arg.Bind(SmiConstant(Smi::kZero));
Goto(&out);
}
@@ -3143,12 +3975,16 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
return var_arg.value();
}
-Node* CodeStubAssembler::BitFieldDecode(Node* word32, uint32_t shift,
- uint32_t mask) {
+Node* CodeStubAssembler::DecodeWord32(Node* word32, uint32_t shift,
+ uint32_t mask) {
return Word32Shr(Word32And(word32, Int32Constant(mask)),
static_cast<int>(shift));
}
+Node* CodeStubAssembler::DecodeWord(Node* word, uint32_t shift, uint32_t mask) {
+ return WordShr(WordAnd(word, IntPtrConstant(mask)), static_cast<int>(shift));
+}
+
void CodeStubAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
Node* counter_address = ExternalConstant(ExternalReference(counter));
@@ -3218,7 +4054,7 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Goto(if_keyisunique);
Bind(&if_hascachedindex);
- var_index->Bind(BitFieldDecode<Name::ArrayIndexValueBits>(hash));
+ var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
Goto(if_keyisindex);
}
@@ -3229,12 +4065,27 @@ Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
field_index));
}
+template Node* CodeStubAssembler::EntryToIndex<NameDictionary>(Node*, int);
+template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
+
+Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
+ Node* capacity = IntPtrRoundUpToPowerOfTwo32(
+ WordShl(at_least_space_for, IntPtrConstant(1)));
+ return IntPtrMax(capacity, IntPtrConstant(HashTableBase::kMinCapacity));
+}
+
+Node* CodeStubAssembler::IntPtrMax(Node* left, Node* right) {
+ return Select(IntPtrGreaterThanOrEqual(left, right), left, right,
+ MachineType::PointerRepresentation());
+}
+
template <typename Dictionary>
void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* unique_name, Label* if_found,
Variable* var_name_index,
Label* if_not_found,
int inlined_probes) {
+ CSA_ASSERT(this, IsDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
Comment("NameDictionaryLookup");
@@ -3319,6 +4170,7 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Label* if_found,
Variable* var_entry,
Label* if_not_found) {
+ CSA_ASSERT(this, IsDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
@@ -3361,7 +4213,7 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Label next_probe(this);
{
Label if_currentissmi(this), if_currentisnotsmi(this);
- Branch(WordIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
+ Branch(TaggedIsSmi(current), &if_currentissmi, &if_currentisnotsmi);
Bind(&if_currentissmi);
{
Node* current_value = SmiUntag(current);
@@ -3393,25 +4245,22 @@ void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
Label* if_found,
Variable* var_name_index,
Label* if_not_found) {
- Variable var_descriptor(this, MachineType::PointerRepresentation());
- Label loop(this, &var_descriptor);
- var_descriptor.Bind(IntPtrConstant(0));
- Goto(&loop);
-
- Bind(&loop);
- {
- Node* index = var_descriptor.value();
- Node* name_offset = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
- Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
- GotoIf(WordEqual(index, nof), if_not_found);
- Node* name_index = IntPtrAdd(name_offset, IntPtrMul(index, factor));
- Node* candidate_name =
- LoadFixedArrayElement(descriptors, name_index, 0, INTPTR_PARAMETERS);
- var_name_index->Bind(name_index);
- GotoIf(WordEqual(candidate_name, unique_name), if_found);
- var_descriptor.Bind(IntPtrAdd(index, IntPtrConstant(1)));
- Goto(&loop);
- }
+ Node* first_inclusive = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
+ Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+ Node* last_exclusive = IntPtrAdd(first_inclusive, IntPtrMul(nof, factor));
+
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), last_exclusive, first_inclusive,
+ [descriptors, unique_name, if_found, var_name_index](
+ CodeStubAssembler* assembler, Node* name_index) {
+ Node* candidate_name = assembler->LoadFixedArrayElement(
+ descriptors, name_index, 0, INTPTR_PARAMETERS);
+ var_name_index->Bind(name_index);
+ assembler->GotoIf(assembler->WordEqual(candidate_name, unique_name),
+ if_found);
+ },
+ -DescriptorArray::kDescriptorSize, IndexAdvanceMode::kPre);
+ Goto(if_not_found);
}
void CodeStubAssembler::TryLookupProperty(
@@ -3428,19 +4277,20 @@ void CodeStubAssembler::TryLookupProperty(
Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
&if_objectisspecial);
- Node* bit_field = LoadMapBitField(map);
- Node* mask = Int32Constant(1 << Map::kHasNamedInterceptor |
- 1 << Map::kIsAccessCheckNeeded);
- Assert(Word32Equal(Word32And(bit_field, mask), Int32Constant(0)));
+ uint32_t mask =
+ 1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
+ CSA_ASSERT(this, Word32BinaryNot(IsSetWord32(LoadMapBitField(map), mask)));
+ USE(mask);
Node* bit_field3 = LoadMapBitField3(map);
- Node* bit = BitFieldDecode<Map::DictionaryMap>(bit_field3);
Label if_isfastmap(this), if_isslowmap(this);
- Branch(Word32Equal(bit, Int32Constant(0)), &if_isfastmap, &if_isslowmap);
+ Branch(IsSetWord32<Map::DictionaryMap>(bit_field3), &if_isslowmap,
+ &if_isfastmap);
Bind(&if_isfastmap);
{
Comment("DescriptorArrayLookup");
- Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bit_field3);
+ Node* nof =
+ DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
// Bail out to the runtime for large numbers of own descriptors. The stub
// only does linear search, which becomes too expensive in that case.
{
@@ -3528,7 +4378,7 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
name_to_details_offset);
var_details->Bind(details);
- Node* location = BitFieldDecode<PropertyDetails::LocationField>(details);
+ Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
Label if_in_field(this), if_in_descriptor(this), done(this);
Branch(Word32Equal(location, Int32Constant(kField)), &if_in_field,
@@ -3536,17 +4386,17 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Bind(&if_in_field);
{
Node* field_index =
- BitFieldDecodeWord<PropertyDetails::FieldIndexField>(details);
+ DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
Node* representation =
- BitFieldDecode<PropertyDetails::RepresentationField>(details);
+ DecodeWord32<PropertyDetails::RepresentationField>(details);
Node* inobject_properties = LoadMapInobjectProperties(map);
Label if_inobject(this), if_backing_store(this);
Variable var_double_value(this, MachineRepresentation::kFloat64);
Label rebox_double(this, &var_double_value);
- BranchIfUintPtrLessThan(field_index, inobject_properties, &if_inobject,
- &if_backing_store);
+ Branch(UintPtrLessThan(field_index, inobject_properties), &if_inobject,
+ &if_backing_store);
Bind(&if_inobject);
{
Comment("if_inobject");
@@ -3556,9 +4406,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
IntPtrConstant(kPointerSize));
Label if_double(this), if_tagged(this);
- BranchIfWord32NotEqual(representation,
- Int32Constant(Representation::kDouble), &if_tagged,
- &if_double);
+ Branch(Word32NotEqual(representation,
+ Int32Constant(Representation::kDouble)),
+ &if_tagged, &if_double);
Bind(&if_tagged);
{
var_value->Bind(LoadObjectField(object, field_offset));
@@ -3584,9 +4434,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
Node* value = LoadFixedArrayElement(properties, field_index);
Label if_double(this), if_tagged(this);
- BranchIfWord32NotEqual(representation,
- Int32Constant(Representation::kDouble), &if_tagged,
- &if_double);
+ Branch(Word32NotEqual(representation,
+ Int32Constant(Representation::kDouble)),
+ &if_tagged, &if_double);
Bind(&if_tagged);
{
var_value->Bind(value);
@@ -3623,7 +4473,7 @@ void CodeStubAssembler::LoadPropertyFromNameDictionary(Node* dictionary,
Variable* var_details,
Variable* var_value) {
Comment("LoadPropertyFromNameDictionary");
-
+ CSA_ASSERT(this, IsDictionary(dictionary));
const int name_to_details_offset =
(NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
kPointerSize;
@@ -3647,6 +4497,7 @@ void CodeStubAssembler::LoadPropertyFromGlobalDictionary(Node* dictionary,
Variable* var_value,
Label* if_deleted) {
Comment("[ LoadPropertyFromGlobalDictionary");
+ CSA_ASSERT(this, IsDictionary(dictionary));
const int name_to_value_offset =
(GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
@@ -3677,7 +4528,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
var_value.Bind(value);
Label done(this);
- Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+ Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
// Accessor case.
@@ -3686,7 +4537,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
Int32Constant(ACCESSOR_INFO_TYPE)),
if_bailout);
- AssertInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE);
+ CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
Node* getter_map = LoadMap(getter);
Node* instance_type = LoadMapInstanceType(getter_map);
@@ -3697,10 +4548,7 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
// Return undefined if the {getter} is not callable.
var_value.Bind(UndefinedConstant());
- GotoIf(Word32Equal(Word32And(LoadMapBitField(getter_map),
- Int32Constant(1 << Map::kIsCallable)),
- Int32Constant(0)),
- &done);
+ GotoUnless(IsCallableMap(getter_map), &done);
// Call the accessor.
Callable callable = CodeFactory::Call(isolate());
@@ -3847,18 +4695,18 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
}
Bind(&if_isfaststringwrapper);
{
- AssertInstanceType(object, JS_VALUE_TYPE);
+ CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
Node* string = LoadJSValueValue(object);
- Assert(IsStringInstanceType(LoadInstanceType(string)));
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
Node* length = LoadStringLength(string);
GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
Goto(&if_isobjectorsmi);
}
Bind(&if_isslowstringwrapper);
{
- AssertInstanceType(object, JS_VALUE_TYPE);
+ CSA_ASSERT(this, HasInstanceType(object, JS_VALUE_TYPE));
Node* string = LoadJSValueValue(object);
- Assert(IsStringInstanceType(LoadInstanceType(string)));
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
Node* length = LoadStringLength(string);
GotoIf(UintPtrLessThan(intptr_index, SmiUntag(length)), if_found);
Goto(&if_isdictionary);
@@ -3884,7 +4732,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
Label* if_bailout) {
// Ensure receiver is JSReceiver, otherwise bailout.
Label if_objectisnotsmi(this);
- Branch(WordIsSmi(receiver), if_bailout, &if_objectisnotsmi);
+ Branch(TaggedIsSmi(receiver), if_bailout, &if_objectisnotsmi);
Bind(&if_objectisnotsmi);
Node* map = LoadMap(receiver);
@@ -3994,7 +4842,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
return_runtime(this, Label::kDeferred), return_result(this);
// Goto runtime if {object} is a Smi.
- GotoIf(WordIsSmi(object), &return_runtime);
+ GotoIf(TaggedIsSmi(object), &return_runtime);
// Load map of {object}.
Node* object_map = LoadMap(object);
@@ -4017,7 +4865,7 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
}
// Goto runtime if {callable} is a Smi.
- GotoIf(WordIsSmi(callable), &return_runtime);
+ GotoIf(TaggedIsSmi(callable), &return_runtime);
// Load map of {callable}.
Node* callable_map = LoadMap(callable);
@@ -4134,8 +4982,10 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
bool constant_index = false;
if (mode == SMI_PARAMETERS) {
element_size_shift -= kSmiShiftBits;
- constant_index = ToIntPtrConstant(index_node, index);
- index = index >> kSmiShiftBits;
+ Smi* smi_index;
+ constant_index = ToSmiConstant(index_node, smi_index);
+ if (constant_index) index = smi_index->value();
+ index_node = BitcastTaggedToWord(index_node);
} else if (mode == INTEGER_PARAMETERS) {
int32_t temp = 0;
constant_index = ToInt32Constant(index_node, temp);
@@ -4150,16 +5000,14 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
if (Is64() && mode == INTEGER_PARAMETERS) {
index_node = ChangeInt32ToInt64(index_node);
}
- if (base_size == 0) {
- return (element_size_shift >= 0)
- ? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordShr(index_node, IntPtrConstant(-element_size_shift));
- }
- return IntPtrAdd(
- IntPtrConstant(base_size),
- (element_size_shift >= 0)
- ? WordShl(index_node, IntPtrConstant(element_size_shift))
- : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+
+ Node* shifted_index =
+ (element_size_shift == 0)
+ ? index_node
+ : ((element_size_shift > 0)
+ ? WordShl(index_node, IntPtrConstant(element_size_shift))
+ : WordShr(index_node, IntPtrConstant(-element_size_shift)));
+ return IntPtrAddFoldConstants(IntPtrConstant(base_size), shifted_index);
}
compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
@@ -4186,11 +5034,10 @@ void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
Variable var_receiver_map(this, MachineRepresentation::kTagged);
- // TODO(ishell): defer blocks when it works.
- Label load_smi_map(this /*, Label::kDeferred*/), load_receiver_map(this),
+ Label load_smi_map(this, Label::kDeferred), load_receiver_map(this),
if_result(this);
- Branch(WordIsSmi(receiver), &load_smi_map, &load_receiver_map);
+ Branch(TaggedIsSmi(receiver), &load_smi_map, &load_receiver_map);
Bind(&load_smi_map);
{
var_receiver_map.Bind(LoadRoot(Heap::kHeapNumberMapRootIndex));
@@ -4208,22 +5055,29 @@ compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
compiler::Node* CodeStubAssembler::TryMonomorphicCase(
compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
Label* if_handler, Variable* var_handler, Label* if_miss) {
+ Comment("TryMonomorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// TODO(ishell): add helper class that hides offset computations for a series
// of loads.
int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
- Node* offset = ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS,
- SMI_PARAMETERS, header_size);
- Node* feedback = Load(MachineType::AnyTagged(), vector, offset);
+ // Adding |header_size| with a separate IntPtrAdd rather than passing it
+ // into ElementOffsetFromIndex() allows it to be folded into a single
+ // [base, index, offset] indirect memory access on x64.
+ Node* offset =
+ ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+ Node* feedback = Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size)));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
- GotoUnless(WordEqual(receiver_map, LoadWeakCellValue(feedback)), if_miss);
+ GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
+ if_miss);
- Node* handler = Load(MachineType::AnyTagged(), vector,
- IntPtrAdd(offset, IntPtrConstant(kPointerSize)));
+ Node* handler =
+ Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
var_handler->Bind(handler);
Goto(if_handler);
@@ -4233,6 +5087,7 @@ compiler::Node* CodeStubAssembler::TryMonomorphicCase(
void CodeStubAssembler::HandlePolymorphicCase(
compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
Variable* var_handler, Label* if_miss, int unroll_count) {
+ Comment("HandlePolymorphicCase");
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
// Iterate {feedback} array.
@@ -4252,34 +5107,70 @@ void CodeStubAssembler::HandlePolymorphicCase(
Bind(&next_entry);
}
- Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
// Loop from {unroll_count}*kEntrySize to {length}.
- Variable var_index(this, MachineType::PointerRepresentation());
- Label loop(this, &var_index);
- var_index.Bind(IntPtrConstant(unroll_count * kEntrySize));
- Goto(&loop);
- Bind(&loop);
- {
- Node* index = var_index.value();
- GotoIf(UintPtrGreaterThanOrEqual(index, length), if_miss);
-
- Node* cached_map = LoadWeakCellValue(
- LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
-
- Label next_entry(this);
- GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+ Node* init = IntPtrConstant(unroll_count * kEntrySize);
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), init, length,
+ [receiver_map, feedback, if_handler, var_handler](CodeStubAssembler* csa,
+ Node* index) {
+ Node* cached_map = csa->LoadWeakCellValue(
+ csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
+
+ Label next_entry(csa);
+ csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ // Found, now call handler.
+ Node* handler = csa->LoadFixedArrayElement(
+ feedback, index, kPointerSize, INTPTR_PARAMETERS);
+ var_handler->Bind(handler);
+ csa->Goto(if_handler);
+
+ csa->Bind(&next_entry);
+ },
+ kEntrySize, IndexAdvanceMode::kPost);
+ // The loop falls through if no handler was found.
+ Goto(if_miss);
+}
+
+void CodeStubAssembler::HandleKeyedStorePolymorphicCase(
+ compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
+ Variable* var_handler, Label* if_transition_handler,
+ Variable* var_transition_map_cell, Label* if_miss) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
- // Found, now call handler.
- Node* handler =
- LoadFixedArrayElement(feedback, index, kPointerSize, INTPTR_PARAMETERS);
- var_handler->Bind(handler);
- Goto(if_handler);
+ const int kEntrySize = 3;
- Bind(&next_entry);
- var_index.Bind(IntPtrAdd(index, IntPtrConstant(kEntrySize)));
- Goto(&loop);
- }
+ Node* init = IntPtrConstant(0);
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), init, length,
+ [receiver_map, feedback, if_handler, var_handler, if_transition_handler,
+ var_transition_map_cell](CodeStubAssembler* csa, Node* index) {
+ Node* cached_map = csa->LoadWeakCellValue(
+ csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
+ Label next_entry(csa);
+ csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ Node* maybe_transition_map_cell = csa->LoadFixedArrayElement(
+ feedback, index, kPointerSize, INTPTR_PARAMETERS);
+
+ var_handler->Bind(csa->LoadFixedArrayElement(
+ feedback, index, 2 * kPointerSize, INTPTR_PARAMETERS));
+ csa->GotoIf(
+ csa->WordEqual(maybe_transition_map_cell,
+ csa->LoadRoot(Heap::kUndefinedValueRootIndex)),
+ if_handler);
+ var_transition_map_cell->Bind(maybe_transition_map_cell);
+ csa->Goto(if_transition_handler);
+
+ csa->Bind(&next_entry);
+ },
+ kEntrySize, IndexAdvanceMode::kPost);
+ // The loop falls through if no handler was found.
+ Goto(if_miss);
}
compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
@@ -4288,9 +5179,10 @@ compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
// Compute the hash of the name (use entire hash field).
Node* hash_field = LoadNameHashField(name);
- Assert(Word32Equal(
- Word32And(hash_field, Int32Constant(Name::kHashNotComputedMask)),
- Int32Constant(0)));
+ CSA_ASSERT(this,
+ Word32Equal(Word32And(hash_field,
+ Int32Constant(Name::kHashNotComputedMask)),
+ Int32Constant(0)));
// Using only the low bits in 64-bit mode is unlikely to increase the
// risk of collision even if the heap is spread over an area larger than
@@ -4355,11 +5247,11 @@ void CodeStubAssembler::TryProbeStubCacheTable(
DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
stub_cache->key_reference(table).address());
- Node* code = Load(MachineType::Pointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+ Node* handler = Load(MachineType::TaggedPointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
// We found the handler.
- var_handler->Bind(code);
+ var_handler->Bind(handler);
Goto(if_handler);
}
@@ -4372,7 +5264,7 @@ void CodeStubAssembler::TryProbeStubCache(
IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
// Check that the {receiver} isn't a smi.
- GotoIf(WordIsSmi(receiver), &miss);
+ GotoIf(TaggedIsSmi(receiver), &miss);
Node* receiver_map = LoadMap(receiver);
@@ -4399,7 +5291,7 @@ void CodeStubAssembler::TryProbeStubCache(
Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
Variable var_intptr_key(this, MachineType::PointerRepresentation());
Label done(this, &var_intptr_key), key_is_smi(this);
- GotoIf(WordIsSmi(key), &key_is_smi);
+ GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
GotoUnless(WordEqual(LoadMap(key), HeapNumberMapConstant()), miss);
{
@@ -4426,6 +5318,7 @@ void CodeStubAssembler::EmitFastElementsBoundsCheck(Node* object,
Node* is_jsarray_condition,
Label* miss) {
Variable var_length(this, MachineType::PointerRepresentation());
+ Comment("Fast elements bounds check");
Label if_array(this), length_loaded(this, &var_length);
GotoIf(is_jsarray_condition, &if_array);
{
@@ -4450,7 +5343,7 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
Label* out_of_bounds, Label* miss) {
Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
- if_dictionary(this), unreachable(this);
+ if_dictionary(this);
GotoIf(
IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
&if_nonfast);
@@ -4535,7 +5428,7 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
Node* details = SmiToWord32(
LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
- Node* kind = BitFieldDecode<PropertyDetails::KindField>(details);
+ Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
// TODO(jkummerow): Support accessors without missing?
GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
// Finally, load the value.
@@ -4579,13 +5472,13 @@ void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
- const int kTypedElementsKindCount = LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
- 1;
+ const size_t kTypedElementsKindCount =
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
- static_cast<size_t>(kTypedElementsKindCount));
+ kTypedElementsKindCount);
Bind(&uint8_elements);
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
@@ -4645,114 +5538,370 @@ void CodeStubAssembler::HandleLoadICHandlerCase(
const LoadICParameters* p, Node* handler, Label* miss,
ElementSupport support_elements) {
Comment("have_handler");
- Label call_handler(this);
- GotoUnless(WordIsSmi(handler), &call_handler);
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ var_holder.Bind(p->receiver);
+ Variable var_smi_handler(this, MachineRepresentation::kTagged);
+ var_smi_handler.Bind(handler);
+
+ Variable* vars[] = {&var_holder, &var_smi_handler};
+ Label if_smi_handler(this, 2, vars);
+ Label try_proto_handler(this), call_handler(this);
- // |handler| is a Smi, encoding what to do. See handler-configuration.h
+ Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+ // |handler| is a Smi, encoding what to do. See SmiHandler methods
// for the encoding format.
+ Bind(&if_smi_handler);
{
- Variable var_double_value(this, MachineRepresentation::kFloat64);
- Label rebox_double(this, &var_double_value);
+ HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
+ miss, support_elements);
+ }
- Node* handler_word = SmiUntag(handler);
- if (support_elements == kSupportElements) {
- Label property(this);
- Node* handler_type =
- WordAnd(handler_word, IntPtrConstant(LoadHandlerTypeBit::kMask));
- GotoUnless(
- WordEqual(handler_type, IntPtrConstant(kLoadICHandlerForElements)),
- &property);
-
- Comment("element_load");
- Node* intptr_index = TryToIntptr(p->name, miss);
- Node* elements = LoadElements(p->receiver);
- Node* is_jsarray =
- WordAnd(handler_word, IntPtrConstant(KeyedLoadIsJsArray::kMask));
- Node* is_jsarray_condition = WordNotEqual(is_jsarray, IntPtrConstant(0));
- Node* elements_kind = BitFieldDecode<KeyedLoadElementsKind>(handler_word);
- Label if_hole(this), unimplemented_elements_kind(this);
- Label* out_of_bounds = miss;
- EmitElementLoad(p->receiver, elements, elements_kind, intptr_index,
- is_jsarray_condition, &if_hole, &rebox_double,
- &var_double_value, &unimplemented_elements_kind,
- out_of_bounds, miss);
-
- Bind(&unimplemented_elements_kind);
- {
- // Smi handlers should only be installed for supported elements kinds.
- // Crash if we get here.
- DebugBreak();
- Goto(miss);
- }
+ Bind(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+ HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss);
+ }
- Bind(&if_hole);
- {
- Comment("convert hole");
- Node* convert_hole =
- WordAnd(handler_word, IntPtrConstant(KeyedLoadConvertHole::kMask));
- GotoIf(WordEqual(convert_hole, IntPtrConstant(0)), miss);
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoUnless(
- WordEqual(
- LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
- miss);
- Return(UndefinedConstant());
- }
+ Bind(&call_handler);
+ {
+ typedef LoadWithVectorDescriptor Descriptor;
+ TailCallStub(Descriptor(isolate()), handler, p->context,
+ Arg(Descriptor::kReceiver, p->receiver),
+ Arg(Descriptor::kName, p->name),
+ Arg(Descriptor::kSlot, p->slot),
+ Arg(Descriptor::kVector, p->vector));
+ }
+}
+
+void CodeStubAssembler::HandleLoadICSmiHandlerCase(
+ const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
+ ElementSupport support_elements) {
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
- Bind(&property);
- Comment("property_load");
+ Node* handler_word = SmiUntag(smi_handler);
+ Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
+ if (support_elements == kSupportElements) {
+ Label property(this);
+ GotoUnless(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
+ &property);
+
+ Comment("element_load");
+ Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* elements = LoadElements(holder);
+ Node* is_jsarray_condition =
+ IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
+ Node* elements_kind =
+ DecodeWord<LoadHandler::ElementsKindBits>(handler_word);
+ Label if_hole(this), unimplemented_elements_kind(this);
+ Label* out_of_bounds = miss;
+ EmitElementLoad(holder, elements, elements_kind, intptr_index,
+ is_jsarray_condition, &if_hole, &rebox_double,
+ &var_double_value, &unimplemented_elements_kind,
+ out_of_bounds, miss);
+
+ Bind(&unimplemented_elements_kind);
+ {
+ // Smi handlers should only be installed for supported elements kinds.
+ // Crash if we get here.
+ DebugBreak();
+ Goto(miss);
}
- // |handler_word| is a field index as obtained by
- // FieldIndex.GetLoadByFieldOffset():
- Label inobject_double(this), out_of_object(this),
- out_of_object_double(this);
- Node* inobject_bit =
- WordAnd(handler_word, IntPtrConstant(FieldOffsetIsInobject::kMask));
- Node* double_bit =
- WordAnd(handler_word, IntPtrConstant(FieldOffsetIsDouble::kMask));
- Node* offset =
- WordSar(handler_word, IntPtrConstant(FieldOffsetOffset::kShift));
-
- GotoIf(WordEqual(inobject_bit, IntPtrConstant(0)), &out_of_object);
-
- GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &inobject_double);
- Return(LoadObjectField(p->receiver, offset));
-
- Bind(&inobject_double);
- if (FLAG_unbox_double_fields) {
- var_double_value.Bind(
- LoadObjectField(p->receiver, offset, MachineType::Float64()));
- } else {
- Node* mutable_heap_number = LoadObjectField(p->receiver, offset);
- var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ Bind(&if_hole);
+ {
+ Comment("convert hole");
+ GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ GotoUnless(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ miss);
+ Return(UndefinedConstant());
}
- Goto(&rebox_double);
- Bind(&out_of_object);
- Node* properties = LoadProperties(p->receiver);
- Node* value = LoadObjectField(properties, offset);
- GotoUnless(WordEqual(double_bit, IntPtrConstant(0)), &out_of_object_double);
- Return(value);
+ Bind(&property);
+ Comment("property_load");
+ }
- Bind(&out_of_object_double);
- var_double_value.Bind(LoadHeapNumberValue(value));
- Goto(&rebox_double);
+ Label constant(this), field(this);
+ Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
+ &field, &constant);
+
+ Bind(&field);
+ {
+ Comment("field_load");
+ Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
+
+ Label inobject(this), out_of_object(this);
+ Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
+ &out_of_object);
+
+ Bind(&inobject);
+ {
+ Label is_double(this);
+ GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+ Return(LoadObjectField(holder, offset));
+
+ Bind(&is_double);
+ if (FLAG_unbox_double_fields) {
+ var_double_value.Bind(
+ LoadObjectField(holder, offset, MachineType::Float64()));
+ } else {
+ Node* mutable_heap_number = LoadObjectField(holder, offset);
+ var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ }
+ Goto(&rebox_double);
+ }
+
+ Bind(&out_of_object);
+ {
+ Label is_double(this);
+ Node* properties = LoadProperties(holder);
+ Node* value = LoadObjectField(properties, offset);
+ GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+ Return(value);
+
+ Bind(&is_double);
+ var_double_value.Bind(LoadHeapNumberValue(value));
+ Goto(&rebox_double);
+ }
Bind(&rebox_double);
Return(AllocateHeapNumberWithValue(var_double_value.value()));
}
- // |handler| is a heap object. Must be code, call it.
- Bind(&call_handler);
- typedef LoadWithVectorDescriptor Descriptor;
- TailCallStub(Descriptor(isolate()), handler, p->context,
- Arg(Descriptor::kReceiver, p->receiver),
- Arg(Descriptor::kName, p->name),
- Arg(Descriptor::kSlot, p->slot),
- Arg(Descriptor::kVector, p->vector));
+ Bind(&constant);
+ {
+ Comment("constant_load");
+ Node* descriptors = LoadMapDescriptors(LoadMap(holder));
+ Node* descriptor =
+ DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
+ CSA_ASSERT(this,
+ UintPtrLessThan(descriptor,
+ LoadAndUntagFixedArrayBaseLength(descriptors)));
+ Node* value =
+ LoadFixedArrayElement(descriptors, descriptor, 0, INTPTR_PARAMETERS);
+
+ Label if_accessor_info(this);
+ GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
+ &if_accessor_info);
+ Return(value);
+
+ Bind(&if_accessor_info);
+ Callable callable = CodeFactory::ApiGetter(isolate());
+ TailCallStub(callable, p->context, p->receiver, holder, value);
+ }
+}
+
+void CodeStubAssembler::HandleLoadICProtoHandler(
+ const LoadICParameters* p, Node* handler, Variable* var_holder,
+ Variable* var_smi_handler, Label* if_smi_handler, Label* miss) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+ // IC dispatchers rely on these assumptions to be held.
+ STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
+ LoadHandler::kSmiHandlerOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
+ LoadHandler::kValidityCellOffset);
+
+ // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+ Label validity_cell_check_done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, LoadHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+ &validity_cell_check_done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+ Goto(&validity_cell_check_done);
+
+ Bind(&validity_cell_check_done);
+ Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+ Node* handler_flags = SmiUntag(smi_handler);
+
+ Label check_prototypes(this);
+ GotoUnless(
+ IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
+ &check_prototypes);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(
+ HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+ // We have a dictionary receiver, do a negative lookup check.
+ NameDictionaryNegativeLookup(p->receiver, p->name, miss);
+ Goto(&check_prototypes);
+ }
+
+ Bind(&check_prototypes);
+ Node* maybe_holder_cell =
+ LoadObjectField(handler, LoadHandler::kHolderCellOffset);
+ Label array_handler(this), tuple_handler(this);
+ Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
+
+ Bind(&tuple_handler);
+ {
+ Label load_existent(this);
+ GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+ // This is a handler for a load of a non-existent value.
+ Return(UndefinedConstant());
+
+ Bind(&load_existent);
+ Node* holder = LoadWeakCellValue(maybe_holder_cell);
+ // The |holder| is guaranteed to be alive at this point since we passed
+ // both the receiver map check and the validity cell check.
+ CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+
+ var_holder->Bind(holder);
+ var_smi_handler->Bind(smi_handler);
+ Goto(if_smi_handler);
+ }
+
+ Bind(&array_handler);
+ {
+ typedef LoadICProtoArrayDescriptor Descriptor;
+ LoadICProtoArrayStub stub(isolate());
+ Node* target = HeapConstant(stub.GetCode());
+ TailCallStub(Descriptor(isolate()), target, p->context,
+ Arg(Descriptor::kReceiver, p->receiver),
+ Arg(Descriptor::kName, p->name),
+ Arg(Descriptor::kSlot, p->slot),
+ Arg(Descriptor::kVector, p->vector),
+ Arg(Descriptor::kHandler, handler));
+ }
+}
+
+void CodeStubAssembler::LoadICProtoArray(const LoadICParameters* p,
+ Node* handler) {
+ Label miss(this);
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+ CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+
+ Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+ Node* handler_flags = SmiUntag(smi_handler);
+
+ Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
+
+ Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
+ handler_flags, &miss);
+
+ HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
+
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+ p->slot, p->vector);
+ }
+}
+
+Node* CodeStubAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
+ Node* handler,
+ Node* handler_length,
+ Node* handler_flags,
+ Label* miss) {
+ Variable start_index(this, MachineType::PointerRepresentation());
+ start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+
+ Label can_access(this);
+ GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+ &can_access);
+ {
+ // Skip this entry of a handler.
+ start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+
+ int offset =
+ FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
+ Node* expected_native_context =
+ LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+ CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+ Node* native_context = LoadNativeContext(p->context);
+ GotoIf(WordEqual(expected_native_context, native_context), &can_access);
+ // If the receiver is not a JSGlobalProxy then we miss.
+ GotoUnless(IsJSGlobalProxy(p->receiver), miss);
+ // For JSGlobalProxy receiver try to compare security tokens of current
+ // and expected native contexts.
+ Node* expected_token = LoadContextElement(expected_native_context,
+ Context::SECURITY_TOKEN_INDEX);
+ Node* current_token =
+ LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+ Branch(WordEqual(expected_token, current_token), &can_access, miss);
+ }
+ Bind(&can_access);
+
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start_index.value(), handler_length,
+ [this, p, handler, miss](CodeStubAssembler*, Node* current) {
+ Node* prototype_cell =
+ LoadFixedArrayElement(handler, current, 0, INTPTR_PARAMETERS);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Node* maybe_holder_cell = LoadFixedArrayElement(
+ handler, IntPtrConstant(LoadHandler::kHolderCellIndex), 0,
+ INTPTR_PARAMETERS);
+ Label load_existent(this);
+ GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+ // This is a handler for a load of a non-existent value.
+ Return(UndefinedConstant());
+
+ Bind(&load_existent);
+ Node* holder = LoadWeakCellValue(maybe_holder_cell);
+ // The |holder| is guaranteed to be alive at this point since we passed
+ // the receiver map check, the validity cell check and the prototype chain
+ // check.
+ CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+ return holder;
+}
+
+void CodeStubAssembler::CheckPrototype(Node* prototype_cell, Node* name,
+ Label* miss) {
+ Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
+
+ Label done(this);
+ Label if_property_cell(this), if_dictionary_object(this);
+
+ // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
+ Branch(WordEqual(LoadMap(maybe_prototype),
+ LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
+ &if_property_cell, &if_dictionary_object);
+
+ Bind(&if_dictionary_object);
+ {
+ CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
+ NameDictionaryNegativeLookup(maybe_prototype, name, miss);
+ Goto(&done);
+ }
+
+ Bind(&if_property_cell);
+ {
+ // Ensure the property cell still contains the hole.
+ Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
+ GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
+ Goto(&done);
+ }
+
+ Bind(&done);
+}
+
+void CodeStubAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
+ Label* miss) {
+ CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+ Node* properties = LoadProperties(object);
+ // Ensure the property does not exist in a dictionary-mode object.
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label done(this);
+ NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
+ &done);
+ Bind(&done);
}
void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
@@ -4871,7 +6020,7 @@ void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
if_property_dictionary(this), if_found_on_receiver(this);
Node* receiver = p->receiver;
- GotoIf(WordIsSmi(receiver), &slow);
+ GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
// Receivers requiring non-standard element accesses (interceptors, access
@@ -4946,7 +6095,8 @@ void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
const int32_t kMaxLinear = 210;
Label stub_cache(this);
Node* bitfield3 = LoadMapBitField3(receiver_map);
- Node* nof = BitFieldDecodeWord<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ Node* nof =
+ DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
Node* descriptors = LoadMapDescriptors(receiver_map);
Variable var_name_index(this, MachineType::PointerRepresentation());
@@ -5017,6 +6167,262 @@ void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
}
}
+void CodeStubAssembler::HandleStoreFieldAndReturn(Node* handler_word,
+ Node* holder,
+ Representation representation,
+ Node* value, Node* transition,
+ Label* miss) {
+ bool transition_to_field = transition != nullptr;
+ Node* prepared_value = PrepareValueForWrite(value, representation, miss);
+
+ if (transition_to_field) {
+ Label storage_extended(this);
+ GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+ &storage_extended);
+ Comment("[ Extend storage");
+ ExtendPropertiesBackingStore(holder);
+ Comment("] Extend storage");
+ Goto(&storage_extended);
+
+ Bind(&storage_extended);
+ }
+
+ Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+ Label if_inobject(this), if_out_of_object(this);
+ Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
+ &if_out_of_object);
+
+ Bind(&if_inobject);
+ {
+ StoreNamedField(holder, offset, true, representation, prepared_value,
+ transition_to_field);
+ if (transition_to_field) {
+ StoreObjectField(holder, JSObject::kMapOffset, transition);
+ }
+ Return(value);
+ }
+
+ Bind(&if_out_of_object);
+ {
+ StoreNamedField(holder, offset, false, representation, prepared_value,
+ transition_to_field);
+ if (transition_to_field) {
+ StoreObjectField(holder, JSObject::kMapOffset, transition);
+ }
+ Return(value);
+ }
+}
+
+void CodeStubAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
+ Node* holder, Node* value,
+ Node* transition,
+ Label* miss) {
+ Comment(transition ? "transitioning field store" : "field store");
+
+#ifdef DEBUG
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ if (transition) {
+ CSA_ASSERT(
+ this,
+ WordOr(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToField)),
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToConstant))));
+ } else {
+ CSA_ASSERT(this, WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreField)));
+ }
+#endif
+
+ Node* field_representation =
+ DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+
+ Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
+ if_tagged_field(this);
+
+ GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
+ &if_tagged_field);
+ GotoIf(WordEqual(field_representation,
+ IntPtrConstant(StoreHandler::kHeapObject)),
+ &if_heap_object_field);
+ GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
+ &if_double_field);
+ CSA_ASSERT(this, WordEqual(field_representation,
+ IntPtrConstant(StoreHandler::kSmi)));
+ Goto(&if_smi_field);
+
+ Bind(&if_tagged_field);
+ {
+ Comment("store tagged field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+ value, transition, miss);
+ }
+
+ Bind(&if_double_field);
+ {
+ Comment("store double field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
+ value, transition, miss);
+ }
+
+ Bind(&if_heap_object_field);
+ {
+ Comment("store heap object field");
+ // Generate full field type check here and then store value as Tagged.
+ Node* prepared_value =
+ PrepareValueForWrite(value, Representation::HeapObject(), miss);
+ Node* value_index_in_descriptor =
+ DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+ Node* descriptors =
+ LoadMapDescriptors(transition ? transition : LoadMap(holder));
+ Node* maybe_field_type = LoadFixedArrayElement(
+ descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
+ Label do_store(this);
+ GotoIf(TaggedIsSmi(maybe_field_type), &do_store);
+ // Check that value type matches the field type.
+ {
+ Node* field_type = LoadWeakCellValue(maybe_field_type, miss);
+ Branch(WordEqual(LoadMap(prepared_value), field_type), &do_store, miss);
+ }
+ Bind(&do_store);
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+ prepared_value, transition, miss);
+ }
+
+ Bind(&if_smi_field);
+ {
+ Comment("store smi field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
+ value, transition, miss);
+ }
+}
+
+void CodeStubAssembler::HandleStoreICHandlerCase(const StoreICParameters* p,
+ Node* handler, Label* miss) {
+ Label if_smi_handler(this);
+ Label try_proto_handler(this), call_handler(this);
+
+ Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+ // |handler| is a Smi, encoding what to do. See SmiHandler methods
+ // for the encoding format.
+ Bind(&if_smi_handler);
+ {
+ Node* holder = p->receiver;
+ Node* handler_word = SmiUntag(handler);
+
+ // Handle non-transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+ }
+
+ Bind(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+ HandleStoreICProtoHandler(p, handler, miss);
+ }
+
+ // |handler| is a heap object. Must be code, call it.
+ Bind(&call_handler);
+ {
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+ }
+}
+
+void CodeStubAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
+ Node* handler, Label* miss) {
+ // IC dispatchers rely on these assumptions to be held.
+ STATIC_ASSERT(FixedArray::kLengthOffset ==
+ StoreHandler::kTransitionCellOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
+ StoreHandler::kSmiHandlerOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
+ StoreHandler::kValidityCellOffset);
+
+ // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+ Label validity_cell_check_done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, StoreHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+ &validity_cell_check_done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+ Goto(&validity_cell_check_done);
+
+ Bind(&validity_cell_check_done);
+ Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+
+ Node* maybe_transition_cell =
+ LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+ Label array_handler(this), tuple_handler(this);
+ Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+
+ Variable var_transition(this, MachineRepresentation::kTagged);
+ Label if_transition(this), if_transition_to_constant(this);
+ Bind(&tuple_handler);
+ {
+ Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition.Bind(transition);
+ Goto(&if_transition);
+ }
+
+ Bind(&array_handler);
+ {
+ Node* length = SmiUntag(maybe_transition_cell);
+ BuildFastLoop(MachineType::PointerRepresentation(),
+ IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+ [this, p, handler, miss](CodeStubAssembler*, Node* current) {
+ Node* prototype_cell = LoadFixedArrayElement(
+ handler, current, 0, INTPTR_PARAMETERS);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Node* maybe_transition_cell = LoadFixedArrayElement(
+ handler, IntPtrConstant(StoreHandler::kTransitionCellIndex), 0,
+ INTPTR_PARAMETERS);
+ Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition.Bind(transition);
+ Goto(&if_transition);
+ }
+
+ Bind(&if_transition);
+ {
+ Node* holder = p->receiver;
+ Node* transition = var_transition.value();
+ Node* handler_word = SmiUntag(smi_handler);
+
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
+
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToConstant)),
+ &if_transition_to_constant);
+
+ // Handle transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+ miss);
+
+ Bind(&if_transition_to_constant);
+ {
+ // Check that constant matches value.
+ Node* value_index_in_descriptor =
+ DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+ Node* descriptors = LoadMapDescriptors(transition);
+ Node* constant = LoadFixedArrayElement(
+ descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
+ GotoIf(WordNotEqual(p->value, constant), miss);
+
+ StoreObjectField(p->receiver, JSObject::kMapOffset, transition);
+ Return(p->value);
+ }
+ }
+}
+
void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
Variable var_handler(this, MachineRepresentation::kTagged);
// TODO(ishell): defer blocks when it works.
@@ -5033,9 +6439,7 @@ void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
Bind(&if_handler);
{
Comment("StoreIC_if_handler");
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
- p->name, p->value, p->slot, p->vector);
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss);
}
Bind(&try_polymorphic);
@@ -5066,15 +6470,95 @@ void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
}
}
+void CodeStubAssembler::KeyedStoreIC(const StoreICParameters* p,
+ LanguageMode language_mode) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // This is to make |miss| label see the var_handler bound on all paths.
+ var_handler.Bind(IntPtrConstant(0));
+
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ try_polymorphic_name(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ Comment("KeyedStoreIC_if_handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+ }
+
+ Bind(&try_polymorphic);
+ {
+ // CheckPolymorphic case.
+ Comment("KeyedStoreIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ Label if_transition_handler(this);
+ Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
+ HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
+ &var_handler, &if_transition_handler,
+ &var_transition_map_cell, &miss);
+ Bind(&if_transition_handler);
+ Comment("KeyedStoreIC_polymorphic_transition");
+ Node* transition_map =
+ LoadWeakCellValue(var_transition_map_cell.value(), &miss);
+ StoreTransitionDescriptor descriptor(isolate());
+ TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
+ p->name, transition_map, p->value, p->slot, p->vector);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ Comment("KeyedStoreIC_try_megamorphic");
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
+ TailCallStub(
+ CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+ p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+ }
+
+ Bind(&try_polymorphic_name);
+ {
+ // We might have a name in feedback, and a fixed array in the next slot.
+ Comment("KeyedStoreIC_try_polymorphic_name");
+ GotoUnless(WordEqual(feedback, p->name), &miss);
+ // If the name comparison succeeded, we know we have a FixedArray with
+ // at least one map/handler pair.
+ Node* offset = ElementOffsetFromIndex(
+ p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+ FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+ Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+ 1);
+ }
+
+ Bind(&miss);
+ {
+ Comment("KeyedStoreIC_miss");
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
Label try_handler(this), miss(this);
Node* weak_cell =
LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
- AssertInstanceType(weak_cell, WEAK_CELL_TYPE);
+ CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
// Load value or try handler case if the {weak_cell} is cleared.
Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
- AssertInstanceType(property_cell, PROPERTY_CELL_TYPE);
+ CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
GotoIf(WordEqual(value, TheHoleConstant()), &miss);
@@ -5088,7 +6572,7 @@ void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
&miss);
// In this case {handler} must be a Code object.
- AssertInstanceType(handler, CODE_TYPE);
+ CSA_ASSERT(this, HasInstanceType(handler, CODE_TYPE));
LoadWithVectorDescriptor descriptor(isolate());
Node* native_context = LoadNativeContext(p->context);
Node* receiver =
@@ -5120,8 +6604,9 @@ void CodeStubAssembler::ExtendPropertiesBackingStore(compiler::Node* object) {
FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
// The size of a new properties backing store is guaranteed to be small
// enough that the new backing store will be allocated in new space.
- Assert(UintPtrLessThan(new_capacity, IntPtrConstant(kMaxNumberOfDescriptors +
- JSObject::kFieldsAdded)));
+ CSA_ASSERT(this, UintPtrLessThan(new_capacity,
+ IntPtrConstant(kMaxNumberOfDescriptors +
+ JSObject::kFieldsAdded)));
Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
@@ -5140,30 +6625,13 @@ Node* CodeStubAssembler::PrepareValueForWrite(Node* value,
Representation representation,
Label* bailout) {
if (representation.IsDouble()) {
- Variable var_value(this, MachineRepresentation::kFloat64);
- Label if_smi(this), if_heap_object(this), done(this);
- Branch(WordIsSmi(value), &if_smi, &if_heap_object);
- Bind(&if_smi);
- {
- var_value.Bind(SmiToFloat64(value));
- Goto(&done);
- }
- Bind(&if_heap_object);
- {
- GotoUnless(
- Word32Equal(LoadInstanceType(value), Int32Constant(HEAP_NUMBER_TYPE)),
- bailout);
- var_value.Bind(LoadHeapNumberValue(value));
- Goto(&done);
- }
- Bind(&done);
- value = var_value.value();
+ value = TryTaggedToFloat64(value, bailout);
} else if (representation.IsHeapObject()) {
// Field type is checked by the handler, here we only check if the value
// is a heap object.
- GotoIf(WordIsSmi(value), bailout);
+ GotoIf(TaggedIsSmi(value), bailout);
} else if (representation.IsSmi()) {
- GotoUnless(WordIsSmi(value), bailout);
+ GotoUnless(TaggedIsSmi(value), bailout);
} else {
DCHECK(representation.IsTagged());
}
@@ -5245,7 +6713,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
bool is_load = value == nullptr;
- GotoUnless(WordIsSmi(key), bailout);
+ GotoUnless(TaggedIsSmi(key), bailout);
key = SmiUntag(key);
GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
@@ -5268,7 +6736,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
Bind(&if_mapped);
{
- Assert(WordIsSmi(mapped_index));
+ CSA_ASSERT(this, TaggedIsSmi(mapped_index));
mapped_index = SmiUntag(mapped_index);
Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
INTPTR_PARAMETERS);
@@ -5280,7 +6748,7 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
if (is_load) {
Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
INTPTR_PARAMETERS);
- Assert(WordNotEqual(result, TheHoleConstant()));
+ CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
var_result.Bind(result);
} else {
StoreFixedArrayElement(the_context, mapped_index, value,
@@ -5360,9 +6828,8 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
ParameterMode mode) {
if (IsFixedTypedArrayElementsKind(kind)) {
if (kind == UINT8_CLAMPED_ELEMENTS) {
-#ifdef DEBUG
- Assert(Word32Equal(value, Word32And(Int32Constant(0xff), value)));
-#endif
+ CSA_ASSERT(this,
+ Word32Equal(value, Word32And(Int32Constant(0xff), value)));
}
Node* offset = ElementOffsetFromIndex(index, kind, mode, 0);
MachineRepresentation rep = ElementsKindToMachineRepresentation(kind);
@@ -5440,7 +6907,7 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray(
Variable var_result(this, rep);
Label done(this, &var_result), if_smi(this);
- GotoIf(WordIsSmi(input), &if_smi);
+ GotoIf(TaggedIsSmi(input), &if_smi);
// Try to convert a heap number to a Smi.
GotoUnless(IsHeapNumberMap(LoadMap(input)), bailout);
{
@@ -5555,9 +7022,9 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// a smi before manipulating the backing store. Otherwise the backing store
// may be left in an invalid state.
if (IsFastSmiElementsKind(elements_kind)) {
- GotoUnless(WordIsSmi(value), bailout);
+ GotoUnless(TaggedIsSmi(value), bailout);
} else if (IsFastDoubleElementsKind(elements_kind)) {
- value = PrepareValueForWrite(value, Representation::Double(), bailout);
+ value = TryTaggedToFloat64(value, bailout);
}
if (IsGrowStoreMode(store_mode)) {
@@ -5695,42 +7162,44 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
Node* new_space_top_address = ExternalConstant(
ExternalReference::new_space_allocation_top_address(isolate()));
- const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoMapOffset = JSArray::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
Node* object_page = PageFromAddress(object);
{
- const int mask =
- (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
- Node* page_flags = Load(MachineType::IntPtr(), object_page);
- GotoIf(
- WordEqual(WordAnd(page_flags, IntPtrConstant(mask)), IntPtrConstant(0)),
- &no_memento_found);
+ Node* page_flags = Load(MachineType::IntPtr(), object_page,
+ IntPtrConstant(Page::kFlagsOffset));
+ GotoIf(WordEqual(WordAnd(page_flags,
+ IntPtrConstant(MemoryChunk::kIsInNewSpaceMask)),
+ IntPtrConstant(0)),
+ &no_memento_found);
}
- Node* memento_end = IntPtrAdd(object, IntPtrConstant(kMementoEndOffset));
- Node* memento_end_page = PageFromAddress(memento_end);
+ Node* memento_last_word = IntPtrAdd(
+ object, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
+ Node* memento_last_word_page = PageFromAddress(memento_last_word);
Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
Node* new_space_top_page = PageFromAddress(new_space_top);
- // If the object is in new space, we need to check whether it is and
- // respective potential memento object on the same page as the current top.
- GotoIf(WordEqual(memento_end_page, new_space_top_page), &top_check);
+ // If the object is in new space, we need to check whether respective
+ // potential memento object is on the same page as the current top.
+ GotoIf(WordEqual(memento_last_word_page, new_space_top_page), &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- Branch(WordEqual(object_page, memento_end_page), &map_check,
+ Branch(WordEqual(object_page, memento_last_word_page), &map_check,
&no_memento_found);
// If top is on the same page as the current object, we need to check whether
// we are below top.
Bind(&top_check);
{
- Branch(UintPtrGreaterThan(memento_end, new_space_top), &no_memento_found,
- &map_check);
+ Branch(UintPtrGreaterThanOrEqual(memento_last_word, new_space_top),
+ &no_memento_found, &map_check);
}
// Memento map check.
@@ -5750,8 +7219,9 @@ Node* CodeStubAssembler::PageFromAddress(Node* address) {
}
Node* CodeStubAssembler::EnumLength(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
Node* bitfield_3 = LoadMapBitField3(map);
- Node* enum_length = BitFieldDecode<Map::EnumLengthBits>(bitfield_3);
+ Node* enum_length = DecodeWordFromWord32<Map::EnumLengthBits>(bitfield_3);
return SmiTag(enum_length);
}
@@ -5773,8 +7243,8 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
Node* invalid_enum_cache_sentinel =
SmiConstant(Smi::FromInt(kInvalidEnumCacheSentinel));
Node* enum_length = EnumLength(current_map.value());
- BranchIfWordEqual(enum_length, invalid_enum_cache_sentinel, use_runtime,
- &loop);
+ Branch(WordEqual(enum_length, invalid_enum_cache_sentinel), use_runtime,
+ &loop);
}
// Check that there are no elements. |current_js_object| contains
@@ -5785,24 +7255,24 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
Node* elements = LoadElements(current_js_object.value());
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
// Check that there are no elements.
- BranchIfWordEqual(elements, empty_fixed_array, &if_no_elements,
- &if_elements);
+ Branch(WordEqual(elements, empty_fixed_array), &if_no_elements,
+ &if_elements);
Bind(&if_elements);
{
// Second chance, the object may be using the empty slow element
// dictionary.
Node* slow_empty_dictionary =
LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
- BranchIfWordNotEqual(elements, slow_empty_dictionary, use_runtime,
- &if_no_elements);
+ Branch(WordNotEqual(elements, slow_empty_dictionary), use_runtime,
+ &if_no_elements);
}
Bind(&if_no_elements);
{
// Update map prototype.
current_js_object.Bind(LoadMapPrototype(current_map.value()));
- BranchIfWordEqual(current_js_object.value(), NullConstant(), use_cache,
- &next);
+ Branch(WordEqual(current_js_object.value(), NullConstant()), use_cache,
+ &next);
}
}
@@ -5811,8 +7281,8 @@ void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
// For all objects but the receiver, check that the cache is empty.
current_map.Bind(LoadMap(current_js_object.value()));
Node* enum_length = EnumLength(current_map.value());
- Node* zero_constant = SmiConstant(Smi::FromInt(0));
- BranchIf(WordEqual(enum_length, zero_constant), &loop, use_runtime);
+ Node* zero_constant = SmiConstant(Smi::kZero);
+ Branch(WordEqual(enum_length, zero_constant), &loop, use_runtime);
}
}
@@ -5880,5 +7350,1999 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
return cell;
}
+void CodeStubAssembler::BuildFastLoop(
+ const CodeStubAssembler::VariableList& vars,
+ MachineRepresentation index_rep, Node* start_index, Node* end_index,
+ std::function<void(CodeStubAssembler* assembler, Node* index)> body,
+ int increment, IndexAdvanceMode mode) {
+ Variable var(this, index_rep);
+ VariableList vars_copy(vars, zone());
+ vars_copy.Add(&var, zone());
+ var.Bind(start_index);
+ Label loop(this, vars_copy);
+ Label after_loop(this);
+ // Introduce an explicit second check of the termination condition before the
+ // loop that helps turbofan generate better code. If there's only a single
+ // check, then the CodeStubAssembler forces it to be at the beginning of the
+ // loop requiring a backwards branch at the end of the loop (it's not possible
+ // to force the loop header check at the end of the loop and branch forward to
+ // it from the pre-header). The extra branch is slower in the case that the
+ // loop actually iterates.
+ Branch(WordEqual(var.value(), end_index), &after_loop, &loop);
+ Bind(&loop);
+ {
+ if (mode == IndexAdvanceMode::kPre) {
+ var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+ }
+ body(this, var.value());
+ if (mode == IndexAdvanceMode::kPost) {
+ var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+ }
+ Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
+ }
+ Bind(&after_loop);
+}
+
+void CodeStubAssembler::BuildFastFixedArrayForEach(
+ compiler::Node* fixed_array, ElementsKind kind,
+ compiler::Node* first_element_inclusive,
+ compiler::Node* last_element_exclusive,
+ std::function<void(CodeStubAssembler* assembler,
+ compiler::Node* fixed_array, compiler::Node* offset)>
+ body,
+ ParameterMode mode, ForEachDirection direction) {
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ int32_t first_val;
+ bool constant_first = ToInt32Constant(first_element_inclusive, first_val);
+ int32_t last_val;
+ bool constent_last = ToInt32Constant(last_element_exclusive, last_val);
+ if (constant_first && constent_last) {
+ int delta = last_val - first_val;
+ DCHECK(delta >= 0);
+ if (delta <= kElementLoopUnrollThreshold) {
+ if (direction == ForEachDirection::kForward) {
+ for (int i = first_val; i < last_val; ++i) {
+ Node* index = IntPtrConstant(i);
+ Node* offset =
+ ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ body(this, fixed_array, offset);
+ }
+ } else {
+ for (int i = last_val - 1; i >= first_val; --i) {
+ Node* index = IntPtrConstant(i);
+ Node* offset =
+ ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ body(this, fixed_array, offset);
+ }
+ }
+ return;
+ }
+ }
+
+ Node* start =
+ ElementOffsetFromIndex(first_element_inclusive, kind, mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ Node* limit =
+ ElementOffsetFromIndex(last_element_exclusive, kind, mode,
+ FixedArray::kHeaderSize - kHeapObjectTag);
+ if (direction == ForEachDirection::kReverse) std::swap(start, limit);
+
+ int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start, limit,
+ [fixed_array, body](CodeStubAssembler* assembler, Node* offset) {
+ body(assembler, fixed_array, offset);
+ },
+ direction == ForEachDirection::kReverse ? -increment : increment,
+ direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
+ : IndexAdvanceMode::kPost);
+}
+
+void CodeStubAssembler::BranchIfNumericRelationalComparison(
+ RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
+ Label* if_true, Label* if_false) {
+ typedef compiler::Node Node;
+
+ Label end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(this);
+ Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+ switch (mode) {
+ case kLessThan:
+ BranchIfSmiLessThan(lhs, rhs, if_true, if_false);
+ break;
+ case kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(lhs, rhs, if_true, if_false);
+ break;
+ case kGreaterThan:
+ BranchIfSmiLessThan(rhs, lhs, if_true, if_false);
+ break;
+ case kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(rhs, lhs, if_true, if_false);
+ break;
+ }
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+ }
+
+ Bind(&if_lhsisnotsmi);
+ {
+ CSA_ASSERT(this, WordEqual(LoadMap(lhs), HeapNumberMapConstant()));
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+ }
+
+ Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ switch (mode) {
+ case kLessThan:
+ Branch(Float64LessThan(lhs, rhs), if_true, if_false);
+ break;
+ case kLessThanOrEqual:
+ Branch(Float64LessThanOrEqual(lhs, rhs), if_true, if_false);
+ break;
+ case kGreaterThan:
+ Branch(Float64GreaterThan(lhs, rhs), if_true, if_false);
+ break;
+ case kGreaterThanOrEqual:
+ Branch(Float64GreaterThanOrEqual(lhs, rhs), if_true, if_false);
+ break;
+ }
+ }
+}
+
+void CodeStubAssembler::GotoUnlessNumberLessThan(compiler::Node* lhs,
+ compiler::Node* rhs,
+ Label* if_false) {
+ Label if_true(this);
+ BranchIfNumericRelationalComparison(kLessThan, lhs, rhs, &if_true, if_false);
+ Bind(&if_true);
+}
+
+compiler::Node* CodeStubAssembler::RelationalComparison(
+ RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context) {
+ typedef compiler::Node Node;
+
+ Label return_true(this), return_false(this), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(this);
+ Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(this, 2, loop_vars);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
+
+ // Check if the {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+ switch (mode) {
+ case kLessThan:
+ BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
+ break;
+ case kLessThanOrEqual:
+ BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true, &return_false);
+ break;
+ case kGreaterThan:
+ BranchIfSmiLessThan(rhs, lhs, &return_true, &return_false);
+ break;
+ case kGreaterThanOrEqual:
+ BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true, &return_false);
+ break;
+ }
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if the {rhs} is a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number; we don't need to perform the
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+ }
+
+ Bind(&if_lhsisnotsmi);
+ {
+ // Load the HeapNumber map for later comparisons.
+ Node* number_map = HeapNumberMapConstant();
+
+ // Load the map of {lhs}.
+ Node* lhs_map = LoadMap(lhs);
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Check if the {lhs} is a HeapNumber.
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+ Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+ &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_lhsisnotnumber);
+ {
+ // Convert the {lhs} to a Number; we don't need to perform the
+ // dedicated ToPrimitive(lhs, hint Number) operation, as the
+ // ToNumber(lhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+ Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+ &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(WordEqual(lhs_map, rhs_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert the {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // Convert the {rhs} to a Number; we don't need to perform
+ // dedicated ToPrimitive(rhs, hint Number) operation, as the
+ // ToNumber(rhs) will by itself already invoke ToPrimitive with
+ // a Number hint.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_lhsisnotnumber);
+ {
+ // Load the instance type of {lhs}.
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(this), if_lhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisnotstring);
+
+ Bind(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are strings.
+ switch (mode) {
+ case kLessThan:
+ result.Bind(CallStub(CodeFactory::StringLessThan(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case kLessThanOrEqual:
+ result.Bind(
+ CallStub(CodeFactory::StringLessThanOrEqual(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case kGreaterThan:
+ result.Bind(
+ CallStub(CodeFactory::StringGreaterThan(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ case kGreaterThanOrEqual:
+ result.Bind(
+ CallStub(CodeFactory::StringGreaterThanOrEqual(isolate()),
+ context, lhs, rhs));
+ Goto(&end);
+ break;
+ }
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // The {lhs} is a String, while {rhs} is neither a Number nor a
+ // String, so we need to call ToPrimitive(rhs, hint Number) if
+ // {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
+ // other cases.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first passing Number hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // Convert both {lhs} and {rhs} to Number.
+ Callable callable = CodeFactory::ToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+ }
+
+ Bind(&if_lhsisnotstring);
+ {
+ // The {lhs} is neither a Number nor a String, so we need to call
+ // ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
+ // ToNumber(lhs) and ToNumber(rhs) in the other cases.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
+
+ Bind(&if_lhsisreceiver);
+ {
+ // Convert {lhs} to a primitive first passing Number hint.
+ Callable callable = CodeFactory::NonPrimitiveToPrimitive(
+ isolate(), ToPrimitiveHint::kNumber);
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+
+ Bind(&if_lhsisnotreceiver);
+ {
+ // Convert both {lhs} and {rhs} to Number.
+ Callable callable = CodeFactory::ToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ switch (mode) {
+ case kLessThan:
+ Branch(Float64LessThan(lhs, rhs), &return_true, &return_false);
+ break;
+ case kLessThanOrEqual:
+ Branch(Float64LessThanOrEqual(lhs, rhs), &return_true, &return_false);
+ break;
+ case kGreaterThan:
+ Branch(Float64GreaterThan(lhs, rhs), &return_true, &return_false);
+ break;
+ case kGreaterThanOrEqual:
+ Branch(Float64GreaterThanOrEqual(lhs, rhs), &return_true,
+ &return_false);
+ break;
+ }
+ }
+
+ Bind(&return_true);
+ {
+ result.Bind(BooleanConstant(true));
+ Goto(&end);
+ }
+
+ Bind(&return_false);
+ {
+ result.Bind(BooleanConstant(false));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+namespace {
+
+void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+ CodeStubAssembler::Label* if_equal,
+ CodeStubAssembler::Label* if_notequal) {
+ // In case of abstract or strict equality checks, we need additional checks
+ // for NaN values because they are not considered equal, even if both the
+ // left and the right hand side reference exactly the same value.
+ // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
+ // seems to be what is tested in the current SIMD.js testsuite.
+
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ // Check if {value} is a Smi or a HeapObject.
+ Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_valueissmi,
+ &if_valueisnotsmi);
+
+ assembler->Bind(&if_valueisnotsmi);
+ {
+ // Load the map of {value}.
+ Node* value_map = assembler->LoadMap(value);
+
+ // Check if {value} (and therefore {rhs}) is a HeapNumber.
+ Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
+ &if_valueisnotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Convert {value} (and therefore {rhs}) to floating point value.
+ Node* value_value = assembler->LoadHeapNumberValue(value);
+
+ // Check if the HeapNumber value is a NaN.
+ assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+ }
+
+ assembler->Bind(&if_valueisnotnumber);
+ assembler->Goto(if_equal);
+ }
+
+ assembler->Bind(&if_valueissmi);
+ assembler->Goto(if_equal);
+}
+
+void GenerateEqual_Simd128Value_HeapObject(
+ CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
+ compiler::Node* rhs, compiler::Node* rhs_map,
+ CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+ assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
+ if_notequal);
+}
+
+} // namespace
+
+// ES6 section 7.2.12 Abstract Equality Comparison
+compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ // This is a slightly optimized version of Object::Equals represented as
+ // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
+ // change something functionality wise in here, remember to update the
+ // Object::Equals method as well.
+ typedef compiler::Node Node;
+
+ Label if_equal(this), if_notequal(this),
+ do_rhsstringtonumber(this, Label::kDeferred), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Shared entry for floating point comparison.
+ Label do_fcmp(this);
+ Variable var_fcmp_lhs(this, MachineRepresentation::kFloat64),
+ var_fcmp_rhs(this, MachineRepresentation::kFloat64);
+
+ // We might need to loop several times due to ToPrimitive and/or ToNumber
+ // conversions.
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged);
+ Variable* loop_vars[2] = {&var_lhs, &var_rhs};
+ Label loop(this, 2, loop_vars);
+ var_lhs.Bind(lhs);
+ var_rhs.Bind(rhs);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // Load the current {lhs} and {rhs} values.
+ lhs = var_lhs.value();
+ rhs = var_rhs.value();
+
+ // Check if {lhs} and {rhs} refer to the same object.
+ Label if_same(this), if_notsame(this);
+ Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ Bind(&if_same);
+ {
+ // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // treatment for HeapNumber, as NaN is not equal to NaN.
+ GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_notsame);
+ {
+ // Check if {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsissmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ // We have already checked for {lhs} and {rhs} being the same value, so
+ // if both are Smis when we get here they must not be equal.
+ Goto(&if_notequal);
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {rhs} is a HeapNumber.
+ Node* number_map = HeapNumberMapConstant();
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(SmiToFloat64(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // Load the instance type of the {rhs}.
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ // Check if the {rhs} is a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // The {rhs} is a String and the {lhs} is a Smi; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ Goto(&do_rhsstringtonumber);
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // Check if the {rhs} is a Boolean.
+ Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+ Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
+ &if_rhsisnotboolean);
+
+ Bind(&if_rhsisboolean);
+ {
+ // The {rhs} is a Boolean, load its number value.
+ var_rhs.Bind(LoadObjectField(rhs, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotboolean);
+ {
+ // Check if the {rhs} is a Receiver.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // Convert {rhs} to a primitive first (passing no hint).
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ Goto(&if_notequal);
+ }
+ }
+ }
+ }
+ }
+
+ Bind(&if_lhsisnotsmi);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
+ // and {rhs} is not observable and doesn't matter for the result, so
+ // we can just swap them and use the Smi handling above (for {lhs}
+ // being a Smi).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ Label if_lhsisstring(this), if_lhsisnumber(this),
+ if_lhsissymbol(this), if_lhsissimd128value(this),
+ if_lhsisoddball(this), if_lhsisreceiver(this);
+
+ // Both {lhs} and {rhs} are HeapObjects, load their maps
+ // and their instance types.
+ Node* lhs_map = LoadMap(lhs);
+ Node* rhs_map = LoadMap(rhs);
+
+ // Load the instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+
+ // Dispatch based on the instance type of {lhs}.
+ size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+ Label* case_labels[kNumCases];
+ int32_t case_values[kNumCases];
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ case_labels[i] = new Label(this);
+ case_values[i] = i;
+ }
+ case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
+ case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
+ case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
+ case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
+ case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
+ case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+ Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
+ arraysize(case_values));
+ for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
+ Bind(case_labels[i]);
+ Goto(&if_lhsisstring);
+ delete case_labels[i];
+ }
+
+ Bind(&if_lhsisstring);
+ {
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // Both {lhs} and {rhs} are of type String, just do the
+ // string comparison then.
+ Callable callable = (mode == kDontNegateResult)
+ ? CodeFactory::StringEqual(isolate())
+ : CodeFactory::StringNotEqual(isolate());
+ result.Bind(CallStub(callable, context, lhs, rhs));
+ Goto(&end);
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // The {lhs} is a String and the {rhs} is some other HeapObject.
+ // Swapping {lhs} and {rhs} is not observable and doesn't matter
+ // for the result, so we can just swap them and use the String
+ // handling below (for {rhs} being a String).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values, and
+ // perform a floating point comparison.
+ var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fcmp);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ {
+ // The {lhs} is a Number, the {rhs} is some other HeapObject.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ // The {rhs} is a String and the {lhs} is a HeapNumber; we need
+ // to convert the {rhs} to a Number and compare the output to
+ // the Number on the {lhs}.
+ Goto(&do_rhsstringtonumber);
+ }
+
+ Bind(&if_rhsisnotstring);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a
+ // JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // Check if {rhs} is a Boolean.
+ Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+ Branch(IsBooleanMap(rhs_map), &if_rhsisboolean,
+ &if_rhsisnotboolean);
+
+ Bind(&if_rhsisboolean);
+ {
+ // The {rhs} is a Boolean, convert it to a Smi first.
+ var_rhs.Bind(
+ LoadObjectField(rhs, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotboolean);
+ Goto(&if_notequal);
+ }
+ }
+ }
+ }
+
+ Bind(&if_lhsisoddball);
+ {
+ // The {lhs} is an Oddball and {rhs} is some other HeapObject.
+ Label if_lhsisboolean(this), if_lhsisnotboolean(this);
+ Node* boolean_map = BooleanMapConstant();
+ Branch(WordEqual(lhs_map, boolean_map), &if_lhsisboolean,
+ &if_lhsisnotboolean);
+
+ Bind(&if_lhsisboolean);
+ {
+ // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
+ Label if_rhsisboolean(this), if_rhsisnotboolean(this);
+ Branch(WordEqual(rhs_map, boolean_map), &if_rhsisboolean,
+ &if_rhsisnotboolean);
+
+ Bind(&if_rhsisboolean);
+ {
+ // Both {lhs} and {rhs} are distinct Boolean values.
+ Goto(&if_notequal);
+ }
+
+ Bind(&if_rhsisnotboolean);
+ {
+ // Convert the {lhs} to a Number first.
+ var_lhs.Bind(LoadObjectField(lhs, Oddball::kToNumberOffset));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&if_lhsisnotboolean);
+ {
+ // The {lhs} is either Null or Undefined; check if the {rhs} is
+ // undetectable (i.e. either also Null or Undefined or some
+ // undetectable JSReceiver).
+ Node* rhs_bitfield = LoadMapBitField(rhs_map);
+ Branch(Word32Equal(
+ Word32And(rhs_bitfield,
+ Int32Constant(1 << Map::kIsUndetectable)),
+ Int32Constant(0)),
+ &if_notequal, &if_equal);
+ }
+ }
+
+ Bind(&if_lhsissymbol);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // The {rhs} is not a JSReceiver and also not the same Symbol
+          // as the {lhs}, so this equality check is considered false.
+ Goto(&if_notequal);
+ }
+ }
+
+ Bind(&if_lhsissimd128value);
+ {
+ // Check if the {rhs} is also a Simd128Value.
+ Label if_rhsissimd128value(this), if_rhsisnotsimd128value(this);
+ Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
+ &if_rhsissimd128value, &if_rhsisnotsimd128value);
+
+ Bind(&if_rhsissimd128value);
+ {
+            // Both {lhs} and {rhs} are Simd128Values.
+ GenerateEqual_Simd128Value_HeapObject(
+ this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotsimd128value);
+ {
+ // Check if the {rhs} is a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
+ // Swapping {lhs} and {rhs} is not observable and doesn't
+ // matter for the result, so we can just swap them and use
+ // the JSReceiver handling below (for {lhs} being a JSReceiver).
+ var_lhs.Bind(rhs);
+ var_rhs.Bind(lhs);
+ Goto(&loop);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // The {rhs} is some other Primitive.
+ Goto(&if_notequal);
+ }
+ }
+ }
+
+ Bind(&if_lhsisreceiver);
+ {
+ // Check if the {rhs} is also a JSReceiver.
+ Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
+
+ Bind(&if_rhsisreceiver);
+ {
+ // Both {lhs} and {rhs} are different JSReceiver references, so
+ // this cannot be considered equal.
+ Goto(&if_notequal);
+ }
+
+ Bind(&if_rhsisnotreceiver);
+ {
+ // Check if {rhs} is Null or Undefined (an undetectable check
+ // is sufficient here, since we already know that {rhs} is not
+ // a JSReceiver).
+ Label if_rhsisundetectable(this),
+ if_rhsisnotundetectable(this, Label::kDeferred);
+ Node* rhs_bitfield = LoadMapBitField(rhs_map);
+ Branch(Word32Equal(
+ Word32And(rhs_bitfield,
+ Int32Constant(1 << Map::kIsUndetectable)),
+ Int32Constant(0)),
+ &if_rhsisnotundetectable, &if_rhsisundetectable);
+
+ Bind(&if_rhsisundetectable);
+ {
+ // Check if {lhs} is an undetectable JSReceiver.
+ Node* lhs_bitfield = LoadMapBitField(lhs_map);
+ Branch(Word32Equal(
+ Word32And(lhs_bitfield,
+ Int32Constant(1 << Map::kIsUndetectable)),
+ Int32Constant(0)),
+ &if_notequal, &if_equal);
+ }
+
+ Bind(&if_rhsisnotundetectable);
+ {
+ // The {rhs} is some Primitive different from Null and
+ // Undefined, need to convert {lhs} to Primitive first.
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Bind(&do_rhsstringtonumber);
+ {
+ Callable callable = CodeFactory::StringToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
+ }
+ }
+
+ Bind(&do_fcmp);
+ {
+ // Load the {lhs} and {rhs} floating point values.
+ Node* lhs = var_fcmp_lhs.value();
+ Node* rhs = var_fcmp_rhs.value();
+
+ // Perform a fast floating point comparison.
+ Branch(Float64Equal(lhs, rhs), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_equal);
+ {
+ result.Bind(BooleanConstant(mode == kDontNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&if_notequal);
+ {
+ result.Bind(BooleanConstant(mode == kNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
+ compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ // Here's pseudo-code for the algorithm below in case of kDontNegateResult
+ // mode; for kNegateResult mode we properly negate the result.
+ //
+ // if (lhs == rhs) {
+ // if (lhs->IsHeapNumber()) return HeapNumber::cast(lhs)->value() != NaN;
+ // return true;
+ // }
+ // if (!lhs->IsSmi()) {
+ // if (lhs->IsHeapNumber()) {
+ // if (rhs->IsSmi()) {
+ // return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
+ // } else if (rhs->IsHeapNumber()) {
+ // return HeapNumber::cast(rhs)->value() ==
+ // HeapNumber::cast(lhs)->value();
+ // } else {
+ // return false;
+ // }
+ // } else {
+ // if (rhs->IsSmi()) {
+ // return false;
+ // } else {
+ // if (lhs->IsString()) {
+ // if (rhs->IsString()) {
+ // return %StringEqual(lhs, rhs);
+ // } else {
+ // return false;
+ // }
+ // } else if (lhs->IsSimd128()) {
+ // if (rhs->IsSimd128()) {
+ // return %StrictEqual(lhs, rhs);
+ // }
+ // } else {
+ // return false;
+ // }
+ // }
+ // }
+ // } else {
+ // if (rhs->IsSmi()) {
+ // return false;
+ // } else {
+ // if (rhs->IsHeapNumber()) {
+ // return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
+ // } else {
+ // return false;
+ // }
+ // }
+ // }
+
+ typedef compiler::Node Node;
+
+ Label if_equal(this), if_notequal(this), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Check if {lhs} and {rhs} refer to the same object.
+ Label if_same(this), if_notsame(this);
+ Branch(WordEqual(lhs, rhs), &if_same, &if_notsame);
+
+ Bind(&if_same);
+ {
+ // The {lhs} and {rhs} reference the exact same value, yet we need special
+ // treatment for HeapNumber, as NaN is not equal to NaN.
+ GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_notsame);
+ {
+ // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
+ // String and Simd128Value they can still be considered equal.
+ Node* number_map = HeapNumberMapConstant();
+
+ // Check if {lhs} is a Smi or a HeapObject.
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+
+ Bind(&if_lhsisnotsmi);
+ {
+ // Load the map of {lhs}.
+ Node* lhs_map = LoadMap(lhs);
+
+ // Check if {lhs} is a HeapNumber.
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+ Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
+ &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = LoadHeapNumberValue(lhs);
+ Node* rhs_value = SmiToFloat64(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {rhs} is also a HeapNumber.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = LoadHeapNumberValue(lhs);
+ Node* rhs_value = LoadHeapNumberValue(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ Goto(&if_notequal);
+ }
+ }
+
+ Bind(&if_lhsisnotnumber);
+ {
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ Goto(&if_notequal);
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the instance type of {lhs}.
+ Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+
+ // Check if {lhs} is a String.
+ Label if_lhsisstring(this), if_lhsisnotstring(this);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisnotstring);
+
+ Bind(&if_lhsisstring);
+ {
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Check if {rhs} is also a String.
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
+
+ Bind(&if_rhsisstring);
+ {
+ Callable callable = (mode == kDontNegateResult)
+ ? CodeFactory::StringEqual(isolate())
+ : CodeFactory::StringNotEqual(isolate());
+ result.Bind(CallStub(callable, context, lhs, rhs));
+ Goto(&end);
+ }
+
+ Bind(&if_rhsisnotstring);
+ Goto(&if_notequal);
+ }
+
+ Bind(&if_lhsisnotstring);
+ {
+ // Check if {lhs} is a Simd128Value.
+ Label if_lhsissimd128value(this), if_lhsisnotsimd128value(this);
+ Branch(Word32Equal(lhs_instance_type,
+ Int32Constant(SIMD128_VALUE_TYPE)),
+ &if_lhsissimd128value, &if_lhsisnotsimd128value);
+
+ Bind(&if_lhsissimd128value);
+ {
+ // Load the map of {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
+ GenerateEqual_Simd128Value_HeapObject(
+ this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
+ }
+
+ Bind(&if_lhsisnotsimd128value);
+ Goto(&if_notequal);
+ }
+ }
+ }
+ }
+
+ Bind(&if_lhsissmi);
+ {
+ // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
+ // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
+ // HeapNumber with an equal floating point value.
+
+ // Check if {rhs} is a Smi or a HeapObject.
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+
+ Bind(&if_rhsissmi);
+ Goto(&if_notequal);
+
+ Bind(&if_rhsisnotsmi);
+ {
+ // Load the map of the {rhs}.
+ Node* rhs_map = LoadMap(rhs);
+
+ // The {rhs} could be a HeapNumber with the same value as {lhs}.
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this);
+ Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
+ &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
+ {
+ // Convert {lhs} and {rhs} to floating point values.
+ Node* lhs_value = SmiToFloat64(lhs);
+ Node* rhs_value = LoadHeapNumberValue(rhs);
+
+ // Perform a floating point comparison of {lhs} and {rhs}.
+ Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
+ }
+
+ Bind(&if_rhsisnotnumber);
+ Goto(&if_notequal);
+ }
+ }
+ }
+
+ Bind(&if_equal);
+ {
+ result.Bind(BooleanConstant(mode == kDontNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&if_notequal);
+ {
+ result.Bind(BooleanConstant(mode == kNegateResult));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+// ECMA#sec-samevalue
+// This algorithm differs from the Strict Equality Comparison Algorithm in its
+// treatment of signed zeroes and NaNs.
+compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
+ compiler::Node* rhs,
+ compiler::Node* context) {
+ Variable var_result(this, MachineType::PointerRepresentation());
+ Label strict_equal(this), out(this);
+
+ Node* const int_false = IntPtrConstant(0);
+ Node* const int_true = IntPtrConstant(1);
+
+ Label if_equal(this), if_notequal(this);
+ Branch(WordEqual(lhs, rhs), &if_equal, &if_notequal);
+
+ Bind(&if_equal);
+ {
+ // This covers the case when {lhs} == {rhs}. We can simply return true
+ // because SameValue considers two NaNs to be equal.
+
+ var_result.Bind(int_true);
+ Goto(&out);
+ }
+
+ Bind(&if_notequal);
+ {
+ // This covers the case when {lhs} != {rhs}. We only handle numbers here
+ // and defer to StrictEqual for the rest.
+
+ Node* const lhs_float = TryTaggedToFloat64(lhs, &strict_equal);
+ Node* const rhs_float = TryTaggedToFloat64(rhs, &strict_equal);
+
+ Label if_lhsisnan(this), if_lhsnotnan(this);
+ BranchIfFloat64IsNaN(lhs_float, &if_lhsisnan, &if_lhsnotnan);
+
+ Bind(&if_lhsisnan);
+ {
+ // Return true iff {rhs} is NaN.
+
+ Node* const result =
+ Select(Float64Equal(rhs_float, rhs_float), int_false, int_true,
+ MachineType::PointerRepresentation());
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&if_lhsnotnan);
+ {
+ Label if_floatisequal(this), if_floatnotequal(this);
+ Branch(Float64Equal(lhs_float, rhs_float), &if_floatisequal,
+ &if_floatnotequal);
+
+ Bind(&if_floatisequal);
+ {
+ // We still need to handle the case when {lhs} and {rhs} are -0.0 and
+ // 0.0 (or vice versa). Compare the high word to
+ // distinguish between the two.
+
+ Node* const lhs_hi_word = Float64ExtractHighWord32(lhs_float);
+ Node* const rhs_hi_word = Float64ExtractHighWord32(rhs_float);
+
+ // If x is +0 and y is -0, return false.
+ // If x is -0 and y is +0, return false.
+
+ Node* const result = Word32Equal(lhs_hi_word, rhs_hi_word);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&if_floatnotequal);
+ {
+ var_result.Bind(int_false);
+ Goto(&out);
+ }
+ }
+ }
+
+ Bind(&strict_equal);
+ {
+ Node* const is_equal = StrictEqual(kDontNegateResult, lhs, rhs, context);
+ Node* const result = WordEqual(is_equal, TrueConstant());
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::ForInFilter(compiler::Node* key,
+ compiler::Node* object,
+ compiler::Node* context) {
+ Label return_undefined(this, Label::kDeferred), return_to_name(this),
+ end(this);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+
+ Node* has_property =
+ HasProperty(object, key, context, Runtime::kForInHasProperty);
+
+ Branch(WordEqual(has_property, BooleanConstant(true)), &return_to_name,
+ &return_undefined);
+
+ Bind(&return_to_name);
+ {
+ var_result.Bind(ToName(context, key));
+ Goto(&end);
+ }
+
+ Bind(&return_undefined);
+ {
+ var_result.Bind(UndefinedConstant());
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::HasProperty(
+ compiler::Node* object, compiler::Node* key, compiler::Node* context,
+ Runtime::FunctionId fallback_runtime_function_id) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Label call_runtime(this, Label::kDeferred), return_true(this),
+ return_false(this), end(this);
+
+ CodeStubAssembler::LookupInHolder lookup_property_in_holder =
+ [this, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* unique_name,
+ Label* next_holder, Label* if_bailout) {
+ TryHasOwnProperty(holder, holder_map, holder_instance_type, unique_name,
+ &return_true, next_holder, if_bailout);
+ };
+
+ CodeStubAssembler::LookupInHolder lookup_element_in_holder =
+ [this, &return_true](Node* receiver, Node* holder, Node* holder_map,
+ Node* holder_instance_type, Node* index,
+ Label* next_holder, Label* if_bailout) {
+ TryLookupElement(holder, holder_map, holder_instance_type, index,
+ &return_true, next_holder, if_bailout);
+ };
+
+ TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &return_false,
+ &call_runtime);
+
+ Variable result(this, MachineRepresentation::kTagged);
+ Bind(&return_true);
+ {
+ result.Bind(BooleanConstant(true));
+ Goto(&end);
+ }
+
+ Bind(&return_false);
+ {
+ result.Bind(BooleanConstant(false));
+ Goto(&end);
+ }
+
+ Bind(&call_runtime);
+ {
+ result.Bind(
+ CallRuntime(fallback_runtime_function_id, context, object, key));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+compiler::Node* CodeStubAssembler::Typeof(compiler::Node* value,
+ compiler::Node* context) {
+ Variable result_var(this, MachineRepresentation::kTagged);
+
+ Label return_number(this, Label::kDeferred), if_oddball(this),
+ return_function(this), return_undefined(this), return_object(this),
+ return_string(this), return_result(this);
+
+ GotoIf(TaggedIsSmi(value), &return_number);
+
+ Node* map = LoadMap(value);
+
+ GotoIf(IsHeapNumberMap(map), &return_number);
+
+ Node* instance_type = LoadMapInstanceType(map);
+
+ GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)), &if_oddball);
+
+ Node* callable_or_undetectable_mask = Word32And(
+ LoadMapBitField(map),
+ Int32Constant(1 << Map::kIsCallable | 1 << Map::kIsUndetectable));
+
+ GotoIf(Word32Equal(callable_or_undetectable_mask,
+ Int32Constant(1 << Map::kIsCallable)),
+ &return_function);
+
+ GotoUnless(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
+ &return_undefined);
+
+ GotoIf(IsJSReceiverInstanceType(instance_type), &return_object);
+
+ GotoIf(IsStringInstanceType(instance_type), &return_string);
+
+#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
+ Label return_##type(this); \
+ Node* type##_map = HeapConstant(factory()->type##_map()); \
+ GotoIf(WordEqual(map, type##_map), &return_##type);
+ SIMD128_TYPES(SIMD128_BRANCH)
+#undef SIMD128_BRANCH
+
+ CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+ result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
+ Goto(&return_result);
+
+ Bind(&return_number);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->number_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&if_oddball);
+ {
+ Node* type = LoadObjectField(value, Oddball::kTypeOfOffset);
+ result_var.Bind(type);
+ Goto(&return_result);
+ }
+
+ Bind(&return_function);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->function_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&return_undefined);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->undefined_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&return_object);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->object_string()));
+ Goto(&return_result);
+ }
+
+ Bind(&return_string);
+ {
+ result_var.Bind(HeapConstant(isolate()->factory()->string_string()));
+ Goto(&return_result);
+ }
+
+#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
+ Bind(&return_##type); \
+ { \
+ result_var.Bind(HeapConstant(isolate()->factory()->type##_string())); \
+ Goto(&return_result); \
+ }
+ SIMD128_TYPES(SIMD128_BIND_RETURN)
+#undef SIMD128_BIND_RETURN
+
+ Bind(&return_result);
+ return result_var.value();
+}
+
+compiler::Node* CodeStubAssembler::InstanceOf(compiler::Node* object,
+ compiler::Node* callable,
+ compiler::Node* context) {
+ Label return_runtime(this, Label::kDeferred), end(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ // Check if no one installed @@hasInstance somewhere.
+ GotoUnless(
+ WordEqual(LoadObjectField(LoadRoot(Heap::kHasInstanceProtectorRootIndex),
+ PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ &return_runtime);
+
+ // Check if {callable} is a valid receiver.
+ GotoIf(TaggedIsSmi(callable), &return_runtime);
+ GotoUnless(IsCallableMap(LoadMap(callable)), &return_runtime);
+
+ // Use the inline OrdinaryHasInstance directly.
+ result.Bind(OrdinaryHasInstance(context, callable, object));
+ Goto(&end);
+
+ // TODO(bmeurer): Use GetPropertyStub here once available.
+ Bind(&return_runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kInstanceOf, context, object, callable));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
+ Variable var_result(this, MachineRepresentation::kTagged),
+ var_finc_value(this, MachineRepresentation::kFloat64);
+ Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
+ Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ Bind(&if_issmi);
+ {
+ // Try fast Smi addition first.
+ Node* one = SmiConstant(Smi::FromInt(1));
+ Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(value),
+ BitcastTaggedToWord(one));
+ Node* overflow = Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ Bind(&if_notoverflow);
+ var_result.Bind(Projection(0, pair));
+ Goto(&end);
+
+ Bind(&if_overflow);
+ {
+ var_finc_value.Bind(SmiToFloat64(value));
+ Goto(&do_finc);
+ }
+ }
+
+ Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value)));
+
+ // Load the HeapNumber value.
+ var_finc_value.Bind(LoadHeapNumberValue(value));
+ Goto(&do_finc);
+ }
+
+ Bind(&do_finc);
+ {
+ Node* finc_value = var_finc_value.value();
+ Node* one = Float64Constant(1.0);
+ Node* finc_result = Float64Add(finc_value, one);
+ var_result.Bind(AllocateHeapNumberWithValue(finc_result));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::CreateArrayIterator(
+ compiler::Node* array, compiler::Node* array_map,
+ compiler::Node* array_type, compiler::Node* context, IterationKind mode) {
+ int kBaseMapIndex = 0;
+ switch (mode) {
+ case IterationKind::kKeys:
+ kBaseMapIndex = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
+ break;
+ case IterationKind::kValues:
+ kBaseMapIndex = Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ break;
+ case IterationKind::kEntries:
+ kBaseMapIndex = Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+ break;
+ }
+
+ // Fast Array iterator map index:
+ // (kBaseIndex + kFastIteratorOffset) + ElementsKind (for JSArrays)
+ // kBaseIndex + (ElementsKind - UINT8_ELEMENTS) (for JSTypedArrays)
+ const int kFastIteratorOffset =
+ Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ STATIC_ASSERT(kFastIteratorOffset ==
+ (Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
+
+ // Slow Array iterator map index: (kBaseIndex + kSlowIteratorOffset)
+ const int kSlowIteratorOffset =
+ Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ STATIC_ASSERT(kSlowIteratorOffset ==
+ (Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX -
+ Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX));
+
+ // Assert: Type(array) is Object
+ CSA_ASSERT(this, IsJSReceiverInstanceType(array_type));
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Variable var_map_index(this, MachineType::PointerRepresentation());
+ Variable var_array_map(this, MachineRepresentation::kTagged);
+
+ Label return_result(this);
+ Label allocate_iterator(this);
+
+ if (mode == IterationKind::kKeys) {
+    // There are only two key iterator maps, so branch depending on whether
+    // the receiver is a TypedArray or not.
+
+ Label if_istypedarray(this), if_isgeneric(this);
+
+ Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &if_istypedarray, &if_isgeneric);
+
+ Bind(&if_isgeneric);
+ {
+ Label if_isfast(this), if_isslow(this);
+ BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+
+ Bind(&if_isfast);
+ {
+ var_map_index.Bind(
+ IntPtrConstant(Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX));
+ var_array_map.Bind(array_map);
+ Goto(&allocate_iterator);
+ }
+
+ Bind(&if_isslow);
+ {
+ var_map_index.Bind(
+ IntPtrConstant(Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX));
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&if_istypedarray);
+ {
+ var_map_index.Bind(
+ IntPtrConstant(Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX));
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ } else {
+ Label if_istypedarray(this), if_isgeneric(this);
+ Branch(Word32Equal(array_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ &if_istypedarray, &if_isgeneric);
+
+ Bind(&if_isgeneric);
+ {
+ Label if_isfast(this), if_isslow(this);
+ BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+
+ Bind(&if_isfast);
+ {
+ Label if_ispacked(this), if_isholey(this);
+ Node* elements_kind = LoadMapElementsKind(array_map);
+ Branch(IsHoleyFastElementsKind(elements_kind), &if_isholey,
+ &if_ispacked);
+
+ Bind(&if_isholey);
+ {
+ // Fast holey JSArrays can treat the hole as undefined if the
+ // protector cell is valid, and the prototype chain is unchanged from
+ // its initial state (because the protector cell is only tracked for
+            // the initial Array and Object prototypes). Check these conditions
+ // here, and take the slow path if any fail.
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ GotoUnless(
+ WordEqual(
+ LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ &if_isslow);
+
+ Node* native_context = LoadNativeContext(context);
+
+ Node* prototype = LoadMapPrototype(array_map);
+ Node* array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ GotoUnless(WordEqual(prototype, array_prototype), &if_isslow);
+
+ Node* map = LoadMap(prototype);
+ prototype = LoadMapPrototype(map);
+ Node* object_prototype = LoadContextElement(
+ native_context, Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
+ GotoUnless(WordEqual(prototype, object_prototype), &if_isslow);
+
+ map = LoadMap(prototype);
+ prototype = LoadMapPrototype(map);
+ Branch(IsNull(prototype), &if_ispacked, &if_isslow);
+ }
+ Bind(&if_ispacked);
+ {
+ Node* map_index =
+ IntPtrAdd(IntPtrConstant(kBaseMapIndex + kFastIteratorOffset),
+ LoadMapElementsKind(array_map));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
+ map_index, IntPtrConstant(kBaseMapIndex +
+ kFastIteratorOffset)));
+ CSA_ASSERT(this, IntPtrLessThan(map_index,
+ IntPtrConstant(kBaseMapIndex +
+ kSlowIteratorOffset)));
+
+ var_map_index.Bind(map_index);
+ var_array_map.Bind(array_map);
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&if_isslow);
+ {
+ Node* map_index = IntPtrAdd(IntPtrConstant(kBaseMapIndex),
+ IntPtrConstant(kSlowIteratorOffset));
+ var_map_index.Bind(map_index);
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&if_istypedarray);
+ {
+ Node* map_index =
+ IntPtrAdd(IntPtrConstant(kBaseMapIndex - UINT8_ELEMENTS),
+ LoadMapElementsKind(array_map));
+ CSA_ASSERT(
+ this, IntPtrLessThan(map_index, IntPtrConstant(kBaseMapIndex +
+ kFastIteratorOffset)));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(map_index,
+ IntPtrConstant(kBaseMapIndex)));
+ var_map_index.Bind(map_index);
+ var_array_map.Bind(UndefinedConstant());
+ Goto(&allocate_iterator);
+ }
+ }
+
+ Bind(&allocate_iterator);
+ {
+ Node* map =
+ LoadFixedArrayElement(LoadNativeContext(context), var_map_index.value(),
+ 0, CodeStubAssembler::INTPTR_PARAMETERS);
+ var_result.Bind(AllocateJSArrayIterator(array, var_array_map.value(), map));
+ Goto(&return_result);
+ }
+
+ Bind(&return_result);
+ return var_result.value();
+}
+
+compiler::Node* CodeStubAssembler::AllocateJSArrayIterator(
+ compiler::Node* array, compiler::Node* array_map, compiler::Node* map) {
+ Node* iterator = Allocate(JSArrayIterator::kSize);
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(iterator,
+ JSArrayIterator::kIteratedObjectOffset, array);
+ StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset,
+ SmiConstant(Smi::FromInt(0)));
+ StoreObjectFieldNoWriteBarrier(
+ iterator, JSArrayIterator::kIteratedObjectMapOffset, array_map);
+ return iterator;
+}
+
+compiler::Node* CodeStubAssembler::IsDetachedBuffer(compiler::Node* buffer) {
+ CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
+
+ Node* buffer_bit_field = LoadObjectField(
+ buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
+ Node* was_neutered_mask = Int32Constant(JSArrayBuffer::WasNeutered::kMask);
+
+ return Word32NotEqual(Word32And(buffer_bit_field, was_neutered_mask),
+ Int32Constant(0));
+}
+
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
+ compiler::Node* argc,
+ CodeStubAssembler::ParameterMode mode)
+ : assembler_(assembler),
+ argc_(argc),
+ arguments_(nullptr),
+ fp_(assembler->LoadFramePointer()) {
+ compiler::Node* offset = assembler->ElementOffsetFromIndex(
+ argc_, FAST_ELEMENTS, mode,
+ (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
+ arguments_ = assembler_->IntPtrAddFoldConstants(fp_, offset);
+ if (mode == CodeStubAssembler::INTEGER_PARAMETERS) {
+ argc_ = assembler->ChangeInt32ToIntPtr(argc_);
+ } else if (mode == CodeStubAssembler::SMI_PARAMETERS) {
+ argc_ = assembler->SmiUntag(argc_);
+ }
+}
+
+compiler::Node* CodeStubArguments::GetReceiver() {
+ return assembler_->Load(MachineType::AnyTagged(), arguments_,
+ assembler_->IntPtrConstant(kPointerSize));
+}
+
+compiler::Node* CodeStubArguments::AtIndex(
+ compiler::Node* index, CodeStubAssembler::ParameterMode mode) {
+ typedef compiler::Node Node;
+ Node* negated_index = assembler_->IntPtrSubFoldConstants(
+ assembler_->IntPtrOrSmiConstant(0, mode), index);
+ Node* offset =
+ assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
+ return assembler_->Load(MachineType::AnyTagged(), arguments_, offset);
+}
+
+compiler::Node* CodeStubArguments::AtIndex(int index) {
+ return AtIndex(assembler_->IntPtrConstant(index));
+}
+
+void CodeStubArguments::ForEach(const CodeStubAssembler::VariableList& vars,
+ CodeStubArguments::ForEachBodyFunction body,
+ compiler::Node* first, compiler::Node* last,
+ CodeStubAssembler::ParameterMode mode) {
+ assembler_->Comment("CodeStubArguments::ForEach");
+ DCHECK_IMPLIES(first == nullptr || last == nullptr,
+ mode == CodeStubAssembler::INTPTR_PARAMETERS);
+ if (first == nullptr) {
+ first = assembler_->IntPtrOrSmiConstant(0, mode);
+ }
+ if (last == nullptr) {
+ last = argc_;
+ }
+ compiler::Node* start = assembler_->IntPtrSubFoldConstants(
+ arguments_,
+ assembler_->ElementOffsetFromIndex(first, FAST_ELEMENTS, mode));
+ compiler::Node* end = assembler_->IntPtrSubFoldConstants(
+ arguments_,
+ assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
+ assembler_->BuildFastLoop(
+ vars, MachineType::PointerRepresentation(), start, end,
+ [body](CodeStubAssembler* assembler, compiler::Node* current) {
+ Node* arg = assembler->Load(MachineType::AnyTagged(), current);
+ body(assembler, arg);
+ },
+ -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
+}
+
+void CodeStubArguments::PopAndReturn(compiler::Node* value) {
+ assembler_->PopAndReturn(
+ assembler_->IntPtrAddFoldConstants(argc_, assembler_->IntPtrConstant(1)),
+ value);
+}
+
+compiler::Node* CodeStubAssembler::IsFastElementsKind(
+ compiler::Node* elements_kind) {
+ return Uint32LessThanOrEqual(elements_kind,
+ Int32Constant(LAST_FAST_ELEMENTS_KIND));
+}
+
+compiler::Node* CodeStubAssembler::IsHoleyFastElementsKind(
+ compiler::Node* elements_kind) {
+ CSA_ASSERT(this, IsFastElementsKind(elements_kind));
+
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == (FAST_ELEMENTS | 1));
+ STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == (FAST_DOUBLE_ELEMENTS | 1));
+
+ // Check prototype chain if receiver does not have packed elements.
+ Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
+ return Word32Equal(holey_elements, Int32Constant(1));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index 25c7d5a8c8..f8f2686f8d 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -8,6 +8,7 @@
#include <functional>
#include "src/compiler/code-assembler.h"
+#include "src/globals.h"
#include "src/objects.h"
namespace v8 {
@@ -21,8 +22,10 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define HEAP_CONSTANT_LIST(V) \
V(BooleanMap, BooleanMap) \
+ V(CodeMap, CodeMap) \
V(empty_string, EmptyString) \
V(EmptyFixedArray, EmptyFixedArray) \
+ V(FalseValue, False) \
V(FixedArrayMap, FixedArrayMap) \
V(FixedCOWArrayMap, FixedCOWArrayMap) \
V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
@@ -31,6 +34,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(NanValue, Nan) \
V(NullValue, Null) \
V(TheHoleValue, TheHole) \
+ V(TrueValue, True) \
V(UndefinedValue, Undefined)
// Provides JavaScript-specific "macro-assembler" functionality on top of the
@@ -38,7 +42,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
// it's possible to add JavaScript-specific useful CodeAssembler "macros"
// without modifying files in the compiler directory (and requiring a review
// from a compiler directory OWNER).
-class CodeStubAssembler : public compiler::CodeAssembler {
+class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
public:
// Create with CallStub linkage.
// |result_size| specifies the number of results returned by the stub.
@@ -99,6 +103,14 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
+ compiler::Node* IntPtrAddFoldConstants(compiler::Node* left,
+ compiler::Node* right);
+ compiler::Node* IntPtrSubFoldConstants(compiler::Node* left,
+ compiler::Node* right);
+  // Round the 32-bit payload of the provided word up to the next power of two.
+ compiler::Node* IntPtrRoundUpToPowerOfTwo32(compiler::Node* value);
+ compiler::Node* IntPtrMax(compiler::Node* left, compiler::Node* right);
+
// Float64 operations.
compiler::Node* Float64Ceil(compiler::Node* x);
compiler::Node* Float64Floor(compiler::Node* x);
@@ -120,9 +132,7 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Smi operations.
compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiAddWithOverflow(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiSubWithOverflow(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiAbove(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
@@ -136,41 +146,51 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) {
- return WordOr(a, b);
+ return BitcastWordToTaggedSigned(
+ WordOr(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
}
+ // Smi | HeapNumber operations.
+ compiler::Node* NumberInc(compiler::Node* value);
+
// Allocate an object of the given size.
compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
compiler::Node* InnerAllocate(compiler::Node* previous,
compiler::Node* offset);
+ compiler::Node* IsRegularHeapObjectSize(compiler::Node* size);
- void Assert(compiler::Node* condition);
+ typedef std::function<compiler::Node*()> ConditionBody;
+ void Assert(ConditionBody condition_body, const char* string = nullptr,
+ const char* file = nullptr, int line = 0);
// Check a value for smi-ness
- compiler::Node* WordIsSmi(compiler::Node* a);
+ compiler::Node* TaggedIsSmi(compiler::Node* a);
// Check that the value is a non-negative smi.
compiler::Node* WordIsPositiveSmi(compiler::Node* a);
+ // Check that a word has a word-aligned address.
+ compiler::Node* WordIsWordAligned(compiler::Node* word);
+ compiler::Node* WordIsPowerOfTwo(compiler::Node* value);
void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) {
- BranchIf(SmiEqual(a, b), if_true, if_false);
+ Branch(SmiEqual(a, b), if_true, if_false);
}
void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
Label* if_false) {
- BranchIf(SmiLessThan(a, b), if_true, if_false);
+ Branch(SmiLessThan(a, b), if_true, if_false);
}
void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
Label* if_true, Label* if_false) {
- BranchIf(SmiLessThanOrEqual(a, b), if_true, if_false);
+ Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
}
void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
Label* if_false) {
- BranchIfFloat64Equal(value, value, if_false, if_true);
+ Branch(Float64Equal(value, value), if_false, if_true);
}
// Branches to {if_true} if ToBoolean applied to {value} yields true,
@@ -187,6 +207,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
if_notequal);
}
+ void BranchIfJSReceiver(compiler::Node* object, Label* if_true,
+ Label* if_false);
+ void BranchIfJSObject(compiler::Node* object, Label* if_true,
+ Label* if_false);
void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
Label* if_true, Label* if_false);
@@ -222,8 +246,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadMap(compiler::Node* object);
// Load the instance type of an HeapObject.
compiler::Node* LoadInstanceType(compiler::Node* object);
- // Checks that given heap object has given instance type.
- void AssertInstanceType(compiler::Node* object, InstanceType instance_type);
+  // Compare the instance type of the object against the provided one.
+ compiler::Node* HasInstanceType(compiler::Node* object, InstanceType type);
// Load the properties backing store of a JSObject.
compiler::Node* LoadProperties(compiler::Node* object);
// Load the elements backing store of a JSObject.
@@ -248,6 +272,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadMapDescriptors(compiler::Node* map);
// Load the prototype of a map.
compiler::Node* LoadMapPrototype(compiler::Node* map);
+ // Load the prototype info of a map. The result has to be checked if it is a
+ // prototype info object or not.
+ compiler::Node* LoadMapPrototypeInfo(compiler::Node* map,
+ Label* if_has_no_proto_info);
// Load the instance size of a Map.
compiler::Node* LoadMapInstanceSize(compiler::Node* map);
// Load the inobject properties count of a Map (valid only for JSObjects).
@@ -256,6 +284,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadMapConstructorFunctionIndex(compiler::Node* map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
compiler::Node* LoadMapConstructor(compiler::Node* map);
+ // Check if the map is set for slow properties.
+ compiler::Node* IsDictionaryMap(compiler::Node* map);
// Load the hash field of a name as an uint32 value.
compiler::Node* LoadNameHashField(compiler::Node* name);
@@ -270,6 +300,7 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Load value field of a JSValue object.
compiler::Node* LoadJSValueValue(compiler::Node* object);
// Load value field of a WeakCell object.
+ compiler::Node* LoadWeakCellValueUnchecked(compiler::Node* weak_cell);
compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
Label* if_cleared = nullptr);
@@ -294,9 +325,20 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* LoadDoubleWithHoleCheck(
compiler::Node* base, compiler::Node* offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
+ compiler::Node* LoadFixedTypedArrayElement(
+ compiler::Node* data_pointer, compiler::Node* index_node,
+ ElementsKind elements_kind,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Context manipulation
compiler::Node* LoadContextElement(compiler::Node* context, int slot_index);
+ compiler::Node* LoadContextElement(compiler::Node* context,
+ compiler::Node* slot_index);
+ compiler::Node* StoreContextElement(compiler::Node* context, int slot_index,
+ compiler::Node* value);
+ compiler::Node* StoreContextElement(compiler::Node* context,
+ compiler::Node* slot_index,
+ compiler::Node* value);
compiler::Node* LoadNativeContext(compiler::Node* context);
compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
@@ -324,6 +366,14 @@ class CodeStubAssembler : public compiler::CodeAssembler {
Heap::RootListIndex root);
// Store an array element to a FixedArray.
compiler::Node* StoreFixedArrayElement(
+ compiler::Node* object, int index, compiler::Node* value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+ ParameterMode parameter_mode = INTEGER_PARAMETERS) {
+ return StoreFixedArrayElement(object, Int32Constant(index), value,
+ barrier_mode, parameter_mode);
+ }
+
+ compiler::Node* StoreFixedArrayElement(
compiler::Node* object, compiler::Node* index, compiler::Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
@@ -332,19 +382,27 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* object, compiler::Node* index, compiler::Node* value,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ void StoreFieldsNoWriteBarrier(compiler::Node* start_address,
+ compiler::Node* end_address,
+ compiler::Node* value);
+
// Allocate a HeapNumber without initializing its value.
compiler::Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
// Allocate a HeapNumber with a specific value.
compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value,
MutableMode mode = IMMUTABLE);
// Allocate a SeqOneByteString with the given length.
- compiler::Node* AllocateSeqOneByteString(int length);
- compiler::Node* AllocateSeqOneByteString(compiler::Node* context,
- compiler::Node* length);
+ compiler::Node* AllocateSeqOneByteString(int length,
+ AllocationFlags flags = kNone);
+ compiler::Node* AllocateSeqOneByteString(
+ compiler::Node* context, compiler::Node* length,
+ ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
- compiler::Node* AllocateSeqTwoByteString(int length);
- compiler::Node* AllocateSeqTwoByteString(compiler::Node* context,
- compiler::Node* length);
+ compiler::Node* AllocateSeqTwoByteString(int length,
+ AllocationFlags flags = kNone);
+ compiler::Node* AllocateSeqTwoByteString(
+ compiler::Node* context, compiler::Node* length,
+ ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
@@ -357,6 +415,27 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* parent,
compiler::Node* offset);
+ // Allocate a one-byte ConsString with the given length, first and second
+ // parts. |length| is expected to be tagged, and |first| and |second| are
+ // expected to be one-byte strings.
+ compiler::Node* AllocateOneByteConsString(compiler::Node* length,
+ compiler::Node* first,
+ compiler::Node* second,
+ AllocationFlags flags = kNone);
+ // Allocate a two-byte ConsString with the given length, first and second
+ // parts. |length| is expected to be tagged, and |first| and |second| are
+ // expected to be two-byte strings.
+ compiler::Node* AllocateTwoByteConsString(compiler::Node* length,
+ compiler::Node* first,
+ compiler::Node* second,
+ AllocationFlags flags = kNone);
+
+ // Allocate an appropriate one- or two-byte ConsString with the first and
+ // second parts specified by |first| and |second|.
+ compiler::Node* NewConsString(compiler::Node* context, compiler::Node* length,
+ compiler::Node* left, compiler::Node* right,
+ AllocationFlags flags = kNone);
+
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
// and input string. |length| and |index| are expected to be tagged, and
@@ -366,6 +445,22 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* index,
compiler::Node* input);
+ compiler::Node* AllocateNameDictionary(int capacity);
+ compiler::Node* AllocateNameDictionary(compiler::Node* capacity);
+
+ compiler::Node* AllocateJSObjectFromMap(compiler::Node* map,
+ compiler::Node* properties = nullptr,
+ compiler::Node* elements = nullptr);
+
+ void InitializeJSObjectFromMap(compiler::Node* object, compiler::Node* map,
+ compiler::Node* size,
+ compiler::Node* properties = nullptr,
+ compiler::Node* elements = nullptr);
+
+ void InitializeJSObjectBody(compiler::Node* object, compiler::Node* map,
+ compiler::Node* size,
+ int start_offset = JSObject::kHeaderSize);
+
// Allocate a JSArray without elements and initialize the header fields.
compiler::Node* AllocateUninitializedJSArrayWithoutElements(
ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
@@ -390,6 +485,17 @@ class CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode mode = INTEGER_PARAMETERS,
AllocationFlags flags = kNone);
+ // Perform CreateArrayIterator (ES6 #sec-createarrayiterator).
+ compiler::Node* CreateArrayIterator(compiler::Node* array,
+ compiler::Node* array_map,
+ compiler::Node* array_type,
+ compiler::Node* context,
+ IterationKind mode);
+
+ compiler::Node* AllocateJSArrayIterator(compiler::Node* array,
+ compiler::Node* array_map,
+ compiler::Node* map);
+
void FillFixedArrayWithValue(ElementsKind kind, compiler::Node* array,
compiler::Node* from_index,
compiler::Node* to_index,
@@ -417,14 +523,20 @@ class CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode mode = INTEGER_PARAMETERS);
// Copies |character_count| elements from |from_string| to |to_string|
- // starting at the |from_index|'th character. |from_index| and
- // |character_count| must be Smis s.t.
- // 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
+ // starting at the |from_index|'th character. |from_string| and |to_string|
+ // can either be one-byte strings or two-byte strings, although if
+ // |from_string| is two-byte, then |to_string| must be two-byte.
+ // |from_index|, |to_index| and |character_count| must be either Smis or
+ // intptr_ts depending on |mode| s.t. 0 <= |from_index| <= |from_index| +
+ // |character_count| <= from_string.length and 0 <= |to_index| <= |to_index| +
+ // |character_count| <= to_string.length.
void CopyStringCharacters(compiler::Node* from_string,
compiler::Node* to_string,
compiler::Node* from_index,
+ compiler::Node* to_index,
compiler::Node* character_count,
- String::Encoding encoding);
+ String::Encoding from_encoding,
+ String::Encoding to_encoding, ParameterMode mode);
// Loads an element from |array| of |from_kind| elements by given |offset|
// (NOTE: not index!), does a hole check if |if_hole| is provided and
@@ -467,6 +579,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
int base_allocation_size,
compiler::Node* allocation_site);
+ compiler::Node* TryTaggedToFloat64(compiler::Node* value,
+ Label* if_valueisnotnumber);
compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
compiler::Node* value);
compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
@@ -499,8 +613,31 @@ class CodeStubAssembler : public compiler::CodeAssembler {
char const* method_name);
// Type checks.
+ // Check whether the map is for an object with special properties, such as a
+ // JSProxy or an object with interceptors.
+ compiler::Node* IsSpecialReceiverMap(compiler::Node* map);
+ compiler::Node* IsSpecialReceiverInstanceType(compiler::Node* instance_type);
compiler::Node* IsStringInstanceType(compiler::Node* instance_type);
+ compiler::Node* IsString(compiler::Node* object);
+ compiler::Node* IsJSObject(compiler::Node* object);
+ compiler::Node* IsJSGlobalProxy(compiler::Node* object);
compiler::Node* IsJSReceiverInstanceType(compiler::Node* instance_type);
+ compiler::Node* IsJSReceiver(compiler::Node* object);
+ compiler::Node* IsMap(compiler::Node* object);
+ compiler::Node* IsCallableMap(compiler::Node* map);
+ compiler::Node* IsName(compiler::Node* object);
+ compiler::Node* IsJSValue(compiler::Node* object);
+ compiler::Node* IsJSArray(compiler::Node* object);
+ compiler::Node* IsNativeContext(compiler::Node* object);
+ compiler::Node* IsWeakCell(compiler::Node* object);
+ compiler::Node* IsFixedDoubleArray(compiler::Node* object);
+ compiler::Node* IsHashTable(compiler::Node* object);
+ compiler::Node* IsDictionary(compiler::Node* object);
+ compiler::Node* IsUnseededNumberDictionary(compiler::Node* object);
+
+ // ElementsKind helpers:
+ compiler::Node* IsFastElementsKind(compiler::Node* elements_kind);
+ compiler::Node* IsHoleyFastElementsKind(compiler::Node* elements_kind);
// String helpers.
// Load a character from a String (might flatten a ConsString).
@@ -513,6 +650,20 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* SubString(compiler::Node* context, compiler::Node* string,
compiler::Node* from, compiler::Node* to);
+ // Return a new string object produced by concatenating |first| with |second|.
+ compiler::Node* StringAdd(compiler::Node* context, compiler::Node* first,
+ compiler::Node* second,
+ AllocationFlags flags = kNone);
+
+ // Return the first index >= {from} at which {needle_char} was found in
+ // {string}, or -1 if such an index does not exist. The returned value is
+ // a Smi, {string} is expected to be a String, {needle_char} is an intptr,
+ // and {from} is expected to be tagged.
+ compiler::Node* StringIndexOfChar(compiler::Node* context,
+ compiler::Node* string,
+ compiler::Node* needle_char,
+ compiler::Node* from);
+
compiler::Node* StringFromCodePoint(compiler::Node* codepoint,
UnicodeEncoding encoding);
@@ -520,6 +671,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Convert a String to a Number.
compiler::Node* StringToNumber(compiler::Node* context,
compiler::Node* input);
+ compiler::Node* NumberToString(compiler::Node* context,
+ compiler::Node* input);
// Convert an object to a name.
compiler::Node* ToName(compiler::Node* context, compiler::Node* input);
// Convert a Non-Number object to a Number.
@@ -528,6 +681,16 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Convert any object to a Number.
compiler::Node* ToNumber(compiler::Node* context, compiler::Node* input);
+ // Convert any object to a String.
+ compiler::Node* ToString(compiler::Node* context, compiler::Node* input);
+
+ // Convert any object to a Primitive.
+ compiler::Node* JSReceiverToPrimitive(compiler::Node* context,
+ compiler::Node* input);
+
+ // Convert a String to a flat String.
+ compiler::Node* FlattenString(compiler::Node* string);
+
enum ToIntegerTruncationMode {
kNoTruncation,
kTruncateMinusZero,
@@ -540,20 +703,50 @@ class CodeStubAssembler : public compiler::CodeAssembler {
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |T| in |word32|. Returns result as an uint32 node.
template <typename T>
- compiler::Node* BitFieldDecode(compiler::Node* word32) {
- return BitFieldDecode(word32, T::kShift, T::kMask);
+ compiler::Node* DecodeWord32(compiler::Node* word32) {
+ return DecodeWord32(word32, T::kShift, T::kMask);
+ }
+
+ // Returns a node that contains a decoded (unsigned!) value of a bit
+ // field |T| in |word|. Returns result as a word-size node.
+ template <typename T>
+ compiler::Node* DecodeWord(compiler::Node* word) {
+ return DecodeWord(word, T::kShift, T::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |T| in |word32|. Returns result as a word-size node.
template <typename T>
- compiler::Node* BitFieldDecodeWord(compiler::Node* word32) {
- return ChangeUint32ToWord(BitFieldDecode<T>(word32));
+ compiler::Node* DecodeWordFromWord32(compiler::Node* word32) {
+ return DecodeWord<T>(ChangeUint32ToWord(word32));
}
// Decodes an unsigned (!) value from |word32| to an uint32 node.
- compiler::Node* BitFieldDecode(compiler::Node* word32, uint32_t shift,
- uint32_t mask);
+ compiler::Node* DecodeWord32(compiler::Node* word32, uint32_t shift,
+ uint32_t mask);
+
+ // Decodes an unsigned (!) value from |word| to a word-size node.
+ compiler::Node* DecodeWord(compiler::Node* word, uint32_t shift,
+ uint32_t mask);
+
+ // Returns true if any of the |T|'s bits in given |word32| are set.
+ template <typename T>
+ compiler::Node* IsSetWord32(compiler::Node* word32) {
+ return IsSetWord32(word32, T::kMask);
+ }
+
+ // Returns true if any of the mask's bits in given |word32| are set.
+ compiler::Node* IsSetWord32(compiler::Node* word32, uint32_t mask) {
+ return Word32NotEqual(Word32And(word32, Int32Constant(mask)),
+ Int32Constant(0));
+ }
+
+ // Returns true if any of the |T|'s bits in given |word| are set.
+ template <typename T>
+ compiler::Node* IsSetWord(compiler::Node* word) {
+ return WordNotEqual(WordAnd(word, IntPtrConstant(T::kMask)),
+ IntPtrConstant(0));
+ }
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int delta);
@@ -576,6 +769,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* EntryToIndex(compiler::Node* entry) {
return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
}
+  // Calculate a valid size for a hash table.
+ compiler::Node* HashTableComputeCapacity(compiler::Node* at_least_space_for);
// Looks up an entry in a NameDictionaryBase successor. If the entry is found
// control goes to {if_found} and {var_name_index} contains an index of the
@@ -723,6 +918,12 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* feedback, Label* if_handler,
Variable* var_handler, Label* if_miss,
int unroll_count);
+ void HandleKeyedStorePolymorphicCase(compiler::Node* receiver_map,
+ compiler::Node* feedback,
+ Label* if_handler, Variable* var_handler,
+ Label* if_transition_handler,
+ Variable* var_transition_map_cell,
+ Label* if_miss);
compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
compiler::Node* map);
@@ -806,10 +1007,12 @@ class CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode mode, Label* bailout);
void LoadIC(const LoadICParameters* p);
+ void LoadICProtoArray(const LoadICParameters* p, compiler::Node* handler);
void LoadGlobalIC(const LoadICParameters* p);
void KeyedLoadIC(const LoadICParameters* p);
void KeyedLoadICGeneric(const LoadICParameters* p);
void StoreIC(const StoreICParameters* p);
+ void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
void TransitionElementsKind(compiler::Node* object, compiler::Node* map,
ElementsKind from_kind, ElementsKind to_kind,
@@ -838,14 +1041,110 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* CreateAllocationSiteInFeedbackVector(
compiler::Node* feedback_vector, compiler::Node* slot);
+ enum class IndexAdvanceMode { kPre, kPost };
+
+ void BuildFastLoop(
+ const VariableList& var_list, MachineRepresentation index_rep,
+ compiler::Node* start_index, compiler::Node* end_index,
+ std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
+ body,
+ int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre);
+
+ void BuildFastLoop(
+ MachineRepresentation index_rep, compiler::Node* start_index,
+ compiler::Node* end_index,
+ std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
+ body,
+ int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre) {
+ BuildFastLoop(VariableList(0, zone()), index_rep, start_index, end_index,
+ body, increment, mode);
+ }
+
+ enum class ForEachDirection { kForward, kReverse };
+
+ void BuildFastFixedArrayForEach(
+ compiler::Node* fixed_array, ElementsKind kind,
+ compiler::Node* first_element_inclusive,
+ compiler::Node* last_element_exclusive,
+ std::function<void(CodeStubAssembler* assembler,
+ compiler::Node* fixed_array, compiler::Node* offset)>
+ body,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ ForEachDirection direction = ForEachDirection::kReverse);
+
+ compiler::Node* GetArrayAllocationSize(compiler::Node* element_count,
+ ElementsKind kind, ParameterMode mode,
+ int header_size) {
+ return ElementOffsetFromIndex(element_count, kind, mode, header_size);
+ }
+
compiler::Node* GetFixedArrayAllocationSize(compiler::Node* element_count,
ElementsKind kind,
ParameterMode mode) {
- return ElementOffsetFromIndex(element_count, kind, mode,
+ return GetArrayAllocationSize(element_count, kind, mode,
FixedArray::kHeaderSize);
}
+ enum RelationalComparisonMode {
+ kLessThan,
+ kLessThanOrEqual,
+ kGreaterThan,
+ kGreaterThanOrEqual
+ };
+
+ compiler::Node* RelationalComparison(RelationalComparisonMode mode,
+ compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context);
+
+ void BranchIfNumericRelationalComparison(RelationalComparisonMode mode,
+ compiler::Node* lhs,
+ compiler::Node* rhs, Label* if_true,
+ Label* if_false);
+
+ void GotoUnlessNumberLessThan(compiler::Node* lhs, compiler::Node* rhs,
+ Label* if_false);
+
+ enum ResultMode { kDontNegateResult, kNegateResult };
+
+ compiler::Node* Equal(ResultMode mode, compiler::Node* lhs,
+ compiler::Node* rhs, compiler::Node* context);
+
+ compiler::Node* StrictEqual(ResultMode mode, compiler::Node* lhs,
+ compiler::Node* rhs, compiler::Node* context);
+
+ // ECMA#sec-samevalue
+ // Similar to StrictEqual except that NaNs are treated as equal and minus zero
+ // differs from positive zero.
+ // Unlike Equal and StrictEqual, returns a value suitable for use in Branch
+ // instructions, e.g. Branch(SameValue(...), &label).
+ compiler::Node* SameValue(compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* context);
+
+ compiler::Node* HasProperty(
+ compiler::Node* object, compiler::Node* key, compiler::Node* context,
+ Runtime::FunctionId fallback_runtime_function_id = Runtime::kHasProperty);
+ compiler::Node* ForInFilter(compiler::Node* key, compiler::Node* object,
+ compiler::Node* context);
+
+ compiler::Node* Typeof(compiler::Node* value, compiler::Node* context);
+
+ compiler::Node* InstanceOf(compiler::Node* object, compiler::Node* callable,
+ compiler::Node* context);
+
+ // TypedArray/ArrayBuffer helpers
+ compiler::Node* IsDetachedBuffer(compiler::Node* buffer);
+
+ compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
+ ElementsKind kind, ParameterMode mode,
+ int base_size = 0);
+
+ protected:
+ void HandleStoreICHandlerCase(const StoreICParameters* p,
+ compiler::Node* handler, Label* miss);
+
private:
+ friend class CodeStubArguments;
+
enum ElementSupport { kOnlyProperties, kSupportElements };
void DescriptorLookupLinear(compiler::Node* unique_name,
@@ -861,6 +1160,47 @@ class CodeStubAssembler : public compiler::CodeAssembler {
void HandleLoadICHandlerCase(
const LoadICParameters* p, compiler::Node* handler, Label* miss,
ElementSupport support_elements = kOnlyProperties);
+
+ void HandleLoadICSmiHandlerCase(const LoadICParameters* p,
+ compiler::Node* holder,
+ compiler::Node* smi_handler, Label* miss,
+ ElementSupport support_elements);
+
+ void HandleLoadICProtoHandler(const LoadICParameters* p,
+ compiler::Node* handler, Variable* var_holder,
+ Variable* var_smi_handler,
+ Label* if_smi_handler, Label* miss);
+
+ compiler::Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p,
+ compiler::Node* handler,
+ compiler::Node* handler_length,
+ compiler::Node* handler_flags,
+ Label* miss);
+
+ void CheckPrototype(compiler::Node* prototype_cell, compiler::Node* name,
+ Label* miss);
+
+ void NameDictionaryNegativeLookup(compiler::Node* object,
+ compiler::Node* name, Label* miss);
+
+ // If |transition| is nullptr then the normal field store is generated or
+ // transitioning store otherwise.
+ void HandleStoreFieldAndReturn(compiler::Node* handler_word,
+ compiler::Node* holder,
+ Representation representation,
+ compiler::Node* value,
+ compiler::Node* transition, Label* miss);
+
+ // If |transition| is nullptr then the normal field store is generated or
+ // transitioning store otherwise.
+ void HandleStoreICSmiHandlerCase(compiler::Node* handler_word,
+ compiler::Node* holder,
+ compiler::Node* value,
+ compiler::Node* transition, Label* miss);
+
+ void HandleStoreICProtoHandler(const StoreICParameters* p,
+ compiler::Node* handler, Label* miss);
+
compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
void EmitFastElementsBoundsCheck(compiler::Node* object,
compiler::Node* elements,
@@ -877,10 +1217,6 @@ class CodeStubAssembler : public compiler::CodeAssembler {
Label* definitely_no_elements,
Label* possibly_elements);
- compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
- ElementsKind kind, ParameterMode mode,
- int base_size = 0);
-
compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
AllocationFlags flags,
compiler::Node* top_address,
@@ -906,9 +1242,83 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* value,
Label* bailout);
+ compiler::Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
+ compiler::Node* length,
+ compiler::Node* parent,
+ compiler::Node* offset);
+
+ compiler::Node* AllocateConsString(Heap::RootListIndex map_root_index,
+ compiler::Node* length,
+ compiler::Node* first,
+ compiler::Node* second,
+ AllocationFlags flags);
+
static const int kElementLoopUnrollThreshold = 8;
};
+class CodeStubArguments {
+ public:
+ // |argc| specifies the number of arguments passed to the builtin excluding
+ // the receiver.
+ CodeStubArguments(CodeStubAssembler* assembler, compiler::Node* argc,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+ compiler::Node* GetReceiver();
+
+ // |index| is zero-based and does not include the receiver
+ compiler::Node* AtIndex(compiler::Node* index,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+ compiler::Node* AtIndex(int index);
+
+ typedef std::function<void(CodeStubAssembler* assembler, compiler::Node* arg)>
+ ForEachBodyFunction;
+
+ // Iteration doesn't include the receiver. |first| and |last| are zero-based.
+ void ForEach(ForEachBodyFunction body, compiler::Node* first = nullptr,
+ compiler::Node* last = nullptr,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS) {
+ CodeStubAssembler::VariableList list(0, assembler_->zone());
+ ForEach(list, body, first, last);
+ }
+
+ // Iteration doesn't include the receiver. |first| and |last| are zero-based.
+ void ForEach(const CodeStubAssembler::VariableList& vars,
+ ForEachBodyFunction body, compiler::Node* first = nullptr,
+ compiler::Node* last = nullptr,
+ CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+ void PopAndReturn(compiler::Node* value);
+
+ private:
+ compiler::Node* GetArguments();
+
+ CodeStubAssembler* assembler_;
+ compiler::Node* argc_;
+ compiler::Node* arguments_;
+ compiler::Node* fp_;
+};
+
+#ifdef DEBUG
+#define CSA_ASSERT(csa, x) \
+ (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)
+#else
+#define CSA_ASSERT(csa, x) ((void)0)
+#endif
+
+#ifdef ENABLE_SLOW_DCHECKS
+#define CSA_SLOW_ASSERT(csa, x) \
+ if (FLAG_enable_slow_asserts) { \
+ (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__); \
+ }
+#else
+#define CSA_SLOW_ASSERT(csa, x) ((void)0)
+#endif
+
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
} // namespace internal
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index a294d56c7a..790f687925 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -249,7 +249,7 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
Handle<Code> HydrogenCodeStub::GenerateRuntimeTailCall(
CodeStubDescriptor* descriptor) {
const char* name = CodeStub::MajorName(MajorKey());
- Zone zone(isolate()->allocator());
+ Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor interface_descriptor(GetCallInterfaceDescriptor());
CodeStubAssembler assembler(isolate(), &zone, interface_descriptor,
GetCodeFlags(), name);
@@ -307,7 +307,7 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
CompilationInfo info(CStrVector(CodeStub::MajorName(stub->MajorKey())),
isolate, &zone, stub->GetCodeFlags());
// Parameter count is number of stack parameters.
@@ -328,18 +328,6 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
}
-template <>
-HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
- info()->MarkAsSavesCallerDoubles();
- HValue* number = GetParameter(Descriptor::kArgument);
- return BuildNumberToString(number, AstType::Number());
-}
-
-
-Handle<Code> NumberToStringStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
HValue* argument_elements,
ElementsKind kind) {
@@ -1043,7 +1031,7 @@ HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
}
if_inputisprimitive.End();
// Convert the primitive to a string value.
- HValue* values[] = {context(), Pop()};
+ HValue* values[] = {Pop()};
Callable toString = CodeFactory::ToString(isolate());
Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(toString.code()), 0,
toString.descriptor(),
@@ -1132,39 +1120,11 @@ HValue* CodeStubGraphBuilderBase::BuildToPrimitive(HValue* input,
return Pop();
}
-
-template <>
-HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
- StringAddStub* stub = casted_stub();
- StringAddFlags flags = stub->flags();
- PretenureFlag pretenure_flag = stub->pretenure_flag();
-
- HValue* left = GetParameter(Descriptor::kLeft);
- HValue* right = GetParameter(Descriptor::kRight);
-
- // Make sure that both arguments are strings if not known in advance.
- if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- left =
- BuildToString(left, (flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT);
- }
- if ((flags & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- right = BuildToString(right,
- (flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT);
- }
-
- return BuildStringAdd(left, right, HAllocationMode(pretenure_flag));
-}
-
-
-Handle<Code> StringAddStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
template <>
HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
ToBooleanICStub* stub = casted_stub();
IfBuilder if_true(this);
- if_true.If<HBranch>(GetParameter(Descriptor::kArgument), stub->types());
+ if_true.If<HBranch>(GetParameter(Descriptor::kArgument), stub->hints());
if_true.Then();
if_true.Return(graph()->GetConstantTrue());
if_true.Else();
@@ -1193,276 +1153,5 @@ Handle<Code> LoadDictionaryElementStub::GenerateCode() {
return DoGenerateCode(this);
}
-
-template<>
-HValue* CodeStubGraphBuilder<RegExpConstructResultStub>::BuildCodeStub() {
- // Determine the parameters.
- HValue* length = GetParameter(Descriptor::kLength);
- HValue* index = GetParameter(Descriptor::kIndex);
- HValue* input = GetParameter(Descriptor::kInput);
-
- // TODO(turbofan): This codestub has regressed to need a frame on ia32 at some
- // point and wasn't caught since it wasn't built in the snapshot. We should
- // probably just replace with a TurboFan stub rather than fixing it.
-#if !(V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87)
- info()->MarkMustNotHaveEagerFrame();
-#endif
-
- return BuildRegExpConstructResult(length, index, input);
-}
-
-
-Handle<Code> RegExpConstructResultStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-class CodeStubGraphBuilder<KeyedLoadGenericStub>
- : public CodeStubGraphBuilderBase {
- public:
- explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
- : CodeStubGraphBuilderBase(info, stub) {}
-
- typedef KeyedLoadGenericStub::Descriptor Descriptor;
-
- protected:
- virtual HValue* BuildCodeStub();
-
- void BuildElementsKindLimitCheck(HGraphBuilder::IfBuilder* if_builder,
- HValue* bit_field2,
- ElementsKind kind);
-
- void BuildFastElementLoad(HGraphBuilder::IfBuilder* if_builder,
- HValue* receiver,
- HValue* key,
- HValue* instance_type,
- HValue* bit_field2,
- ElementsKind kind);
-
- KeyedLoadGenericStub* casted_stub() {
- return static_cast<KeyedLoadGenericStub*>(stub());
- }
-};
-
-
-void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildElementsKindLimitCheck(
- HGraphBuilder::IfBuilder* if_builder, HValue* bit_field2,
- ElementsKind kind) {
- ElementsKind next_kind = static_cast<ElementsKind>(kind + 1);
- HValue* kind_limit = Add<HConstant>(
- static_cast<int>(Map::ElementsKindBits::encode(next_kind)));
-
- if_builder->If<HCompareNumericAndBranch>(bit_field2, kind_limit, Token::LT);
- if_builder->Then();
-}
-
-
-void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
- HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
- HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
- BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
-
- IfBuilder js_array_check(this);
- js_array_check.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(JS_ARRAY_TYPE), Token::EQ);
- js_array_check.Then();
- Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
- true, kind,
- LOAD, NEVER_RETURN_HOLE,
- STANDARD_STORE));
- js_array_check.Else();
- Push(BuildUncheckedMonomorphicElementAccess(receiver, key, NULL,
- false, kind,
- LOAD, NEVER_RETURN_HOLE,
- STANDARD_STORE));
- js_array_check.End();
-}
-
-
-HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(Descriptor::kReceiver);
- HValue* key = GetParameter(Descriptor::kName);
- // Split into a smi/integer case and unique string case.
- HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
- graph()->CreateBasicBlock());
-
- BuildKeyedIndexCheck(key, &index_name_split_continuation);
-
- IfBuilder index_name_split(this, &index_name_split_continuation);
- index_name_split.Then();
- {
- // Key is an index (number)
- key = Pop();
-
- int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor);
- BuildJSObjectCheck(receiver, bit_field_mask);
-
- HValue* map =
- Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
-
- HValue* instance_type =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
-
- HValue* bit_field2 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
-
- IfBuilder kind_if(this);
- BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- FAST_HOLEY_ELEMENTS);
-
- kind_if.Else();
- {
- BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
- FAST_HOLEY_DOUBLE_ELEMENTS);
- }
- kind_if.Else();
-
- // The DICTIONARY_ELEMENTS check generates a "kind_if.Then"
- BuildElementsKindLimitCheck(&kind_if, bit_field2, DICTIONARY_ELEMENTS);
- {
- HValue* elements = AddLoadElements(receiver);
-
- HValue* hash = BuildElementIndexHash(key);
-
- Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
- }
- kind_if.Else();
-
- // The SLOW_SLOPPY_ARGUMENTS_ELEMENTS check generates a "kind_if.Then"
- STATIC_ASSERT(FAST_SLOPPY_ARGUMENTS_ELEMENTS <
- SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
- BuildElementsKindLimitCheck(&kind_if, bit_field2,
- SLOW_SLOPPY_ARGUMENTS_ELEMENTS);
- // Non-strict elements are not handled.
- Add<HDeoptimize>(DeoptimizeReason::kNonStrictElementsInKeyedLoadGenericStub,
- Deoptimizer::EAGER);
- Push(graph()->GetConstant0());
-
- kind_if.ElseDeopt(
- DeoptimizeReason::kElementsKindUnhandledInKeyedLoadGenericStub);
-
- kind_if.End();
- }
- index_name_split.Else();
- {
- // Key is a unique string.
- key = Pop();
-
- int bit_field_mask = (1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasNamedInterceptor);
- BuildJSObjectCheck(receiver, bit_field_mask);
-
- HIfContinuation continuation;
- BuildTestForDictionaryProperties(receiver, &continuation);
- IfBuilder if_dict_properties(this, &continuation);
- if_dict_properties.Then();
- {
- // Key is string, properties are dictionary mode
- BuildNonGlobalObjectCheck(receiver);
-
- HValue* properties = Add<HLoadNamedField>(
- receiver, nullptr, HObjectAccess::ForPropertiesPointer());
-
- HValue* hash =
- Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForNameHashField());
-
- hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
-
- HValue* value =
- BuildUncheckedDictionaryElementLoad(receiver, properties, key, hash);
- Push(value);
- }
- if_dict_properties.Else();
- {
- // TODO(dcarney): don't use keyed lookup cache, but convert to use
- // megamorphic stub cache.
- UNREACHABLE();
- // Key is string, properties are fast mode
- HValue* hash = BuildKeyedLookupCacheHash(receiver, key);
-
- ExternalReference cache_keys_ref =
- ExternalReference::keyed_lookup_cache_keys(isolate());
- HValue* cache_keys = Add<HConstant>(cache_keys_ref);
-
- HValue* map =
- Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
- HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
- base_index->ClearFlag(HValue::kCanOverflow);
-
- HIfContinuation inline_or_runtime_continuation(
- graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
- {
- IfBuilder lookup_ifs[KeyedLookupCache::kEntriesPerBucket];
- for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket;
- ++probe) {
- IfBuilder* lookup_if = &lookup_ifs[probe];
- lookup_if->Initialize(this);
- int probe_base = probe * KeyedLookupCache::kEntryLength;
- HValue* map_index = AddUncasted<HAdd>(
- base_index,
- Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex));
- map_index->ClearFlag(HValue::kCanOverflow);
- HValue* key_index = AddUncasted<HAdd>(
- base_index,
- Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
- key_index->ClearFlag(HValue::kCanOverflow);
- HValue* map_to_check =
- Add<HLoadKeyed>(cache_keys, map_index, nullptr, nullptr,
- FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
- lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
- lookup_if->And();
- HValue* key_to_check =
- Add<HLoadKeyed>(cache_keys, key_index, nullptr, nullptr,
- FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
- lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
- lookup_if->Then();
- {
- ExternalReference cache_field_offsets_ref =
- ExternalReference::keyed_lookup_cache_field_offsets(isolate());
- HValue* cache_field_offsets =
- Add<HConstant>(cache_field_offsets_ref);
- HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
- index->ClearFlag(HValue::kCanOverflow);
- HValue* property_index =
- Add<HLoadKeyed>(cache_field_offsets, index, nullptr, cache_keys,
- INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
- Push(property_index);
- }
- lookup_if->Else();
- }
- for (int i = 0; i < KeyedLookupCache::kEntriesPerBucket; ++i) {
- lookup_ifs[i].JoinContinuation(&inline_or_runtime_continuation);
- }
- }
-
- IfBuilder inline_or_runtime(this, &inline_or_runtime_continuation);
- inline_or_runtime.Then();
- {
- // Found a cached index, load property inline.
- Push(Add<HLoadFieldByIndex>(receiver, Pop()));
- }
- inline_or_runtime.Else();
- {
- // KeyedLookupCache miss; call runtime.
- Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
- }
- inline_or_runtime.End();
- }
- if_dict_properties.End();
- }
- index_name_split.End();
-
- return Pop();
-}
-
-
-Handle<Code> KeyedLoadGenericStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index b899943e98..2ee5ece8da 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -22,7 +22,7 @@ namespace internal {
RUNTIME_FUNCTION(UnexpectedStubMiss) {
FATAL("Unexpected deopt of a stub");
- return Smi::FromInt(0);
+ return Smi::kZero;
}
CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
@@ -318,33 +318,42 @@ void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
}
}
-
-std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
- switch (flags) {
- case STRING_ADD_CHECK_NONE:
- return os << "CheckNone";
- case STRING_ADD_CHECK_LEFT:
- return os << "CheckLeft";
- case STRING_ADD_CHECK_RIGHT:
- return os << "CheckRight";
- case STRING_ADD_CHECK_BOTH:
- return os << "CheckBoth";
- case STRING_ADD_CONVERT_LEFT:
- return os << "ConvertLeft";
- case STRING_ADD_CONVERT_RIGHT:
- return os << "ConvertRight";
- case STRING_ADD_CONVERT:
- break;
- }
- UNREACHABLE();
- return os;
-}
-
-
void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
os << "StringAddStub_" << flags() << "_" << pretenure_flag();
}
+void StringAddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* left = assembler->Parameter(Descriptor::kLeft);
+ Node* right = assembler->Parameter(Descriptor::kRight);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
+ DCHECK((flags() & STRING_ADD_CONVERT) != 0);
+ // TODO(danno): The ToString and JSReceiverToPrimitive below could be
+ // combined to avoid duplicate smi and instance type checks.
+ left = assembler->ToString(context,
+ assembler->JSReceiverToPrimitive(context, left));
+ }
+ if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
+ DCHECK((flags() & STRING_ADD_CONVERT) != 0);
+ // TODO(danno): The ToString and JSReceiverToPrimitive below could be
+ // combined to avoid duplicate smi and instance type checks.
+ right = assembler->ToString(
+ context, assembler->JSReceiverToPrimitive(context, right));
+ }
+
+ if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
+ CodeStubAssembler::AllocationFlag flags =
+ (pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
+ : CodeStubAssembler::kNone;
+ assembler->Return(assembler->StringAdd(context, left, right, flags));
+ } else {
+ Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
+ pretenure_flag());
+ assembler->TailCallStub(callable, context, left, right);
+ }
+}
InlineCacheState CompareICStub::GetICState() const {
CompareICState::State state = Max(left(), right());
@@ -411,7 +420,7 @@ void CompareICStub::Generate(MacroAssembler* masm) {
Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
- Zone zone(isolate()->allocator());
+ Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
name);
@@ -419,7 +428,7 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
return assembler.GenerateCode();
}
-void LoadICTrampolineTFStub::GenerateAssembly(
+void LoadICTrampolineStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -433,7 +442,7 @@ void LoadICTrampolineTFStub::GenerateAssembly(
assembler->LoadIC(&p);
}
-void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void LoadICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* receiver = assembler->Parameter(Descriptor::kReceiver);
@@ -446,6 +455,21 @@ void LoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
assembler->LoadIC(&p);
}
+void LoadICProtoArrayStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* handler = assembler->Parameter(Descriptor::kHandler);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
+ assembler->LoadICProtoArray(&p, handler);
+}
+
void LoadGlobalICTrampolineStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -498,7 +522,7 @@ void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
assembler->KeyedLoadIC(&p);
}
-void StoreICTrampolineTFStub::GenerateAssembly(
+void StoreICTrampolineStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -514,7 +538,7 @@ void StoreICTrampolineTFStub::GenerateAssembly(
assembler->StoreIC(&p);
}
-void StoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StoreICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* receiver = assembler->Parameter(Descriptor::kReceiver);
@@ -529,6 +553,37 @@ void StoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
assembler->StoreIC(&p);
}
+void KeyedStoreICTrampolineTFStub::GenerateAssembly(
+ CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* vector = assembler->LoadTypeFeedbackVectorForStub();
+
+ CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+ vector);
+ assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
+}
+
+void KeyedStoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ Node* name = assembler->Parameter(Descriptor::kName);
+ Node* value = assembler->Parameter(Descriptor::kValue);
+ Node* slot = assembler->Parameter(Descriptor::kSlot);
+ Node* vector = assembler->Parameter(Descriptor::kVector);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+
+ CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
+ vector);
+ assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
+}
+
void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -662,368 +717,6 @@ void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
}
// static
-compiler::Node* AddStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left, compiler::Node* right,
- compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry for floating point addition.
- Label do_fadd(assembler);
- Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
- var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
-
- // We might need to loop several times due to ToPrimitive, ToString and/or
- // ToNumber conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars), end(assembler),
- string_add_convert_left(assembler, Label::kDeferred),
- string_add_convert_right(assembler, Label::kDeferred);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
- assembler->Bind(&if_lhsissmi);
- {
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Try fast Smi addition first.
- Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
- Node* overflow = assembler->Projection(1, pair);
-
- // Check if the Smi additon overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_overflow);
- {
- var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fadd);
- }
-
- assembler->Bind(&if_notoverflow);
- var_result.Bind(assembler->Projection(0, pair));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fadd);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
- // Check if the {rhs} is a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_left);
- }
-
- assembler->Bind(&if_rhsisnotstring);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // Convert {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotsmi);
- {
- // Load the map and instance type of {lhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
-
- // Check if {lhs} is a String.
- Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
- &if_lhsisstring, &if_lhsisnotstring);
-
- assembler->Bind(&if_lhsisstring);
- {
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_right);
- }
-
- assembler->Bind(&if_lhsisnotstring);
- {
- // Check if {rhs} is a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Check if {lhs} is a Number.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
- {
- // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
- var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fadd);
- }
-
- assembler->Bind(&if_lhsisnotnumber);
- {
- // The {lhs} is neither a Number nor a String, and the {rhs} is a
- // Smi.
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- assembler->Bind(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_lhsisnotreceiver);
- {
- // Convert {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Check if {rhs} is a String.
- Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_left);
- }
-
- assembler->Bind(&if_rhsisnotstring);
- {
- // Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
- {
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->Word32Equal(
- rhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Perform a floating point addition.
- var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fadd);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // Convert {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotnumber);
- {
- // Check if {lhs} is a JSReceiver.
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- assembler->Bind(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing no hint.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_lhsisnotreceiver);
- {
- // Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing no hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // Convert {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
- }
- }
- }
- }
- }
- }
- }
- assembler->Bind(&string_add_convert_left);
- {
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
- var_rhs.value()));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&string_add_convert_right);
- {
- // Convert {lhs}, which is a Smi, to a String and concatenate the
- // resulting string with the String {rhs}.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
- var_rhs.value()));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&do_fadd);
- {
- Node* lhs_value = var_fadd_lhs.value();
- Node* rhs_value = var_fadd_rhs.value();
- Node* value = assembler->Float64Add(lhs_value, rhs_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- var_result.Bind(result);
- assembler->Goto(&end);
- }
- assembler->Bind(&end);
- return var_result.value();
-}
-
-// static
compiler::Node* AddWithFeedbackStub::Generate(
CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* slot_id, compiler::Node* type_feedback_vector,
@@ -1033,8 +726,10 @@ compiler::Node* AddWithFeedbackStub::Generate(
typedef CodeStubAssembler::Variable Variable;
// Shared entry for floating point addition.
- Label do_fadd(assembler), end(assembler),
- do_add_any(assembler, Label::kDeferred), call_add_stub(assembler);
+ Label do_fadd(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
+ check_rhsisoddball(assembler, Label::kDeferred),
+ call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+ call_add_stub(assembler), end(assembler);
Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
var_type_feedback(assembler, MachineRepresentation::kWord32),
@@ -1042,18 +737,21 @@ compiler::Node* AddWithFeedbackStub::Generate(
// Check if the {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
assembler->Bind(&if_lhsissmi);
{
// Check if the {rhs} is also a Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
assembler->Bind(&if_rhsissmi);
{
// Try fast Smi addition first.
- Node* pair = assembler->SmiAddWithOverflow(lhs, rhs);
+ Node* pair =
+ assembler->IntPtrAddWithOverflow(assembler->BitcastTaggedToWord(lhs),
+ assembler->BitcastTaggedToWord(rhs));
Node* overflow = assembler->Projection(1, pair);
// Check if the Smi additon overflowed.
@@ -1071,7 +769,8 @@ compiler::Node* AddWithFeedbackStub::Generate(
{
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(assembler->Projection(0, pair));
+ var_result.Bind(assembler->BitcastWordToTaggedSigned(
+ assembler->Projection(0, pair)));
assembler->Goto(&end);
}
}
@@ -1082,7 +781,8 @@ compiler::Node* AddWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -1092,18 +792,17 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&if_lhsisnotsmi);
{
- Label check_string(assembler);
-
// Load the map of {lhs}.
Node* lhs_map = assembler->LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map), &check_string);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
+ &if_lhsisnotnumber);
// Check if the {rhs} is Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
assembler->Bind(&if_rhsissmi);
{
@@ -1118,48 +817,93 @@ compiler::Node* AddWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map), &do_add_any);
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
assembler->Goto(&do_fadd);
}
+ }
+
+ assembler->Bind(&do_fadd);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* value =
+ assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
+ var_result.Bind(result);
+ assembler->Goto(&end);
+ }
- assembler->Bind(&check_string);
+ assembler->Bind(&if_lhsisnotnumber);
+ {
+ // No checks on rhs are done yet. We just know lhs is not a number or Smi.
+ Label if_lhsisoddball(assembler), if_lhsisnotoddball(assembler);
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* lhs_is_oddball = assembler->Word32Equal(
+ lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(lhs_is_oddball, &if_lhsisoddball, &if_lhsisnotoddball);
+
+ assembler->Bind(&if_lhsisoddball);
{
- // Check if the {rhs} is a smi, and exit the string check early if it is.
- assembler->GotoIf(assembler->WordIsSmi(rhs), &do_add_any);
+ assembler->GotoIf(assembler->TaggedIsSmi(rhs),
+ &call_with_oddball_feedback);
- Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
+ // Check if {rhs} is a HeapNumber.
+ assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
+ &call_with_oddball_feedback, &check_rhsisoddball);
+ }
+
+ assembler->Bind(&if_lhsisnotoddball);
+ {
// Exit unless {lhs} is a string
assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
- &do_add_any);
+ &call_with_any_feedback);
+
+ // Check if the {rhs} is a smi, and exit the string check early if it is.
+ assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_any_feedback);
Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
- // Exit unless {rhs} is a string
+ // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
+ // need an Oddball check.
assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
- &do_add_any);
+ &call_with_any_feedback);
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kString));
- assembler->Goto(&call_add_stub);
+ Callable callable = CodeFactory::StringAdd(
+ assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+
+ assembler->Goto(&end);
}
}
- assembler->Bind(&do_fadd);
+ assembler->Bind(&check_rhsisoddball);
+ {
+ // Check if rhs is an oddball. At this point we know lhs is either a
+ // Smi or number or oddball and rhs is not a number or Smi.
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+ Node* rhs_is_oddball = assembler->Word32Equal(
+ rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
+ &call_with_any_feedback);
+ }
+
+ assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
- Node* value =
- assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
- Node* result = assembler->ChangeFloat64ToTagged(value);
- var_result.Bind(result);
- assembler->Goto(&end);
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_add_stub);
}
- assembler->Bind(&do_add_any);
+ assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kAny));
@@ -1180,180 +924,6 @@ compiler::Node* AddWithFeedbackStub::Generate(
}
// static
-compiler::Node* SubtractStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry for floating point subtraction.
- Label do_fsub(assembler), end(assembler);
- Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
- var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
-
- // We might need to loop several times due to ToPrimitive and/or ToNumber
- // conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {lhs} and {rhs} values.
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
- assembler->Bind(&if_lhsissmi);
- {
- // Check if the {rhs} is also a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Try a fast Smi subtraction first.
- Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
- Node* overflow = assembler->Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_overflow);
- {
- // The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fsub);
- }
-
- assembler->Bind(&if_notoverflow);
- var_result.Bind(assembler->Projection(0, pair));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fsub);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotsmi);
- {
- // Load the map of the {lhs}.
- Node* lhs_map = assembler->LoadMap(lhs);
-
- // Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
- {
- // Check if the {rhs} is a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fsub);
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fsub);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotnumber);
- {
- // Convert the {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&do_fsub);
- {
- Node* lhs_value = var_fsub_lhs.value();
- Node* rhs_value = var_fsub_rhs.value();
- Node* value = assembler->Float64Sub(lhs_value, rhs_value);
- var_result.Bind(assembler->ChangeFloat64ToTagged(value));
- assembler->Goto(&end);
- }
- assembler->Bind(&end);
- return var_result.value();
-}
-
-// static
compiler::Node* SubtractWithFeedbackStub::Generate(
CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
compiler::Node* slot_id, compiler::Node* type_feedback_vector,
@@ -1363,8 +933,9 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
typedef CodeStubAssembler::Variable Variable;
// Shared entry for floating point subtraction.
- Label do_fsub(assembler), end(assembler),
- call_subtract_stub(assembler, Label::kDeferred);
+ Label do_fsub(assembler), end(assembler), call_subtract_stub(assembler),
+ if_lhsisnotnumber(assembler), check_rhsisoddball(assembler),
+ call_with_any_feedback(assembler);
Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
var_type_feedback(assembler, MachineRepresentation::kWord32),
@@ -1372,18 +943,21 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
// Check if the {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
assembler->Bind(&if_lhsissmi);
{
// Check if the {rhs} is also a Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
assembler->Bind(&if_rhsissmi);
{
// Try a fast Smi subtraction first.
- Node* pair = assembler->SmiSubWithOverflow(lhs, rhs);
+ Node* pair =
+ assembler->IntPtrSubWithOverflow(assembler->BitcastTaggedToWord(lhs),
+ assembler->BitcastTaggedToWord(rhs));
Node* overflow = assembler->Projection(1, pair);
// Check if the Smi subtraction overflowed.
@@ -1403,7 +977,8 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
// lhs, rhs, result smi. combined - smi.
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(assembler->Projection(0, pair));
+ var_result.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
assembler->Goto(&end);
}
@@ -1414,7 +989,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
// Check if {rhs} is a HeapNumber.
assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &call_subtract_stub);
+ &check_rhsisoddball);
// Perform a floating point subtraction.
var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
@@ -1430,11 +1005,12 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
// Check if the {lhs} is a HeapNumber.
assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
- &call_subtract_stub);
+ &if_lhsisnotnumber);
// Check if the {rhs} is a Smi.
Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
assembler->Bind(&if_rhsissmi);
{
@@ -1451,7 +1027,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
// Check if the {rhs} is a HeapNumber.
assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
- &call_subtract_stub);
+ &check_rhsisoddball);
// Perform a floating point subtraction.
var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
@@ -1467,178 +1043,80 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
Node* value = assembler->Float64Sub(lhs_value, rhs_value);
- var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
assembler->Goto(&end);
}
- assembler->Bind(&call_subtract_stub);
+ assembler->Bind(&if_lhsisnotnumber);
{
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- Callable callable = CodeFactory::Subtract(assembler->isolate());
- var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
- assembler->Goto(&end);
- }
+ // No checks on rhs are done yet. We just know lhs is not a number or Smi.
+ // Check if lhs is an oddball.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* lhs_is_oddball = assembler->Word32Equal(
+ lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return var_result.value();
-}
-
-// static
-compiler::Node* MultiplyStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry point for floating point multiplication.
- Label do_fmul(assembler), return_result(assembler);
- Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
- var_rhs_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
-
- // We might need to loop one or two times due to ToNumber conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_variables);
- var_lhs.Bind(left);
- var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- Node* lhs = var_lhs.value();
- Node* rhs = var_rhs.value();
-
- Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
+ &if_rhsisnotsmi);
- assembler->Bind(&lhs_is_smi);
+ assembler->Bind(&if_rhsissmi);
{
- Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
- &rhs_is_not_smi);
-
- assembler->Bind(&rhs_is_smi);
- {
- // Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
- // in case of overflow.
- var_result.Bind(assembler->SmiMul(lhs, rhs));
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&rhs_is_not_smi);
- {
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label rhs_is_number(assembler),
- rhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &rhs_is_number, &rhs_is_not_number);
-
- assembler->Bind(&rhs_is_number);
- {
- // Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
- var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fmul);
- }
-
- assembler->Bind(&rhs_is_not_number);
- {
- // Multiplication is commutative, swap {lhs} with {rhs} and loop.
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
- }
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_subtract_stub);
}
- assembler->Bind(&lhs_is_not_smi);
+ assembler->Bind(&if_rhsisnotsmi);
{
- Node* lhs_map = assembler->LoadMap(lhs);
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
- // Check if {lhs} is a HeapNumber.
- Label lhs_is_number(assembler),
- lhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &lhs_is_number, &lhs_is_not_number);
+ // Check if {rhs} is a HeapNumber.
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
+ &check_rhsisoddball);
- assembler->Bind(&lhs_is_number);
- {
- // Check if {rhs} is a Smi.
- Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi,
- &rhs_is_not_smi);
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_subtract_stub);
+ }
+ }
- assembler->Bind(&rhs_is_smi);
- {
- // Convert {rhs} to a double and multiply it with the value of {lhs}.
- var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fmul);
- }
+ assembler->Bind(&check_rhsisoddball);
+ {
+ // Check if rhs is an oddball. At this point we know lhs is either a
+ // Smi or number or oddball and rhs is not a number or Smi.
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+ Node* rhs_is_oddball = assembler->Word32Equal(
+ rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
- assembler->Bind(&rhs_is_not_smi);
- {
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Label rhs_is_number(assembler),
- rhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &rhs_is_number, &rhs_is_not_number);
-
- assembler->Bind(&rhs_is_number);
- {
- // Both {lhs} and {rhs} are HeapNumbers. Load their values and
- // multiply them.
- var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fmul);
- }
-
- assembler->Bind(&rhs_is_not_number);
- {
- // Multiplication is commutative, swap {lhs} with {rhs} and loop.
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
- }
- }
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_subtract_stub);
+ }
- assembler->Bind(&lhs_is_not_number);
- {
- // Convert {lhs} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
- }
+ assembler->Bind(&call_with_any_feedback);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->Goto(&call_subtract_stub);
}
- assembler->Bind(&do_fmul);
+ assembler->Bind(&call_subtract_stub);
{
- Node* value =
- assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = assembler->ChangeFloat64ToTagged(value);
- var_result.Bind(result);
- assembler->Goto(&return_result);
+ Callable callable = CodeFactory::Subtract(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
+ assembler->Goto(&end);
}
- assembler->Bind(&return_result);
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
return var_result.value();
}
+
// static
compiler::Node* MultiplyWithFeedbackStub::Generate(
CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
@@ -1649,8 +1127,10 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
typedef CodeStubAssembler::Variable Variable;
// Shared entry point for floating point multiplication.
- Label do_fmul(assembler), end(assembler),
- call_multiply_stub(assembler, Label::kDeferred);
+ Label do_fmul(assembler), if_lhsisnotnumber(assembler, Label::kDeferred),
+ check_rhsisoddball(assembler, Label::kDeferred),
+ call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+ call_multiply_stub(assembler), end(assembler);
Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
var_rhs_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
@@ -1659,12 +1139,13 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* number_map = assembler->HeapNumberMapConstant();
Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
assembler->Bind(&lhs_is_smi);
{
Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+ &rhs_is_not_smi);
assembler->Bind(&rhs_is_smi);
{
@@ -1672,7 +1153,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
// in case of overflow.
var_result.Bind(assembler->SmiMul(lhs, rhs));
var_type_feedback.Bind(assembler->Select(
- assembler->WordIsSmi(var_result.value()),
+ assembler->TaggedIsSmi(var_result.value()),
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
assembler->Int32Constant(BinaryOperationFeedback::kNumber),
MachineRepresentation::kWord32));
@@ -1685,7 +1166,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
// Check if {rhs} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
- &call_multiply_stub);
+ &check_rhsisoddball);
// Convert {lhs} to a double and multiply it with the value of {rhs}.
var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
@@ -1700,11 +1181,12 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
// Check if {lhs} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
- &call_multiply_stub);
+ &if_lhsisnotnumber);
// Check if {rhs} is a Smi.
Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
+ assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
+ &rhs_is_not_smi);
assembler->Bind(&rhs_is_smi);
{
@@ -1720,7 +1202,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
// Check if {rhs} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
- &call_multiply_stub);
+ &check_rhsisoddball);
// Both {lhs} and {rhs} are HeapNumbers. Load their values and
// multiply them.
@@ -1736,244 +1218,69 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
assembler->Int32Constant(BinaryOperationFeedback::kNumber));
Node* value =
assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = assembler->ChangeFloat64ToTagged(value);
+ Node* result = assembler->AllocateHeapNumberWithValue(value);
var_result.Bind(result);
assembler->Goto(&end);
}
- assembler->Bind(&call_multiply_stub);
+ assembler->Bind(&if_lhsisnotnumber);
{
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- Callable callable = CodeFactory::Multiply(assembler->isolate());
- var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return var_result.value();
-}
+ // No checks on rhs are done yet. We just know lhs is not a number or Smi.
+ // Check if lhs is an oddball.
+ Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* lhs_is_oddball = assembler->Word32Equal(
+ lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
-// static
-compiler::Node* DivideStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+ assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_oddball_feedback);
- // Shared entry point for floating point division.
- Label do_fdiv(assembler), end(assembler);
- Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
- var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+ // Load the map of the {rhs}.
+ Node* rhs_map = assembler->LoadMap(rhs);
- Node* number_map = assembler->HeapNumberMapConstant();
+ // Check if {rhs} is a HeapNumber.
+ assembler->Branch(assembler->IsHeapNumberMap(rhs_map),
+ &call_with_oddball_feedback, &check_rhsisoddball);
+ }
- // We might need to loop one or two times due to ToNumber conversions.
- Variable var_dividend(assembler, MachineRepresentation::kTagged),
- var_divisor(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(assembler, 2, loop_variables);
- var_dividend.Bind(left);
- var_divisor.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ assembler->Bind(&check_rhsisoddball);
{
- Node* dividend = var_dividend.value();
- Node* divisor = var_divisor.value();
-
- Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
- &dividend_is_not_smi);
-
- assembler->Bind(&dividend_is_smi);
- {
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
-
- assembler->Bind(&divisor_is_smi);
- {
- Label bailout(assembler);
-
- // Do floating point division if {divisor} is zero.
- assembler->GotoIf(
- assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
- &bailout);
-
- // Do floating point division {dividend} is zero and {divisor} is
- // negative.
- Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
- assembler->Branch(
- assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
- &dividend_is_zero, &dividend_is_not_zero);
-
- assembler->Bind(&dividend_is_zero);
- {
- assembler->GotoIf(
- assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
- &bailout);
- assembler->Goto(&dividend_is_not_zero);
- }
- assembler->Bind(&dividend_is_not_zero);
-
- Node* untagged_divisor = assembler->SmiUntag(divisor);
- Node* untagged_dividend = assembler->SmiUntag(dividend);
-
- // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
- // if the Smi size is 31) and {divisor} is -1.
- Label divisor_is_minus_one(assembler),
- divisor_is_not_minus_one(assembler);
- assembler->Branch(assembler->Word32Equal(untagged_divisor,
- assembler->Int32Constant(-1)),
- &divisor_is_minus_one, &divisor_is_not_minus_one);
-
- assembler->Bind(&divisor_is_minus_one);
- {
- assembler->GotoIf(
- assembler->Word32Equal(
- untagged_dividend,
- assembler->Int32Constant(
- kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
- &bailout);
- assembler->Goto(&divisor_is_not_minus_one);
- }
- assembler->Bind(&divisor_is_not_minus_one);
-
- // TODO(epertoso): consider adding a machine instruction that returns
- // both the result and the remainder.
- Node* untagged_result =
- assembler->Int32Div(untagged_dividend, untagged_divisor);
- Node* truncated =
- assembler->Int32Mul(untagged_result, untagged_divisor);
- // Do floating point division if the remainder is not 0.
- assembler->GotoIf(
- assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
- var_result.Bind(assembler->SmiTag(untagged_result));
- assembler->Goto(&end);
-
- // Bailout: convert {dividend} and {divisor} to double and do double
- // division.
- assembler->Bind(&bailout);
- {
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fdiv);
- }
- }
-
- assembler->Bind(&divisor_is_not_smi);
- {
- Node* divisor_map = assembler->LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
-
- assembler->Bind(&divisor_is_number);
- {
- // Convert {dividend} to a double and divide it with the value of
- // {divisor}.
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fdiv);
- }
-
- assembler->Bind(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&dividend_is_not_smi);
- {
- Node* dividend_map = assembler->LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- Label dividend_is_number(assembler),
- dividend_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(dividend_map, number_map),
- &dividend_is_number, &dividend_is_not_number);
-
- assembler->Bind(&dividend_is_number);
- {
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
-
- assembler->Bind(&divisor_is_smi);
- {
- // Convert {divisor} to a double and use it for a floating point
- // division.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fdiv);
- }
+ // Check if rhs is an oddball. At this point we know lhs is either a
+ // Smi or number or oddball and rhs is not a number or Smi.
+ Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+ Node* rhs_is_oddball = assembler->Word32Equal(
+ rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(rhs_is_oddball, &call_with_oddball_feedback,
+ &call_with_any_feedback);
+ }
- assembler->Bind(&divisor_is_not_smi);
- {
- Node* divisor_map = assembler->LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
-
- assembler->Bind(&divisor_is_number);
- {
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and divide them.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fdiv);
- }
-
- assembler->Bind(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
- }
- }
- }
+ assembler->Bind(&call_with_oddball_feedback);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_multiply_stub);
+ }
- assembler->Bind(&dividend_is_not_number);
- {
- // Convert {dividend} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_dividend.Bind(assembler->CallStub(callable, context, dividend));
- assembler->Goto(&loop);
- }
- }
+ assembler->Bind(&call_with_any_feedback);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->Goto(&call_multiply_stub);
}
- assembler->Bind(&do_fdiv);
+ assembler->Bind(&call_multiply_stub);
{
- Node* value = assembler->Float64Div(var_dividend_float64.value(),
- var_divisor_float64.value());
- var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ Callable callable = CodeFactory::Multiply(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
assembler->Goto(&end);
}
+
assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
return var_result.value();
}
+
// static
compiler::Node* DivideWithFeedbackStub::Generate(
CodeStubAssembler* assembler, compiler::Node* dividend,
@@ -1984,7 +1291,10 @@ compiler::Node* DivideWithFeedbackStub::Generate(
typedef CodeStubAssembler::Variable Variable;
// Shared entry point for floating point division.
- Label do_fdiv(assembler), end(assembler), call_divide_stub(assembler);
+ Label do_fdiv(assembler), dividend_is_not_number(assembler, Label::kDeferred),
+ check_divisor_for_oddball(assembler, Label::kDeferred),
+ call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+ call_divide_stub(assembler), end(assembler);
Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
var_divisor_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
@@ -1993,13 +1303,13 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* number_map = assembler->HeapNumberMapConstant();
Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
&dividend_is_not_smi);
assembler->Bind(&dividend_is_smi);
{
Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
&divisor_is_not_smi);
assembler->Bind(&divisor_is_smi);
@@ -2077,7 +1387,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
// Check if {divisor} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
- &call_divide_stub);
+ &check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
// {divisor}.
@@ -2092,11 +1402,11 @@ compiler::Node* DivideWithFeedbackStub::Generate(
// Check if {dividend} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
- &call_divide_stub);
+ &dividend_is_not_number);
// Check if {divisor} is a Smi.
Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
&divisor_is_not_smi);
assembler->Bind(&divisor_is_smi);
@@ -2114,7 +1424,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
// Check if {divisor} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
- &call_divide_stub);
+ &check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and divide them.
@@ -2131,181 +1441,65 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->Int32Constant(BinaryOperationFeedback::kNumber));
Node* value = assembler->Float64Div(var_dividend_float64.value(),
var_divisor_float64.value());
- var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
assembler->Goto(&end);
}
- assembler->Bind(&call_divide_stub);
+ assembler->Bind(&dividend_is_not_number);
{
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- Callable callable = CodeFactory::Divide(assembler->isolate());
- var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
- assembler->Goto(&end);
- }
+ // We just know dividend is not a number or Smi. No checks on divisor yet.
+ // Check if dividend is an oddball.
+ Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
+ Node* dividend_is_oddball = assembler->Word32Equal(
+ dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return var_result.value();
-}
-
-// static
-compiler::Node* ModulusStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Variable var_result(assembler, MachineRepresentation::kTagged);
- Label return_result(assembler, &var_result);
+ assembler->GotoIf(assembler->TaggedIsSmi(divisor),
+ &call_with_oddball_feedback);
- // Shared entry point for floating point modulus.
- Label do_fmod(assembler);
- Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
- var_divisor_float64(assembler, MachineRepresentation::kFloat64);
+ // Load the map of the {divisor}.
+ Node* divisor_map = assembler->LoadMap(divisor);
- Node* number_map = assembler->HeapNumberMapConstant();
+ // Check if {divisor} is a HeapNumber.
+ assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
+ &call_with_oddball_feedback, &check_divisor_for_oddball);
+ }
- // We might need to loop one or two times due to ToNumber conversions.
- Variable var_dividend(assembler, MachineRepresentation::kTagged),
- var_divisor(assembler, MachineRepresentation::kTagged);
- Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(assembler, 2, loop_variables);
- var_dividend.Bind(left);
- var_divisor.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ assembler->Bind(&check_divisor_for_oddball);
{
- Node* dividend = var_dividend.value();
- Node* divisor = var_divisor.value();
-
- Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
- &dividend_is_not_smi);
-
- assembler->Bind(&dividend_is_smi);
- {
- Label dividend_is_not_zero(assembler);
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
-
- assembler->Bind(&divisor_is_smi);
- {
- // Compute the modulus of two Smis.
- var_result.Bind(assembler->SmiMod(dividend, divisor));
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&divisor_is_not_smi);
- {
- Node* divisor_map = assembler->LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
-
- assembler->Bind(&divisor_is_number);
- {
- // Convert {dividend} to a double and compute its modulus with the
- // value of {dividend}.
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fmod);
- }
-
- assembler->Bind(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&dividend_is_not_smi);
- {
- Node* dividend_map = assembler->LoadMap(dividend);
-
- // Check if {dividend} is a HeapNumber.
- Label dividend_is_number(assembler),
- dividend_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(dividend_map, number_map),
- &dividend_is_number, &dividend_is_not_number);
-
- assembler->Bind(&dividend_is_number);
- {
- // Check if {divisor} is a Smi.
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
-
- assembler->Bind(&divisor_is_smi);
- {
- // Convert {divisor} to a double and compute {dividend}'s modulus with
- // it.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fmod);
- }
+ // Check if divisor is an oddball. At this point we know dividend is either
+ // a Smi or number or oddball and divisor is not a number or Smi.
+ Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
+ Node* divisor_is_oddball = assembler->Word32Equal(
+ divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
+ &call_with_any_feedback);
+ }
- assembler->Bind(&divisor_is_not_smi);
- {
- Node* divisor_map = assembler->LoadMap(divisor);
-
- // Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
-
- assembler->Bind(&divisor_is_number);
- {
- // Both {dividend} and {divisor} are HeapNumbers. Load their values
- // and compute their modulus.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fmod);
- }
-
- assembler->Bind(&divisor_is_not_number);
- {
- // Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
- }
- }
- }
+ assembler->Bind(&call_with_oddball_feedback);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_divide_stub);
+ }
- assembler->Bind(&dividend_is_not_number);
- {
- // Convert {dividend} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_dividend.Bind(assembler->CallStub(callable, context, dividend));
- assembler->Goto(&loop);
- }
- }
+ assembler->Bind(&call_with_any_feedback);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->Goto(&call_divide_stub);
}
- assembler->Bind(&do_fmod);
+ assembler->Bind(&call_divide_stub);
{
- Node* value = assembler->Float64Mod(var_dividend_float64.value(),
- var_divisor_float64.value());
- var_result.Bind(assembler->ChangeFloat64ToTagged(value));
- assembler->Goto(&return_result);
+ Callable callable = CodeFactory::Divide(assembler->isolate());
+ var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
+ assembler->Goto(&end);
}
- assembler->Bind(&return_result);
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_id);
return var_result.value();
}
@@ -2319,7 +1513,10 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
typedef CodeStubAssembler::Variable Variable;
// Shared entry point for floating point division.
- Label do_fmod(assembler), end(assembler), call_modulus_stub(assembler);
+ Label do_fmod(assembler), dividend_is_not_number(assembler, Label::kDeferred),
+ check_divisor_for_oddball(assembler, Label::kDeferred),
+ call_with_oddball_feedback(assembler), call_with_any_feedback(assembler),
+ call_modulus_stub(assembler), end(assembler);
Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
var_divisor_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
@@ -2328,20 +1525,20 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* number_map = assembler->HeapNumberMapConstant();
Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(dividend), &dividend_is_smi,
+ assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
&dividend_is_not_smi);
assembler->Bind(&dividend_is_smi);
{
Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
&divisor_is_not_smi);
assembler->Bind(&divisor_is_smi);
{
var_result.Bind(assembler->SmiMod(dividend, divisor));
var_type_feedback.Bind(assembler->Select(
- assembler->WordIsSmi(var_result.value()),
+ assembler->TaggedIsSmi(var_result.value()),
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
assembler->Goto(&end);
@@ -2353,7 +1550,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
// Check if {divisor} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
- &call_modulus_stub);
+ &check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
// {divisor}.
@@ -2369,11 +1566,11 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
// Check if {dividend} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
- &call_modulus_stub);
+ &dividend_is_not_number);
// Check if {divisor} is a Smi.
Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->WordIsSmi(divisor), &divisor_is_smi,
+ assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
&divisor_is_not_smi);
assembler->Bind(&divisor_is_smi);
@@ -2391,7 +1588,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
// Check if {divisor} is a HeapNumber.
assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
- &call_modulus_stub);
+ &check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and divide them.
@@ -2407,14 +1604,57 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Int32Constant(BinaryOperationFeedback::kNumber));
Node* value = assembler->Float64Mod(var_dividend_float64.value(),
var_divisor_float64.value());
- var_result.Bind(assembler->ChangeFloat64ToTagged(value));
+ var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
assembler->Goto(&end);
}
- assembler->Bind(&call_modulus_stub);
+ assembler->Bind(&dividend_is_not_number);
+ {
+ // No checks on divisor yet. We just know dividend is not a number or Smi.
+ // Check if dividend is an oddball.
+ Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
+ Node* dividend_is_oddball = assembler->Word32Equal(
+ dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+
+ assembler->GotoIf(assembler->TaggedIsSmi(divisor),
+ &call_with_oddball_feedback);
+
+ // Load the map of the {divisor}.
+ Node* divisor_map = assembler->LoadMap(divisor);
+
+ // Check if {divisor} is a HeapNumber.
+ assembler->Branch(assembler->IsHeapNumberMap(divisor_map),
+ &call_with_oddball_feedback, &check_divisor_for_oddball);
+ }
+
+ assembler->Bind(&check_divisor_for_oddball);
+ {
+ // Check if divisor is an oddball. At this point we know dividend is either
+ // a Smi or number or oddball and divisor is not a number or Smi.
+ Node* divisor_instance_type = assembler->LoadInstanceType(divisor);
+ Node* divisor_is_oddball = assembler->Word32Equal(
+ divisor_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(divisor_is_oddball, &call_with_oddball_feedback,
+ &call_with_any_feedback);
+ }
+
+ assembler->Bind(&call_with_oddball_feedback);
+ {
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&call_modulus_stub);
+ }
+
+ assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->Goto(&call_modulus_stub);
+ }
+
+ assembler->Bind(&call_modulus_stub);
+ {
Callable callable = CodeFactory::Modulus(assembler->isolate());
var_result.Bind(assembler->CallStub(callable, context, dividend, divisor));
assembler->Goto(&end);
@@ -2425,95 +1665,6 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
slot_id);
return var_result.value();
}
-// static
-compiler::Node* ShiftLeftStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Shl(lhs_value, shift_count);
- Node* result = assembler->ChangeInt32ToTagged(value);
- return result;
-}
-
-// static
-compiler::Node* ShiftRightStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Sar(lhs_value, shift_count);
- Node* result = assembler->ChangeInt32ToTagged(value);
- return result;
-}
-
-// static
-compiler::Node* ShiftRightLogicalStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Shr(lhs_value, shift_count);
- Node* result = assembler->ChangeUint32ToTagged(value);
- return result;
-}
-
-// static
-compiler::Node* BitwiseAndStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32And(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- return result;
-}
-
-// static
-compiler::Node* BitwiseOrStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32Or(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- return result;
-}
-
-// static
-compiler::Node* BitwiseXorStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* left,
- compiler::Node* right,
- compiler::Node* context) {
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32Xor(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- return result;
-}
// static
compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
@@ -2544,13 +1695,15 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
value = value_var.value();
Label if_issmi(assembler), if_isnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
assembler->Bind(&if_issmi);
{
// Try fast Smi addition first.
Node* one = assembler->SmiConstant(Smi::FromInt(1));
- Node* pair = assembler->SmiAddWithOverflow(value, one);
+ Node* pair = assembler->IntPtrAddWithOverflow(
+ assembler->BitcastTaggedToWord(value),
+ assembler->BitcastTaggedToWord(one));
Node* overflow = assembler->Projection(1, pair);
// Check if the Smi addition overflowed.
@@ -2561,7 +1714,8 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
var_type_feedback.Bind(assembler->Word32Or(
var_type_feedback.value(),
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(assembler->Projection(0, pair));
+ result_var.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
assembler->Goto(&end);
assembler->Bind(&if_overflow);
@@ -2589,13 +1743,40 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
assembler->Bind(&if_valuenotnumber);
{
- // Convert to a Number first and try again.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- value_var.Bind(assembler->CallStub(callable, context, value));
- assembler->Goto(&start);
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(assembler,
+ assembler->Word32Equal(var_type_feedback.value(),
+ assembler->Int32Constant(
+ BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+ Node* instance_type = assembler->LoadMapInstanceType(value_map);
+ Node* is_oddball = assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to Number and check again.
+ value_var.Bind(
+ assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback.Bind(assembler->Int32Constant(
+ BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&start);
+ }
+
+ assembler->Bind(&if_valuenotoddball);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
}
}
}
@@ -2608,7 +1789,7 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
var_type_feedback.Bind(assembler->Word32Or(
var_type_feedback.value(),
assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(assembler->ChangeFloat64ToTagged(finc_result));
+ result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
assembler->Goto(&end);
}
@@ -2618,6 +1799,13 @@ compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
return result_var.value();
}
+void NumberToStringStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+ typedef compiler::Node Node;
+ Node* argument = assembler->Parameter(Descriptor::kArgument);
+ Node* context = assembler->Parameter(Descriptor::kContext);
+ assembler->Return(assembler->NumberToString(context, argument));
+}
+
// static
compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
compiler::Node* value,
@@ -2647,13 +1835,15 @@ compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
value = value_var.value();
Label if_issmi(assembler), if_isnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(value), &if_issmi, &if_isnotsmi);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
assembler->Bind(&if_issmi);
{
// Try fast Smi subtraction first.
Node* one = assembler->SmiConstant(Smi::FromInt(1));
- Node* pair = assembler->SmiSubWithOverflow(value, one);
+ Node* pair = assembler->IntPtrSubWithOverflow(
+ assembler->BitcastTaggedToWord(value),
+ assembler->BitcastTaggedToWord(one));
Node* overflow = assembler->Projection(1, pair);
// Check if the Smi subtraction overflowed.
@@ -2664,7 +1854,8 @@ compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
var_type_feedback.Bind(assembler->Word32Or(
var_type_feedback.value(),
assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(assembler->Projection(0, pair));
+ result_var.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
assembler->Goto(&end);
assembler->Bind(&if_overflow);
@@ -2692,13 +1883,40 @@ compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
assembler->Bind(&if_valuenotnumber);
{
- // Convert to a Number first and try again.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- value_var.Bind(assembler->CallStub(callable, context, value));
- assembler->Goto(&start);
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(assembler,
+ assembler->Word32Equal(var_type_feedback.value(),
+ assembler->Int32Constant(
+ BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+ Node* instance_type = assembler->LoadMapInstanceType(value_map);
+ Node* is_oddball = assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to Number and check again.
+ value_var.Bind(
+ assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback.Bind(assembler->Int32Constant(
+ BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&start);
+ }
+
+ assembler->Bind(&if_valuenotoddball);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
}
}
}
@@ -2711,7 +1929,7 @@ compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
var_type_feedback.Bind(assembler->Word32Or(
var_type_feedback.value(),
assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(assembler->ChangeFloat64ToTagged(fdec_result));
+ result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
assembler->Goto(&end);
}
@@ -2730,1439 +1948,6 @@ compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
return assembler->SubString(context, string, from, to);
}
-// ES6 section 7.1.13 ToObject (argument)
-void ToObjectStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
- if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
- if_wrapjsvalue(assembler);
-
- Node* object = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Variable constructor_function_index_var(assembler,
- MachineType::PointerRepresentation());
-
- assembler->Branch(assembler->WordIsSmi(object), &if_number, &if_notsmi);
-
- assembler->Bind(&if_notsmi);
- Node* map = assembler->LoadMap(object);
-
- assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
-
- Node* instance_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
- &if_jsreceiver);
-
- Node* constructor_function_index =
- assembler->LoadMapConstructorFunctionIndex(map);
- assembler->GotoIf(assembler->WordEqual(constructor_function_index,
- assembler->IntPtrConstant(
- Map::kNoConstructorFunctionIndex)),
- &if_noconstructor);
- constructor_function_index_var.Bind(constructor_function_index);
- assembler->Goto(&if_wrapjsvalue);
-
- assembler->Bind(&if_number);
- constructor_function_index_var.Bind(
- assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
- assembler->Goto(&if_wrapjsvalue);
-
- assembler->Bind(&if_wrapjsvalue);
- Node* native_context = assembler->LoadNativeContext(context);
- Node* constructor = assembler->LoadFixedArrayElement(
- native_context, constructor_function_index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* initial_map = assembler->LoadObjectField(
- constructor, JSFunction::kPrototypeOrInitialMapOffset);
- Node* js_value = assembler->Allocate(JSValue::kSize);
- assembler->StoreMapNoWriteBarrier(js_value, initial_map);
- assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
- assembler->Return(js_value);
-
- assembler->Bind(&if_noconstructor);
- assembler->TailCallRuntime(
- Runtime::kThrowUndefinedOrNullToObject, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "ToObject", TENURED)));
-
- assembler->Bind(&if_jsreceiver);
- assembler->Return(object);
-}
-
-// static
-// ES6 section 12.5.5 typeof operator
-compiler::Node* TypeofStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::Node* context) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Variable result_var(assembler, MachineRepresentation::kTagged);
-
- Label return_number(assembler, Label::kDeferred), if_oddball(assembler),
- return_function(assembler), return_undefined(assembler),
- return_object(assembler), return_string(assembler),
- return_result(assembler);
-
- assembler->GotoIf(assembler->WordIsSmi(value), &return_number);
-
- Node* map = assembler->LoadMap(value);
-
- assembler->GotoIf(assembler->IsHeapNumberMap(map), &return_number);
-
- Node* instance_type = assembler->LoadMapInstanceType(map);
-
- assembler->GotoIf(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
- &if_oddball);
-
- Node* callable_or_undetectable_mask =
- assembler->Word32And(assembler->LoadMapBitField(map),
- assembler->Int32Constant(1 << Map::kIsCallable |
- 1 << Map::kIsUndetectable));
-
- assembler->GotoIf(
- assembler->Word32Equal(callable_or_undetectable_mask,
- assembler->Int32Constant(1 << Map::kIsCallable)),
- &return_function);
-
- assembler->GotoUnless(assembler->Word32Equal(callable_or_undetectable_mask,
- assembler->Int32Constant(0)),
- &return_undefined);
-
- assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
- &return_object);
-
- assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
- &return_string);
-
-#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
- Label return_##type(assembler); \
- Node* type##_map = \
- assembler->HeapConstant(assembler->factory()->type##_map()); \
- assembler->GotoIf(assembler->WordEqual(map, type##_map), &return_##type);
- SIMD128_TYPES(SIMD128_BRANCH)
-#undef SIMD128_BRANCH
-
- assembler->Assert(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(SYMBOL_TYPE)));
- result_var.Bind(assembler->HeapConstant(
- assembler->isolate()->factory()->symbol_string()));
- assembler->Goto(&return_result);
-
- assembler->Bind(&return_number);
- {
- result_var.Bind(assembler->HeapConstant(
- assembler->isolate()->factory()->number_string()));
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&if_oddball);
- {
- Node* type = assembler->LoadObjectField(value, Oddball::kTypeOfOffset);
- result_var.Bind(type);
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&return_function);
- {
- result_var.Bind(assembler->HeapConstant(
- assembler->isolate()->factory()->function_string()));
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&return_undefined);
- {
- result_var.Bind(assembler->HeapConstant(
- assembler->isolate()->factory()->undefined_string()));
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&return_object);
- {
- result_var.Bind(assembler->HeapConstant(
- assembler->isolate()->factory()->object_string()));
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&return_string);
- {
- result_var.Bind(assembler->HeapConstant(
- assembler->isolate()->factory()->string_string()));
- assembler->Goto(&return_result);
- }
-
-#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type) \
- assembler->Bind(&return_##type); \
- { \
- result_var.Bind(assembler->HeapConstant( \
- assembler->isolate()->factory()->type##_string())); \
- assembler->Goto(&return_result); \
- }
- SIMD128_TYPES(SIMD128_BIND_RETURN)
-#undef SIMD128_BIND_RETURN
-
- assembler->Bind(&return_result);
- return result_var.value();
-}
-
-// static
-compiler::Node* InstanceOfStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* object,
- compiler::Node* callable,
- compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label return_runtime(assembler, Label::kDeferred), end(assembler);
- Variable result(assembler, MachineRepresentation::kTagged);
-
- // Check if no one installed @@hasInstance somewhere.
- assembler->GotoUnless(
- assembler->WordEqual(
- assembler->LoadObjectField(
- assembler->LoadRoot(Heap::kHasInstanceProtectorRootIndex),
- PropertyCell::kValueOffset),
- assembler->SmiConstant(Smi::FromInt(Isolate::kArrayProtectorValid))),
- &return_runtime);
-
- // Check if {callable} is a valid receiver.
- assembler->GotoIf(assembler->WordIsSmi(callable), &return_runtime);
- assembler->GotoIf(
- assembler->Word32Equal(
- assembler->Word32And(
- assembler->LoadMapBitField(assembler->LoadMap(callable)),
- assembler->Int32Constant(1 << Map::kIsCallable)),
- assembler->Int32Constant(0)),
- &return_runtime);
-
- // Use the inline OrdinaryHasInstance directly.
- result.Bind(assembler->OrdinaryHasInstance(context, callable, object));
- assembler->Goto(&end);
-
- // TODO(bmeurer): Use GetPropertyStub here once available.
- assembler->Bind(&return_runtime);
- {
- result.Bind(assembler->CallRuntime(Runtime::kInstanceOf, context, object,
- callable));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-namespace {
-
-enum RelationalComparisonMode {
- kLessThan,
- kLessThanOrEqual,
- kGreaterThan,
- kGreaterThanOrEqual
-};
-
-compiler::Node* GenerateAbstractRelationalComparison(
- CodeStubAssembler* assembler, RelationalComparisonMode mode,
- compiler::Node* lhs, compiler::Node* rhs, compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Label return_true(assembler), return_false(assembler), end(assembler);
- Variable result(assembler, MachineRepresentation::kTagged);
-
- // Shared entry for floating point comparison.
- Label do_fcmp(assembler);
- Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
- var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
-
- // We might need to loop several times due to ToPrimitive and/or ToNumber
- // conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {lhs} and {rhs} values.
- lhs = var_lhs.value();
- rhs = var_rhs.value();
-
- // Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
- assembler->Bind(&if_lhsissmi);
- {
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
- switch (mode) {
- case kLessThan:
- assembler->BranchIfSmiLessThan(lhs, rhs, &return_true,
- &return_false);
- break;
- case kLessThanOrEqual:
- assembler->BranchIfSmiLessThanOrEqual(lhs, rhs, &return_true,
- &return_false);
- break;
- case kGreaterThan:
- assembler->BranchIfSmiLessThan(rhs, lhs, &return_true,
- &return_false);
- break;
- case kGreaterThanOrEqual:
- assembler->BranchIfSmiLessThanOrEqual(rhs, lhs, &return_true,
- &return_false);
- break;
- }
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fcmp);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number; we don't need to perform the
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumber(rhs) will by itself already invoke ToPrimitive with
- // a Number hint.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotsmi);
- {
- // Load the HeapNumber map for later comparisons.
- Node* number_map = assembler->HeapNumberMapConstant();
-
- // Load the map of {lhs}.
- Node* lhs_map = assembler->LoadMap(lhs);
-
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
- {
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fcmp);
- }
-
- assembler->Bind(&if_lhsisnotnumber);
- {
- // Convert the {lhs} to a Number; we don't need to perform the
- // dedicated ToPrimitive(lhs, hint Number) operation, as the
- // ToNumber(lhs) will by itself already invoke ToPrimitive with
- // a Number hint.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
- {
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(lhs_map, rhs_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Convert the {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fcmp);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Convert the {rhs} to a Number; we don't need to perform
- // dedicated ToPrimitive(rhs, hint Number) operation, as the
- // ToNumber(rhs) will by itself already invoke ToPrimitive with
- // a Number hint.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
-
- assembler->Bind(&if_lhsisnotnumber);
- {
- // Load the instance type of {lhs}.
- Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
-
- // Check if {lhs} is a String.
- Label if_lhsisstring(assembler),
- if_lhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
- &if_lhsisstring, &if_lhsisnotstring);
-
- assembler->Bind(&if_lhsisstring);
- {
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
- // Check if {rhs} is also a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- // Both {lhs} and {rhs} are strings.
- switch (mode) {
- case kLessThan:
- result.Bind(assembler->CallStub(
- CodeFactory::StringLessThan(assembler->isolate()),
- context, lhs, rhs));
- assembler->Goto(&end);
- break;
- case kLessThanOrEqual:
- result.Bind(assembler->CallStub(
- CodeFactory::StringLessThanOrEqual(assembler->isolate()),
- context, lhs, rhs));
- assembler->Goto(&end);
- break;
- case kGreaterThan:
- result.Bind(assembler->CallStub(
- CodeFactory::StringGreaterThan(assembler->isolate()),
- context, lhs, rhs));
- assembler->Goto(&end);
- break;
- case kGreaterThanOrEqual:
- result.Bind(
- assembler->CallStub(CodeFactory::StringGreaterThanOrEqual(
- assembler->isolate()),
- context, lhs, rhs));
- assembler->Goto(&end);
- break;
- }
- }
-
- assembler->Bind(&if_rhsisnotstring);
- {
- // The {lhs} is a String, while {rhs} is neither a Number nor a
- // String, so we need to call ToPrimitive(rhs, hint Number) if
- // {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
- // other cases.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first passing Number hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate(), ToPrimitiveHint::kNumber);
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // Convert both {lhs} and {rhs} to Number.
- Callable callable = CodeFactory::ToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotstring);
- {
- // The {lhs} is neither a Number nor a String, so we need to call
- // ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
- // ToNumber(lhs) and ToNumber(rhs) in the other cases.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
-
- assembler->Bind(&if_lhsisreceiver);
- {
- // Convert {lhs} to a primitive first passing Number hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate(), ToPrimitiveHint::kNumber);
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_lhsisnotreceiver);
- {
- // Convert both {lhs} and {rhs} to Number.
- Callable callable = CodeFactory::ToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
- }
- }
- }
- }
-
- assembler->Bind(&do_fcmp);
- {
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
- switch (mode) {
- case kLessThan:
- assembler->BranchIfFloat64LessThan(lhs, rhs, &return_true,
- &return_false);
- break;
- case kLessThanOrEqual:
- assembler->BranchIfFloat64LessThanOrEqual(lhs, rhs, &return_true,
- &return_false);
- break;
- case kGreaterThan:
- assembler->BranchIfFloat64GreaterThan(lhs, rhs, &return_true,
- &return_false);
- break;
- case kGreaterThanOrEqual:
- assembler->BranchIfFloat64GreaterThanOrEqual(lhs, rhs, &return_true,
- &return_false);
- break;
- }
- }
-
- assembler->Bind(&return_true);
- {
- result.Bind(assembler->BooleanConstant(true));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&return_false);
- {
- result.Bind(assembler->BooleanConstant(false));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-enum ResultMode { kDontNegateResult, kNegateResult };
-
-void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
- CodeStubAssembler::Label* if_equal,
- CodeStubAssembler::Label* if_notequal) {
- // In case of abstract or strict equality checks, we need additional checks
- // for NaN values because they are not considered equal, even if both the
- // left and the right hand side reference exactly the same value.
- // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
- // seems to be what is tested in the current SIMD.js testsuite.
-
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- // Check if {value} is a Smi or a HeapObject.
- Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(value), &if_valueissmi,
- &if_valueisnotsmi);
-
- assembler->Bind(&if_valueisnotsmi);
- {
- // Load the map of {value}.
- Node* value_map = assembler->LoadMap(value);
-
- // Check if {value} (and therefore {rhs}) is a HeapNumber.
- Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
- assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
- &if_valueisnotnumber);
-
- assembler->Bind(&if_valueisnumber);
- {
- // Convert {value} (and therefore {rhs}) to floating point value.
- Node* value_value = assembler->LoadHeapNumberValue(value);
-
- // Check if the HeapNumber value is a NaN.
- assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
- }
-
- assembler->Bind(&if_valueisnotnumber);
- assembler->Goto(if_equal);
- }
-
- assembler->Bind(&if_valueissmi);
- assembler->Goto(if_equal);
-}
-
-void GenerateEqual_Simd128Value_HeapObject(
- CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
- compiler::Node* rhs, compiler::Node* rhs_map,
- CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
- assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
- if_notequal);
-}
-
-// ES6 section 7.2.12 Abstract Equality Comparison
-compiler::Node* GenerateEqual(CodeStubAssembler* assembler, ResultMode mode,
- compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context) {
- // This is a slightly optimized version of Object::Equals represented as
- // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
- // change something functionality wise in here, remember to update the
- // Object::Equals method as well.
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Label if_equal(assembler), if_notequal(assembler),
- do_rhsstringtonumber(assembler, Label::kDeferred), end(assembler);
- Variable result(assembler, MachineRepresentation::kTagged);
-
- // Shared entry for floating point comparison.
- Label do_fcmp(assembler);
- Variable var_fcmp_lhs(assembler, MachineRepresentation::kFloat64),
- var_fcmp_rhs(assembler, MachineRepresentation::kFloat64);
-
- // We might need to loop several times due to ToPrimitive and/or ToNumber
- // conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
- var_lhs.Bind(lhs);
- var_rhs.Bind(rhs);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Load the current {lhs} and {rhs} values.
- lhs = var_lhs.value();
- rhs = var_rhs.value();
-
- // Check if {lhs} and {rhs} refer to the same object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
- assembler->Bind(&if_same);
- {
- // The {lhs} and {rhs} reference the exact same value, yet we need special
- // treatment for HeapNumber, as NaN is not equal to NaN.
- GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
- }
-
- assembler->Bind(&if_notsame);
- {
- // Check if {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi,
- &if_lhsisnotsmi);
-
- assembler->Bind(&if_lhsissmi);
- {
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- // We have already checked for {lhs} and {rhs} being the same value, so
- // if both are Smis when we get here they must not be equal.
- assembler->Goto(&if_notequal);
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {rhs} is a HeapNumber.
- Node* number_map = assembler->HeapNumberMapConstant();
- Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Convert {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fcmp);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // Load the instance type of the {rhs}.
- Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
- // Check if the {rhs} is a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler);
- assembler->Branch(
- assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- // The {rhs} is a String and the {lhs} is a Smi; we need
- // to convert the {rhs} to a Number and compare the output to
- // the Number on the {lhs}.
- assembler->Goto(&do_rhsstringtonumber);
- }
-
- assembler->Bind(&if_rhsisnotstring);
- {
- // Check if the {rhs} is a Boolean.
- Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
- assembler->Branch(assembler->IsBooleanMap(rhs_map),
- &if_rhsisboolean, &if_rhsisnotboolean);
-
- assembler->Bind(&if_rhsisboolean);
- {
- // The {rhs} is a Boolean, load its number value.
- var_rhs.Bind(
- assembler->LoadObjectField(rhs, Oddball::kToNumberOffset));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotboolean);
- {
- // Check if the {rhs} is a Receiver.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // Convert {rhs} to a primitive first (passing no hint).
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- assembler->Goto(&if_notequal);
- }
- }
- }
- }
- }
-
- assembler->Bind(&if_lhsisnotsmi);
- {
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // The {lhs} is a HeapObject and the {rhs} is a Smi; swapping {lhs}
- // and {rhs} is not observable and doesn't matter for the result, so
- // we can just swap them and use the Smi handling above (for {lhs}
- // being a Smi).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- Label if_lhsisstring(assembler), if_lhsisnumber(assembler),
- if_lhsissymbol(assembler), if_lhsissimd128value(assembler),
- if_lhsisoddball(assembler), if_lhsisreceiver(assembler);
-
- // Both {lhs} and {rhs} are HeapObjects, load their maps
- // and their instance types.
- Node* lhs_map = assembler->LoadMap(lhs);
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Load the instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
- Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
-
- // Dispatch based on the instance type of {lhs}.
- size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
- Label* case_labels[kNumCases];
- int32_t case_values[kNumCases];
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- case_labels[i] = new Label(assembler);
- case_values[i] = i;
- }
- case_labels[FIRST_NONSTRING_TYPE + 0] = &if_lhsisnumber;
- case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
- case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
- case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
- case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
- case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
- assembler->Switch(lhs_instance_type, &if_lhsisreceiver, case_values,
- case_labels, arraysize(case_values));
- for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- assembler->Bind(case_labels[i]);
- assembler->Goto(&if_lhsisstring);
- delete case_labels[i];
- }
-
- assembler->Bind(&if_lhsisstring);
- {
- // Check if {rhs} is also a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler);
- assembler->Branch(
- assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- // Both {lhs} and {rhs} are of type String, just do the
- // string comparison then.
- Callable callable =
- (mode == kDontNegateResult)
- ? CodeFactory::StringEqual(assembler->isolate())
- : CodeFactory::StringNotEqual(assembler->isolate());
- result.Bind(assembler->CallStub(callable, context, lhs, rhs));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&if_rhsisnotstring);
- {
- // The {lhs} is a String and the {rhs} is some other HeapObject.
- // Swapping {lhs} and {rhs} is not observable and doesn't matter
- // for the result, so we can just swap them and use the String
- // handling below (for {rhs} being a String).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
- }
-
- assembler->Bind(&if_lhsisnumber);
- {
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
- assembler->Branch(
- assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Convert {lhs} and {rhs} to floating point values, and
- // perform a floating point comparison.
- var_fcmp_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fcmp);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- {
- // The {lhs} is a Number, the {rhs} is some other HeapObject.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler);
- assembler->Branch(
- assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- // The {rhs} is a String and the {lhs} is a HeapNumber; we need
- // to convert the {rhs} to a Number and compare the output to
- // the Number on the {lhs}.
- assembler->Goto(&do_rhsstringtonumber);
- }
-
- assembler->Bind(&if_rhsisnotstring);
- {
- // Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler),
- if_rhsisnotreceiver(assembler);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
- // Swapping {lhs} and {rhs} is not observable and doesn't
- // matter for the result, so we can just swap them and use
- // the JSReceiver handling below (for {lhs} being a
- // JSReceiver).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // Check if {rhs} is a Boolean.
- Label if_rhsisboolean(assembler),
- if_rhsisnotboolean(assembler);
- assembler->Branch(assembler->IsBooleanMap(rhs_map),
- &if_rhsisboolean, &if_rhsisnotboolean);
-
- assembler->Bind(&if_rhsisboolean);
- {
- // The {rhs} is a Boolean, convert it to a Smi first.
- var_rhs.Bind(assembler->LoadObjectField(
- rhs, Oddball::kToNumberOffset));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotboolean);
- assembler->Goto(&if_notequal);
- }
- }
- }
- }
-
- assembler->Bind(&if_lhsisoddball);
- {
- // The {lhs} is an Oddball and {rhs} is some other HeapObject.
- Label if_lhsisboolean(assembler), if_lhsisnotboolean(assembler);
- Node* boolean_map = assembler->BooleanMapConstant();
- assembler->Branch(assembler->WordEqual(lhs_map, boolean_map),
- &if_lhsisboolean, &if_lhsisnotboolean);
-
- assembler->Bind(&if_lhsisboolean);
- {
- // The {lhs} is a Boolean, check if {rhs} is also a Boolean.
- Label if_rhsisboolean(assembler), if_rhsisnotboolean(assembler);
- assembler->Branch(assembler->WordEqual(rhs_map, boolean_map),
- &if_rhsisboolean, &if_rhsisnotboolean);
-
- assembler->Bind(&if_rhsisboolean);
- {
- // Both {lhs} and {rhs} are distinct Boolean values.
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_rhsisnotboolean);
- {
- // Convert the {lhs} to a Number first.
- var_lhs.Bind(
- assembler->LoadObjectField(lhs, Oddball::kToNumberOffset));
- assembler->Goto(&loop);
- }
- }
-
- assembler->Bind(&if_lhsisnotboolean);
- {
- // The {lhs} is either Null or Undefined; check if the {rhs} is
- // undetectable (i.e. either also Null or Undefined or some
- // undetectable JSReceiver).
- Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
- assembler->BranchIfWord32Equal(
- assembler->Word32And(
- rhs_bitfield,
- assembler->Int32Constant(1 << Map::kIsUndetectable)),
- assembler->Int32Constant(0), &if_notequal, &if_equal);
- }
- }
-
- assembler->Bind(&if_lhsissymbol);
- {
- // Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
- // Swapping {lhs} and {rhs} is not observable and doesn't
- // matter for the result, so we can just swap them and use
- // the JSReceiver handling below (for {lhs} being a JSReceiver).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // The {rhs} is not a JSReceiver and also not the same Symbol
- // as the {lhs}, so this is equality check is considered false.
- assembler->Goto(&if_notequal);
- }
- }
-
- assembler->Bind(&if_lhsissimd128value);
- {
- // Check if the {rhs} is also a Simd128Value.
- Label if_rhsissimd128value(assembler),
- if_rhsisnotsimd128value(assembler);
- assembler->Branch(
- assembler->Word32Equal(lhs_instance_type, rhs_instance_type),
- &if_rhsissimd128value, &if_rhsisnotsimd128value);
-
- assembler->Bind(&if_rhsissimd128value);
- {
- // Both {lhs} and {rhs} is a Simd128Value.
- GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
- rhs, rhs_map, &if_equal,
- &if_notequal);
- }
-
- assembler->Bind(&if_rhsisnotsimd128value);
- {
- // Check if the {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
- // Swapping {lhs} and {rhs} is not observable and doesn't
- // matter for the result, so we can just swap them and use
- // the JSReceiver handling below (for {lhs} being a JSReceiver).
- var_lhs.Bind(rhs);
- var_rhs.Bind(lhs);
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // The {rhs} is some other Primitive.
- assembler->Goto(&if_notequal);
- }
- }
- }
-
- assembler->Bind(&if_lhsisreceiver);
- {
- // Check if the {rhs} is also a JSReceiver.
- Label if_rhsisreceiver(assembler), if_rhsisnotreceiver(assembler);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
-
- assembler->Bind(&if_rhsisreceiver);
- {
- // Both {lhs} and {rhs} are different JSReceiver references, so
- // this cannot be considered equal.
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_rhsisnotreceiver);
- {
- // Check if {rhs} is Null or Undefined (an undetectable check
- // is sufficient here, since we already know that {rhs} is not
- // a JSReceiver).
- Label if_rhsisundetectable(assembler),
- if_rhsisnotundetectable(assembler, Label::kDeferred);
- Node* rhs_bitfield = assembler->LoadMapBitField(rhs_map);
- assembler->BranchIfWord32Equal(
- assembler->Word32And(
- rhs_bitfield,
- assembler->Int32Constant(1 << Map::kIsUndetectable)),
- assembler->Int32Constant(0), &if_rhsisnotundetectable,
- &if_rhsisundetectable);
-
- assembler->Bind(&if_rhsisundetectable);
- {
- // Check if {lhs} is an undetectable JSReceiver.
- Node* lhs_bitfield = assembler->LoadMapBitField(lhs_map);
- assembler->BranchIfWord32Equal(
- assembler->Word32And(
- lhs_bitfield,
- assembler->Int32Constant(1 << Map::kIsUndetectable)),
- assembler->Int32Constant(0), &if_notequal, &if_equal);
- }
-
- assembler->Bind(&if_rhsisnotundetectable);
- {
- // The {rhs} is some Primitive different from Null and
- // Undefined, need to convert {lhs} to Primitive first.
- Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
- }
- }
- }
- }
- }
- }
-
- assembler->Bind(&do_rhsstringtonumber);
- {
- Callable callable = CodeFactory::StringToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
- }
- }
-
- assembler->Bind(&do_fcmp);
- {
- // Load the {lhs} and {rhs} floating point values.
- Node* lhs = var_fcmp_lhs.value();
- Node* rhs = var_fcmp_rhs.value();
-
- // Perform a fast floating point comparison.
- assembler->BranchIfFloat64Equal(lhs, rhs, &if_equal, &if_notequal);
- }
-
- assembler->Bind(&if_equal);
- {
- result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&if_notequal);
- {
- result.Bind(assembler->BooleanConstant(mode == kNegateResult));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-compiler::Node* GenerateStrictEqual(CodeStubAssembler* assembler,
- ResultMode mode, compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- // Here's pseudo-code for the algorithm below in case of kDontNegateResult
- // mode; for kNegateResult mode we properly negate the result.
- //
- // if (lhs == rhs) {
- // if (lhs->IsHeapNumber()) return HeapNumber::cast(lhs)->value() != NaN;
- // return true;
- // }
- // if (!lhs->IsSmi()) {
- // if (lhs->IsHeapNumber()) {
- // if (rhs->IsSmi()) {
- // return Smi::cast(rhs)->value() == HeapNumber::cast(lhs)->value();
- // } else if (rhs->IsHeapNumber()) {
- // return HeapNumber::cast(rhs)->value() ==
- // HeapNumber::cast(lhs)->value();
- // } else {
- // return false;
- // }
- // } else {
- // if (rhs->IsSmi()) {
- // return false;
- // } else {
- // if (lhs->IsString()) {
- // if (rhs->IsString()) {
- // return %StringEqual(lhs, rhs);
- // } else {
- // return false;
- // }
- // } else if (lhs->IsSimd128()) {
- // if (rhs->IsSimd128()) {
- // return %StrictEqual(lhs, rhs);
- // }
- // } else {
- // return false;
- // }
- // }
- // }
- // } else {
- // if (rhs->IsSmi()) {
- // return false;
- // } else {
- // if (rhs->IsHeapNumber()) {
- // return Smi::cast(lhs)->value() == HeapNumber::cast(rhs)->value();
- // } else {
- // return false;
- // }
- // }
- // }
-
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- typedef compiler::Node Node;
-
- Label if_equal(assembler), if_notequal(assembler), end(assembler);
- Variable result(assembler, MachineRepresentation::kTagged);
-
- // Check if {lhs} and {rhs} refer to the same object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
- assembler->Bind(&if_same);
- {
- // The {lhs} and {rhs} reference the exact same value, yet we need special
- // treatment for HeapNumber, as NaN is not equal to NaN.
- GenerateEqual_Same(assembler, lhs, &if_equal, &if_notequal);
- }
-
- assembler->Bind(&if_notsame);
- {
- // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
- // String and Simd128Value they can still be considered equal.
- Node* number_map = assembler->HeapNumberMapConstant();
-
- // Check if {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
-
- assembler->Bind(&if_lhsisnotsmi);
- {
- // Load the map of {lhs}.
- Node* lhs_map = assembler->LoadMap(lhs);
-
- // Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
- {
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- {
- // Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
- Node* rhs_value = assembler->SmiToFloat64(rhs);
-
- // Perform a floating point comparison of {lhs} and {rhs}.
- assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
- &if_notequal);
- }
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = assembler->LoadHeapNumberValue(lhs);
- Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
-
- // Perform a floating point comparison of {lhs} and {rhs}.
- assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
- &if_notequal);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- assembler->Goto(&if_notequal);
- }
- }
-
- assembler->Bind(&if_lhsisnotnumber);
- {
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- assembler->Goto(&if_notequal);
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the instance type of {lhs}.
- Node* lhs_instance_type = assembler->LoadMapInstanceType(lhs_map);
-
- // Check if {lhs} is a String.
- Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
- &if_lhsisstring, &if_lhsisnotstring);
-
- assembler->Bind(&if_lhsisstring);
- {
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Check if {rhs} is also a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler);
- assembler->Branch(
- assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
-
- assembler->Bind(&if_rhsisstring);
- {
- Callable callable =
- (mode == kDontNegateResult)
- ? CodeFactory::StringEqual(assembler->isolate())
- : CodeFactory::StringNotEqual(assembler->isolate());
- result.Bind(assembler->CallStub(callable, context, lhs, rhs));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&if_rhsisnotstring);
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_lhsisnotstring);
- {
- // Check if {lhs} is a Simd128Value.
- Label if_lhsissimd128value(assembler),
- if_lhsisnotsimd128value(assembler);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
- &if_lhsissimd128value, &if_lhsisnotsimd128value);
-
- assembler->Bind(&if_lhsissimd128value);
- {
- // Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
- GenerateEqual_Simd128Value_HeapObject(assembler, lhs, lhs_map,
- rhs, rhs_map, &if_equal,
- &if_notequal);
- }
-
- assembler->Bind(&if_lhsisnotsimd128value);
- assembler->Goto(&if_notequal);
- }
- }
- }
- }
-
- assembler->Bind(&if_lhsissmi);
- {
- // We already know that {lhs} and {rhs} are not reference equal, and {lhs}
- // is a Smi; so {lhs} and {rhs} can only be strictly equal if {rhs} is a
- // HeapNumber with an equal floating point value.
-
- // Check if {rhs} is a Smi or a HeapObject.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->WordIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
-
- assembler->Bind(&if_rhsissmi);
- assembler->Goto(&if_notequal);
-
- assembler->Bind(&if_rhsisnotsmi);
- {
- // Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
-
- // The {rhs} could be a HeapNumber with the same value as {lhs}.
- Label if_rhsisnumber(assembler), if_rhsisnotnumber(assembler);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
- {
- // Convert {lhs} and {rhs} to floating point values.
- Node* lhs_value = assembler->SmiToFloat64(lhs);
- Node* rhs_value = assembler->LoadHeapNumberValue(rhs);
-
- // Perform a floating point comparison of {lhs} and {rhs}.
- assembler->BranchIfFloat64Equal(lhs_value, rhs_value, &if_equal,
- &if_notequal);
- }
-
- assembler->Bind(&if_rhsisnotnumber);
- assembler->Goto(&if_notequal);
- }
- }
- }
-
- assembler->Bind(&if_equal);
- {
- result.Bind(assembler->BooleanConstant(mode == kDontNegateResult));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&if_notequal);
- {
- result.Bind(assembler->BooleanConstant(mode == kNegateResult));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-} // namespace
-
void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
Node* context = assembler->Parameter(Descriptor::kContext);
@@ -4244,7 +2029,7 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
StoreGlobalStub::global_map_placeholder(isolate())));
- Node* expected_map = assembler->LoadWeakCellValue(map_cell);
+ Node* expected_map = assembler->LoadWeakCellValueUnchecked(map_cell);
Node* map = assembler->LoadMap(global);
assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
}
@@ -4252,7 +2037,7 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
StoreGlobalStub::property_cell_placeholder(isolate())));
Node* cell = assembler->LoadWeakCellValue(weak_cell);
- assembler->GotoIf(assembler->WordIsSmi(cell), &miss);
+ assembler->GotoIf(assembler->TaggedIsSmi(cell), &miss);
// Load the payload of the global parameter cell. A hole indicates that the
// cell has been invalidated and that the store must be handled by the
@@ -4274,7 +2059,7 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
if (cell_type == PropertyCellType::kConstantType) {
switch (constant_type()) {
case PropertyCellConstantType::kSmi:
- assembler->GotoUnless(assembler->WordIsSmi(value), &miss);
+ assembler->GotoUnless(assembler->TaggedIsSmi(value), &miss);
value_is_smi = true;
break;
case PropertyCellConstantType::kStableMap: {
@@ -4283,8 +2068,8 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
// are the maps that were originally in the cell or not. If optimized
// code will deopt when a cell has a unstable map and if it has a
// dependency on a stable map, it will deopt if the map destabilizes.
- assembler->GotoIf(assembler->WordIsSmi(value), &miss);
- assembler->GotoIf(assembler->WordIsSmi(cell_contents), &miss);
+ assembler->GotoIf(assembler->TaggedIsSmi(value), &miss);
+ assembler->GotoIf(assembler->TaggedIsSmi(cell_contents), &miss);
Node* expected_map = assembler->LoadMap(cell_contents);
Node* map = assembler->LoadMap(value);
assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
@@ -4392,155 +2177,6 @@ void StoreScriptContextFieldStub::GenerateAssembly(
assembler->Return(value);
}
-// static
-compiler::Node* LessThanStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateAbstractRelationalComparison(assembler, kLessThan, lhs, rhs,
- context);
-}
-
-// static
-compiler::Node* LessThanOrEqualStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateAbstractRelationalComparison(assembler, kLessThanOrEqual, lhs,
- rhs, context);
-}
-
-// static
-compiler::Node* GreaterThanStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateAbstractRelationalComparison(assembler, kGreaterThan, lhs, rhs,
- context);
-}
-
-// static
-compiler::Node* GreaterThanOrEqualStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateAbstractRelationalComparison(assembler, kGreaterThanOrEqual,
- lhs, rhs, context);
-}
-
-// static
-compiler::Node* EqualStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateEqual(assembler, kDontNegateResult, lhs, rhs, context);
-}
-
-// static
-compiler::Node* NotEqualStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateEqual(assembler, kNegateResult, lhs, rhs, context);
-}
-
-// static
-compiler::Node* StrictEqualStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateStrictEqual(assembler, kDontNegateResult, lhs, rhs, context);
-}
-
-// static
-compiler::Node* StrictNotEqualStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- return GenerateStrictEqual(assembler, kNegateResult, lhs, rhs, context);
-}
-
-void ToLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(1);
-
- // We might need to loop once for ToNumber conversion.
- Variable var_len(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_len);
- var_len.Bind(assembler->Parameter(0));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Shared entry points.
- Label return_len(assembler),
- return_two53minus1(assembler, Label::kDeferred),
- return_zero(assembler, Label::kDeferred);
-
- // Load the current {len} value.
- Node* len = var_len.value();
-
- // Check if {len} is a positive Smi.
- assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
-
- // Check if {len} is a (negative) Smi.
- assembler->GotoIf(assembler->WordIsSmi(len), &return_zero);
-
- // Check if {len} is a HeapNumber.
- Label if_lenisheapnumber(assembler),
- if_lenisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
- &if_lenisheapnumber, &if_lenisnotheapnumber);
-
- assembler->Bind(&if_lenisheapnumber);
- {
- // Load the floating-point value of {len}.
- Node* len_value = assembler->LoadHeapNumberValue(len);
-
- // Check if {len} is not greater than zero.
- assembler->GotoUnless(assembler->Float64GreaterThan(
- len_value, assembler->Float64Constant(0.0)),
- &return_zero);
-
- // Check if {len} is greater than or equal to 2^53-1.
- assembler->GotoIf(
- assembler->Float64GreaterThanOrEqual(
- len_value, assembler->Float64Constant(kMaxSafeInteger)),
- &return_two53minus1);
-
- // Round the {len} towards -Infinity.
- Node* value = assembler->Float64Floor(len_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
- }
-
- assembler->Bind(&if_lenisnotheapnumber);
- {
- // Need to convert {len} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_len.Bind(assembler->CallStub(callable, context, len));
- assembler->Goto(&loop);
- }
-
- assembler->Bind(&return_len);
- assembler->Return(var_len.value());
-
- assembler->Bind(&return_two53minus1);
- assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
-
- assembler->Bind(&return_zero);
- assembler->Return(assembler->SmiConstant(Smi::FromInt(0)));
- }
-}
-
-void ToIntegerStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- assembler->Return(assembler->ToInteger(context, input));
-}
-
void StoreInterceptorStub::GenerateAssembly(
CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
@@ -4713,27 +2349,6 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
os << ": " << from << "=>" << to << "]" << std::endl;
}
-
-// TODO(svenpanne) Make this a real infix_ostream_iterator.
-class SimpleListPrinter {
- public:
- explicit SimpleListPrinter(std::ostream& os) : os_(os), first_(true) {}
-
- void Add(const char* s) {
- if (first_) {
- first_ = false;
- } else {
- os_ << ",";
- }
- os_ << s;
- }
-
- private:
- std::ostream& os_;
- bool first_;
-};
-
-
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
os << state();
}
@@ -4753,14 +2368,6 @@ void LoadDictionaryElementStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
}
-
-void KeyedLoadGenericStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
-}
-
-
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
if (kind() == Code::KEYED_LOAD_IC) {
@@ -4779,21 +2386,6 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
}
}
-
-void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kNumberToString)->entry);
- descriptor->SetMissHandler(Runtime::kNumberToString);
-}
-
-void RegExpConstructResultStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
- descriptor->SetMissHandler(Runtime::kRegExpConstructResult);
-}
-
-
void TransitionElementsKindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
@@ -4835,118 +2427,6 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
}
-
-void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
- descriptor->SetMissHandler(Runtime::kStringAdd);
-}
-
-namespace {
-
-compiler::Node* GenerateHasProperty(
- CodeStubAssembler* assembler, compiler::Node* object, compiler::Node* key,
- compiler::Node* context, Runtime::FunctionId fallback_runtime_function_id) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label call_runtime(assembler, Label::kDeferred), return_true(assembler),
- return_false(assembler), end(assembler);
-
- CodeStubAssembler::LookupInHolder lookup_property_in_holder =
- [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
- Node* holder_instance_type, Node* unique_name,
- Label* next_holder, Label* if_bailout) {
- assembler->TryHasOwnProperty(holder, holder_map, holder_instance_type,
- unique_name, &return_true, next_holder,
- if_bailout);
- };
-
- CodeStubAssembler::LookupInHolder lookup_element_in_holder =
- [assembler, &return_true](Node* receiver, Node* holder, Node* holder_map,
- Node* holder_instance_type, Node* index,
- Label* next_holder, Label* if_bailout) {
- assembler->TryLookupElement(holder, holder_map, holder_instance_type,
- index, &return_true, next_holder,
- if_bailout);
- };
-
- assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
- lookup_element_in_holder, &return_false,
- &call_runtime);
-
- Variable result(assembler, MachineRepresentation::kTagged);
- assembler->Bind(&return_true);
- {
- result.Bind(assembler->BooleanConstant(true));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&return_false);
- {
- result.Bind(assembler->BooleanConstant(false));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&call_runtime);
- {
- result.Bind(assembler->CallRuntime(fallback_runtime_function_id, context,
- object, key));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-} // namespace
-
-// static
-compiler::Node* HasPropertyStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* key,
- compiler::Node* object,
- compiler::Node* context) {
- return GenerateHasProperty(assembler, object, key, context,
- Runtime::kHasProperty);
-}
-
-// static
-compiler::Node* ForInFilterStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* key,
- compiler::Node* object,
- compiler::Node* context) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label return_undefined(assembler, Label::kDeferred),
- return_to_name(assembler), end(assembler);
-
- Variable var_result(assembler, MachineRepresentation::kTagged);
-
- Node* has_property = GenerateHasProperty(assembler, object, key, context,
- Runtime::kForInHasProperty);
-
- assembler->Branch(
- assembler->WordEqual(has_property, assembler->BooleanConstant(true)),
- &return_to_name, &return_undefined);
-
- assembler->Bind(&return_to_name);
- {
- var_result.Bind(assembler->ToName(context, key));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&return_undefined);
- {
- var_result.Bind(assembler->UndefinedConstant());
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return var_result.value();
-}
-
void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
@@ -5064,12 +2544,13 @@ compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
if (FLAG_debug_code) {
// Function must be a function without a prototype.
- assembler->Assert(assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant((FunctionKind::kAccessorFunction |
- FunctionKind::kArrowFunction |
- FunctionKind::kConciseMethod)
- << SharedFunctionInfo::kFunctionKindShift)));
+ CSA_ASSERT(assembler, assembler->Word32And(
+ compiler_hints,
+ assembler->Int32Constant(
+ (FunctionKind::kAccessorFunction |
+ FunctionKind::kArrowFunction |
+ FunctionKind::kConciseMethod)
+ << SharedFunctionInfo::kFunctionKindShift)));
}
assembler->Goto(&if_function_without_prototype);
@@ -5167,9 +2648,7 @@ void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
compiler::Node* FastNewFunctionContextStub::Generate(
CodeStubAssembler* assembler, compiler::Node* function,
compiler::Node* slots, compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
Node* min_context_slots =
assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
@@ -5208,24 +2687,12 @@ compiler::Node* FastNewFunctionContextStub::Generate(
// Initialize the rest of the slots to undefined.
Node* undefined = assembler->UndefinedConstant();
- Variable var_slot_index(assembler, MachineRepresentation::kWord32);
- var_slot_index.Bind(min_context_slots);
- Label loop(assembler, &var_slot_index), after_loop(assembler);
- assembler->Goto(&loop);
-
- assembler->Bind(&loop);
- {
- Node* slot_index = var_slot_index.value();
- assembler->GotoUnless(assembler->Int32LessThan(slot_index, length),
- &after_loop);
- assembler->StoreFixedArrayElement(function_context, slot_index, undefined,
- SKIP_WRITE_BARRIER);
- Node* one = assembler->Int32Constant(1);
- Node* next_index = assembler->Int32Add(slot_index, one);
- var_slot_index.Bind(next_index);
- assembler->Goto(&loop);
- }
- assembler->Bind(&after_loop);
+ assembler->BuildFastFixedArrayForEach(
+ function_context, FAST_ELEMENTS, min_context_slots, length,
+ [undefined](CodeStubAssembler* assembler, Node* context, Node* offset) {
+ assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
+ context, offset, undefined);
+ });
return function_context;
}
@@ -5380,7 +2847,7 @@ compiler::Node* FastCloneShallowArrayStub::Generate(
allocation_site =
allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
- Node* zero = assembler->SmiConstant(Smi::FromInt(0));
+ Node* zero = assembler->SmiConstant(Smi::kZero);
assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
Node* elements_map = assembler->LoadMap(boilerplate_elements);
@@ -5391,14 +2858,16 @@ compiler::Node* FastCloneShallowArrayStub::Generate(
assembler->Comment("fast double elements path");
if (FLAG_debug_code) {
Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
- assembler->BranchIf(assembler->IsFixedDoubleArrayMap(elements_map),
- &correct_elements_map, &abort);
+ assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
+ &correct_elements_map, &abort);
assembler->Bind(&abort);
{
Node* abort_id = assembler->SmiConstant(
Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
- assembler->TailCallRuntime(Runtime::kAbort, context, abort_id);
+ assembler->CallRuntime(Runtime::kAbort, context, abort_id);
+ result.Bind(assembler->UndefinedConstant());
+ assembler->Goto(&return_result);
}
assembler->Bind(&correct_elements_map);
}
@@ -5544,103 +3013,54 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
}
-void ArrayConstructorStub::PrintName(std::ostream& os) const { // NOLINT
- os << "ArrayConstructorStub";
- switch (argument_count()) {
- case ANY:
- os << "_Any";
- break;
- case NONE:
- os << "_None";
- break;
- case ONE:
- os << "_One";
- break;
- case MORE_THAN_ONE:
- os << "_More_Than_One";
- break;
- }
- return;
-}
-
-
bool ToBooleanICStub::UpdateStatus(Handle<Object> object) {
- Types new_types = types();
- Types old_types = new_types;
- bool to_boolean_value = new_types.UpdateStatus(isolate(), object);
- TraceTransition(old_types, new_types);
- set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToIntegral()));
- return to_boolean_value;
-}
-
-void ToBooleanICStub::PrintState(std::ostream& os) const { // NOLINT
- os << types();
-}
-
-std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& s) {
- os << "(";
- SimpleListPrinter p(os);
- if (s.IsEmpty()) p.Add("None");
- if (s.Contains(ToBooleanICStub::UNDEFINED)) p.Add("Undefined");
- if (s.Contains(ToBooleanICStub::BOOLEAN)) p.Add("Bool");
- if (s.Contains(ToBooleanICStub::NULL_TYPE)) p.Add("Null");
- if (s.Contains(ToBooleanICStub::SMI)) p.Add("Smi");
- if (s.Contains(ToBooleanICStub::SPEC_OBJECT)) p.Add("SpecObject");
- if (s.Contains(ToBooleanICStub::STRING)) p.Add("String");
- if (s.Contains(ToBooleanICStub::SYMBOL)) p.Add("Symbol");
- if (s.Contains(ToBooleanICStub::HEAP_NUMBER)) p.Add("HeapNumber");
- if (s.Contains(ToBooleanICStub::SIMD_VALUE)) p.Add("SimdValue");
- return os << ")";
-}
-
-bool ToBooleanICStub::Types::UpdateStatus(Isolate* isolate,
- Handle<Object> object) {
- if (object->IsUndefined(isolate)) {
- Add(UNDEFINED);
- return false;
+ ToBooleanHints old_hints = hints();
+ ToBooleanHints new_hints = old_hints;
+ bool to_boolean_value = false; // Dummy initialization.
+ if (object->IsUndefined(isolate())) {
+ new_hints |= ToBooleanHint::kUndefined;
+ to_boolean_value = false;
} else if (object->IsBoolean()) {
- Add(BOOLEAN);
- return object->IsTrue(isolate);
- } else if (object->IsNull(isolate)) {
- Add(NULL_TYPE);
- return false;
+ new_hints |= ToBooleanHint::kBoolean;
+ to_boolean_value = object->IsTrue(isolate());
+ } else if (object->IsNull(isolate())) {
+ new_hints |= ToBooleanHint::kNull;
+ to_boolean_value = false;
} else if (object->IsSmi()) {
- Add(SMI);
- return Smi::cast(*object)->value() != 0;
+ new_hints |= ToBooleanHint::kSmallInteger;
+ to_boolean_value = Smi::cast(*object)->value() != 0;
} else if (object->IsJSReceiver()) {
- Add(SPEC_OBJECT);
- return !object->IsUndetectable();
+ new_hints |= ToBooleanHint::kReceiver;
+ to_boolean_value = !object->IsUndetectable();
} else if (object->IsString()) {
DCHECK(!object->IsUndetectable());
- Add(STRING);
- return String::cast(*object)->length() != 0;
+ new_hints |= ToBooleanHint::kString;
+ to_boolean_value = String::cast(*object)->length() != 0;
} else if (object->IsSymbol()) {
- Add(SYMBOL);
- return true;
+ new_hints |= ToBooleanHint::kSymbol;
+ to_boolean_value = true;
} else if (object->IsHeapNumber()) {
DCHECK(!object->IsUndetectable());
- Add(HEAP_NUMBER);
+ new_hints |= ToBooleanHint::kHeapNumber;
double value = HeapNumber::cast(*object)->value();
- return value != 0 && !std::isnan(value);
+ to_boolean_value = value != 0 && !std::isnan(value);
} else if (object->IsSimd128Value()) {
- Add(SIMD_VALUE);
- return true;
+ new_hints |= ToBooleanHint::kSimdValue;
+ to_boolean_value = true;
} else {
// We should never see an internal object at runtime here!
UNREACHABLE();
- return true;
+ to_boolean_value = true;
}
+ TraceTransition(old_hints, new_hints);
+ set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
+ return to_boolean_value;
}
-bool ToBooleanICStub::Types::NeedsMap() const {
- return Contains(ToBooleanICStub::SPEC_OBJECT) ||
- Contains(ToBooleanICStub::STRING) ||
- Contains(ToBooleanICStub::SYMBOL) ||
- Contains(ToBooleanICStub::HEAP_NUMBER) ||
- Contains(ToBooleanICStub::SIMD_VALUE);
+void ToBooleanICStub::PrintState(std::ostream& os) const { // NOLINT
+ os << hints();
}
-
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub stub1(isolate, NOT_JS_FUNCTION_STUB_MODE);
StubFailureTrampolineStub stub2(isolate, JS_FUNCTION_STUB_MODE);
@@ -5687,7 +3107,7 @@ void ArrayNoArgumentConstructorStub::GenerateAssembly(
Node* array = assembler->AllocateJSArray(
elements_kind(), array_map,
assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->SmiConstant(Smi::FromInt(0)), allocation_site);
+ assembler->SmiConstant(Smi::kZero), allocation_site);
assembler->Return(array);
}
@@ -5700,7 +3120,7 @@ void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
Node* array = assembler->AllocateJSArray(
elements_kind(), array_map,
assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->SmiConstant(Smi::FromInt(0)), nullptr);
+ assembler->SmiConstant(Smi::kZero), nullptr);
assembler->Return(array);
}
@@ -5721,14 +3141,14 @@ void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
Label call_runtime(assembler, Label::kDeferred);
Node* size = assembler->Parameter(Descriptor::kArraySizeSmiParameter);
- assembler->Branch(assembler->WordIsSmi(size), &smi_size, &call_runtime);
+ assembler->Branch(assembler->TaggedIsSmi(size), &smi_size, &call_runtime);
assembler->Bind(&smi_size);
if (IsFastPackedElementsKind(elements_kind)) {
Label abort(assembler, Label::kDeferred);
assembler->Branch(
- assembler->SmiEqual(size, assembler->SmiConstant(Smi::FromInt(0))),
+ assembler->SmiEqual(size, assembler->SmiConstant(Smi::kZero)),
&small_smi_size, &abort);
assembler->Bind(&abort);
@@ -5828,42 +3248,10 @@ void GrowArrayElementsStub::GenerateAssembly(
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
- : PlatformCodeStub(isolate) {
- minor_key_ = ArgumentCountBits::encode(ANY);
-}
-
-ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate,
- int argument_count)
- : PlatformCodeStub(isolate) {
- if (argument_count == 0) {
- minor_key_ = ArgumentCountBits::encode(NONE);
- } else if (argument_count == 1) {
- minor_key_ = ArgumentCountBits::encode(ONE);
- } else if (argument_count >= 2) {
- minor_key_ = ArgumentCountBits::encode(MORE_THAN_ONE);
- } else {
- UNREACHABLE();
- }
-}
+ : PlatformCodeStub(isolate) {}
InternalArrayConstructorStub::InternalArrayConstructorStub(Isolate* isolate)
: PlatformCodeStub(isolate) {}
-Representation RepresentationFromMachineType(MachineType type) {
- if (type == MachineType::Int32()) {
- return Representation::Integer32();
- }
-
- if (type == MachineType::TaggedSigned()) {
- return Representation::Smi();
- }
-
- if (type == MachineType::Pointer()) {
- return Representation::External();
- }
-
- return Representation::Tagged();
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 5c83fdebb0..450d0c12c2 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -14,6 +14,7 @@
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
+#include "src/type-hints.h"
namespace v8 {
namespace internal {
@@ -43,10 +44,7 @@ class ObjectLiteral;
V(StoreBufferOverflow) \
V(StoreElement) \
V(SubString) \
- V(StoreIC) \
V(KeyedStoreIC) \
- V(KeyedLoadIC) \
- V(LoadIC) \
V(LoadGlobalIC) \
V(FastNewObject) \
V(FastNewRestParameter) \
@@ -61,13 +59,8 @@ class ObjectLiteral;
/* version of the corresponding stub is */ \
/* used universally */ \
V(CallICTrampoline) \
- V(LoadICTrampoline) \
- V(KeyedLoadICTrampoline) \
V(KeyedStoreICTrampoline) \
- V(StoreICTrampoline) \
/* --- HydrogenCodeStubs --- */ \
- V(NumberToString) \
- V(StringAdd) \
/* These builtins w/ JS linkage are */ \
/* just fast-cases of C++ builtins. They */ \
/* require varg support from TF */ \
@@ -76,7 +69,6 @@ class ObjectLiteral;
/* These will be ported/eliminated */ \
/* as part of the new IC system, ask */ \
/* ishell before doing anything */ \
- V(KeyedLoadGeneric) \
V(LoadConstant) \
V(LoadDictionaryElement) \
V(LoadFastElement) \
@@ -87,9 +79,7 @@ class ObjectLiteral;
V(BinaryOpIC) \
V(BinaryOpWithAllocationSite) \
V(ToBooleanIC) \
- V(RegExpConstructResult) \
V(TransitionElementsKind) \
- V(StoreGlobalViaContext) \
/* --- TurboFanCodeStubs --- */ \
V(AllocateHeapNumber) \
V(AllocateFloat32x4) \
@@ -108,22 +98,11 @@ class ObjectLiteral;
V(CreateAllocationSite) \
V(CreateWeakCell) \
V(StringLength) \
- V(Add) \
V(AddWithFeedback) \
- V(Subtract) \
V(SubtractWithFeedback) \
- V(Multiply) \
V(MultiplyWithFeedback) \
- V(Divide) \
V(DivideWithFeedback) \
- V(Modulus) \
V(ModulusWithFeedback) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(ShiftLeft) \
- V(BitwiseAnd) \
- V(BitwiseOr) \
- V(BitwiseXor) \
V(Inc) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
@@ -134,45 +113,35 @@ class ObjectLiteral;
V(FastCloneShallowObject) \
V(FastNewClosure) \
V(FastNewFunctionContext) \
- V(InstanceOf) \
- V(LessThan) \
- V(LessThanOrEqual) \
- V(GreaterThan) \
- V(GreaterThanOrEqual) \
- V(Equal) \
- V(NotEqual) \
V(KeyedLoadSloppyArguments) \
V(KeyedStoreSloppyArguments) \
V(LoadScriptContextField) \
V(StoreScriptContextField) \
- V(StrictEqual) \
- V(StrictNotEqual) \
- V(ToInteger) \
- V(ToLength) \
- V(HasProperty) \
- V(ForInFilter) \
+ V(NumberToString) \
+ V(StringAdd) \
V(GetProperty) \
- V(LoadICTF) \
+ V(LoadIC) \
+ V(LoadICProtoArray) \
V(KeyedLoadICTF) \
V(StoreFastElement) \
V(StoreField) \
V(StoreGlobal) \
- V(StoreICTF) \
+ V(StoreIC) \
+ V(KeyedStoreICTF) \
V(StoreInterceptor) \
V(StoreMap) \
V(StoreTransition) \
V(LoadApiGetter) \
V(LoadIndexedInterceptor) \
V(GrowArrayElements) \
- V(ToObject) \
- V(Typeof) \
/* These are only called from FGC and */ \
/* can be removed when we use ignition */ \
/* only */ \
- V(LoadICTrampolineTF) \
+ V(LoadICTrampoline) \
V(LoadGlobalICTrampoline) \
V(KeyedLoadICTrampolineTF) \
- V(StoreICTrampolineTF)
+ V(StoreICTrampoline) \
+ V(KeyedStoreICTrampolineTF)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -756,14 +725,6 @@ class StringLengthStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StringLength, TurboFanCodeStub);
};
-class AddStub final : public TurboFanCodeStub {
- public:
- explicit AddStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Add, TurboFanCodeStub);
-};
-
class AddWithFeedbackStub final : public TurboFanCodeStub {
public:
explicit AddWithFeedbackStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -773,14 +734,6 @@ class AddWithFeedbackStub final : public TurboFanCodeStub {
TurboFanCodeStub);
};
-class SubtractStub final : public TurboFanCodeStub {
- public:
- explicit SubtractStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Subtract, TurboFanCodeStub);
-};
-
class SubtractWithFeedbackStub final : public TurboFanCodeStub {
public:
explicit SubtractWithFeedbackStub(Isolate* isolate)
@@ -791,14 +744,6 @@ class SubtractWithFeedbackStub final : public TurboFanCodeStub {
TurboFanCodeStub);
};
-class MultiplyStub final : public TurboFanCodeStub {
- public:
- explicit MultiplyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Multiply, TurboFanCodeStub);
-};
-
class MultiplyWithFeedbackStub final : public TurboFanCodeStub {
public:
explicit MultiplyWithFeedbackStub(Isolate* isolate)
@@ -809,14 +754,6 @@ class MultiplyWithFeedbackStub final : public TurboFanCodeStub {
TurboFanCodeStub);
};
-class DivideStub final : public TurboFanCodeStub {
- public:
- explicit DivideStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Divide, TurboFanCodeStub);
-};
-
class DivideWithFeedbackStub final : public TurboFanCodeStub {
public:
explicit DivideWithFeedbackStub(Isolate* isolate)
@@ -827,14 +764,6 @@ class DivideWithFeedbackStub final : public TurboFanCodeStub {
TurboFanCodeStub);
};
-class ModulusStub final : public TurboFanCodeStub {
- public:
- explicit ModulusStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Modulus, TurboFanCodeStub);
-};
-
class ModulusWithFeedbackStub final : public TurboFanCodeStub {
public:
explicit ModulusWithFeedbackStub(Isolate* isolate)
@@ -845,55 +774,6 @@ class ModulusWithFeedbackStub final : public TurboFanCodeStub {
TurboFanCodeStub);
};
-class ShiftRightStub final : public TurboFanCodeStub {
- public:
- explicit ShiftRightStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRight, TurboFanCodeStub);
-};
-
-class ShiftRightLogicalStub final : public TurboFanCodeStub {
- public:
- explicit ShiftRightLogicalStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftRightLogical, TurboFanCodeStub);
-};
-
-class ShiftLeftStub final : public TurboFanCodeStub {
- public:
- explicit ShiftLeftStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ShiftLeft, TurboFanCodeStub);
-};
-
-class BitwiseAndStub final : public TurboFanCodeStub {
- public:
- explicit BitwiseAndStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseAnd, TurboFanCodeStub);
-};
-
-class BitwiseOrStub final : public TurboFanCodeStub {
- public:
- explicit BitwiseOrStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseOr, TurboFanCodeStub);
-};
-
-class BitwiseXorStub final : public TurboFanCodeStub {
- public:
- explicit BitwiseXorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(BitwiseXor, TurboFanCodeStub);
-};
-
class IncStub final : public TurboFanCodeStub {
public:
explicit IncStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -910,96 +790,6 @@ class DecStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Dec, TurboFanCodeStub);
};
-class InstanceOfStub final : public TurboFanCodeStub {
- public:
- explicit InstanceOfStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(InstanceOf, TurboFanCodeStub);
-};
-
-class LessThanStub final : public TurboFanCodeStub {
- public:
- explicit LessThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThan, TurboFanCodeStub);
-};
-
-class LessThanOrEqualStub final : public TurboFanCodeStub {
- public:
- explicit LessThanOrEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(LessThanOrEqual, TurboFanCodeStub);
-};
-
-class GreaterThanStub final : public TurboFanCodeStub {
- public:
- explicit GreaterThanStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThan, TurboFanCodeStub);
-};
-
-class GreaterThanOrEqualStub final : public TurboFanCodeStub {
- public:
- explicit GreaterThanOrEqualStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(GreaterThanOrEqual, TurboFanCodeStub);
-};
-
-class EqualStub final : public TurboFanCodeStub {
- public:
- explicit EqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(Equal, TurboFanCodeStub);
-};
-
-class NotEqualStub final : public TurboFanCodeStub {
- public:
- explicit NotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NotEqual, TurboFanCodeStub);
-};
-
-class StrictEqualStub final : public TurboFanCodeStub {
- public:
- explicit StrictEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictEqual, TurboFanCodeStub);
-};
-
-class StrictNotEqualStub final : public TurboFanCodeStub {
- public:
- explicit StrictNotEqualStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Compare);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(StrictNotEqual, TurboFanCodeStub);
-};
-
-class ToIntegerStub final : public TurboFanCodeStub {
- public:
- explicit ToIntegerStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_TURBOFAN_CODE_STUB(ToInteger, TurboFanCodeStub);
-};
-
-class ToLengthStub final : public TurboFanCodeStub {
- public:
- explicit ToLengthStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_TURBOFAN_CODE_STUB(ToLength, TurboFanCodeStub);
-};
-
class StoreInterceptorStub : public TurboFanCodeStub {
public:
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
@@ -1025,23 +815,6 @@ class LoadIndexedInterceptorStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(LoadIndexedInterceptor, TurboFanCodeStub);
};
-// ES6 section 12.10.3 "in" operator evaluation.
-class HasPropertyStub : public TurboFanCodeStub {
- public:
- explicit HasPropertyStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(HasProperty);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(HasProperty, TurboFanCodeStub);
-};
-
-class ForInFilterStub : public TurboFanCodeStub {
- public:
- explicit ForInFilterStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ForInFilter);
- DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(ForInFilter, TurboFanCodeStub);
-};
-
// ES6 [[Get]] operation.
class GetPropertyStub : public TurboFanCodeStub {
public:
@@ -1051,39 +824,12 @@ class GetPropertyStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(GetProperty, TurboFanCodeStub);
};
-enum StringAddFlags {
- // Omit both parameter checks.
- STRING_ADD_CHECK_NONE = 0,
- // Check left parameter.
- STRING_ADD_CHECK_LEFT = 1 << 0,
- // Check right parameter.
- STRING_ADD_CHECK_RIGHT = 1 << 1,
- // Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
- // Convert parameters when check fails (instead of throwing an exception).
- STRING_ADD_CONVERT = 1 << 2,
- STRING_ADD_CONVERT_LEFT = STRING_ADD_CHECK_LEFT | STRING_ADD_CONVERT,
- STRING_ADD_CONVERT_RIGHT = STRING_ADD_CHECK_RIGHT | STRING_ADD_CONVERT
-};
-
-
-std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
-
-
-class NumberToStringStub final : public HydrogenCodeStub {
+class NumberToStringStub final : public TurboFanCodeStub {
public:
- explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+ explicit NumberToStringStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
-};
-
-class TypeofStub final : public TurboFanCodeStub {
- public:
- explicit TypeofStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Typeof);
- DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(Typeof, TurboFanCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
};
class FastNewClosureStub : public TurboFanCodeStub {
@@ -1329,24 +1075,12 @@ enum AllocationSiteOverrideMode {
class ArrayConstructorStub: public PlatformCodeStub {
public:
- enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
-
- ArrayConstructorStub(Isolate* isolate, int argument_count);
-
explicit ArrayConstructorStub(Isolate* isolate);
private:
- ArgumentCountKey argument_count() const {
- return ArgumentCountBits::decode(minor_key_);
- }
-
void GenerateDispatchToArrayStub(MacroAssembler* masm,
AllocationSiteOverrideMode mode);
- void PrintName(std::ostream& os) const override; // NOLINT
-
- class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
};
@@ -1410,7 +1144,6 @@ class CallICStub: public PlatformCodeStub {
}
protected:
- int arg_count() const { return state().argc(); }
ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
@@ -1726,33 +1459,6 @@ class StoreGlobalStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(StoreGlobal, TurboFanCodeStub);
};
-// TODO(ishell): remove, once StoreGlobalIC is implemented.
-class StoreGlobalViaContextStub final : public PlatformCodeStub {
- public:
- static const int kMaximumDepth = 15;
-
- StoreGlobalViaContextStub(Isolate* isolate, int depth,
- LanguageMode language_mode)
- : PlatformCodeStub(isolate) {
- minor_key_ =
- DepthBits::encode(depth) | LanguageModeBits::encode(language_mode);
- }
-
- int depth() const { return DepthBits::decode(minor_key_); }
- LanguageMode language_mode() const {
- return LanguageModeBits::decode(minor_key_);
- }
-
- private:
- class DepthBits : public BitField<int, 0, 4> {};
- STATIC_ASSERT(DepthBits::kMax == kMaximumDepth);
- class LanguageModeBits : public BitField<LanguageMode, 4, 1> {};
- STATIC_ASSERT(LANGUAGE_END == 2);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreGlobalViaContext);
- DEFINE_PLATFORM_CODE_STUB(StoreGlobalViaContext, PlatformCodeStub);
-};
-
class CallApiCallbackStub : public PlatformCodeStub {
public:
static const int kArgBits = 3;
@@ -1896,22 +1602,21 @@ class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
DEFINE_HYDROGEN_CODE_STUB(BinaryOpWithAllocationSite, BinaryOpICStub);
};
-
-class StringAddStub final : public HydrogenCodeStub {
+class StringAddStub final : public TurboFanCodeStub {
public:
StringAddStub(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(StringAddFlagsBits::encode(flags) |
- PretenureFlagBits::encode(pretenure_flag));
+ : TurboFanCodeStub(isolate) {
+ minor_key_ = (StringAddFlagsBits::encode(flags) |
+ PretenureFlagBits::encode(pretenure_flag));
}
StringAddFlags flags() const {
- return StringAddFlagsBits::decode(sub_minor_key());
+ return StringAddFlagsBits::decode(minor_key_);
}
PretenureFlag pretenure_flag() const {
- return PretenureFlagBits::decode(sub_minor_key());
+ return PretenureFlagBits::decode(minor_key_);
}
private:
@@ -1921,7 +1626,7 @@ class StringAddStub final : public HydrogenCodeStub {
void PrintBaseName(std::ostream& os) const override; // NOLINT
DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
- DEFINE_HYDROGEN_CODE_STUB(StringAdd, HydrogenCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(StringAdd, TurboFanCodeStub);
};
@@ -2077,17 +1782,6 @@ class RegExpExecStub: public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
};
-// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
-class RegExpConstructResultStub final : public HydrogenCodeStub {
- public:
- explicit RegExpConstructResultStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) { }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpConstructResult);
- DEFINE_HYDROGEN_CODE_STUB(RegExpConstructResult, HydrogenCodeStub);
-};
-
-
// TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
class CallConstructStub final : public PlatformCodeStub {
public:
@@ -2277,39 +1971,16 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
};
-
-class KeyedLoadGenericStub : public HydrogenCodeStub {
+class LoadICTrampolineStub : public TurboFanCodeStub {
public:
- explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
-};
-
-
-class LoadICTrampolineStub : public PlatformCodeStub {
- public:
- explicit LoadICTrampolineStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
-};
-
-class LoadICTrampolineTFStub : public TurboFanCodeStub {
- public:
- explicit LoadICTrampolineTFStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
+ explicit LoadICTrampolineStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
void GenerateAssembly(CodeStubAssembler* assembler) const override;
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_CODE_STUB(LoadICTrampolineTF, TurboFanCodeStub);
+ DEFINE_CODE_STUB(LoadICTrampoline, TurboFanCodeStub);
};
class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
@@ -2332,35 +2003,27 @@ class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
};
-class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
- public:
- explicit KeyedLoadICTrampolineStub(Isolate* isolate)
- : LoadICTrampolineStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
- DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
-};
-
-class KeyedLoadICTrampolineTFStub : public LoadICTrampolineTFStub {
+class KeyedLoadICTrampolineTFStub : public LoadICTrampolineStub {
public:
explicit KeyedLoadICTrampolineTFStub(Isolate* isolate)
- : LoadICTrampolineTFStub(isolate) {}
+ : LoadICTrampolineStub(isolate) {}
void GenerateAssembly(CodeStubAssembler* assembler) const override;
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
- DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineTFStub);
+ DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineStub);
};
-class StoreICTrampolineStub : public PlatformCodeStub {
+class StoreICTrampolineStub : public TurboFanCodeStub {
public:
StoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
- : PlatformCodeStub(isolate) {
+ : TurboFanCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
ExtraICState GetExtraICState() const final {
@@ -2370,21 +2033,19 @@ class StoreICTrampolineStub : public PlatformCodeStub {
protected:
StoreICState state() const { return StoreICState(GetExtraICState()); }
- private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
- DEFINE_PLATFORM_CODE_STUB(StoreICTrampoline, PlatformCodeStub);
+ DEFINE_CODE_STUB(StoreICTrampoline, TurboFanCodeStub);
};
-class StoreICTrampolineTFStub : public TurboFanCodeStub {
+class KeyedStoreICTrampolineStub : public PlatformCodeStub {
public:
- StoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
- : TurboFanCodeStub(isolate) {
+ KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+ : PlatformCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
+ Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
- Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
@@ -2392,21 +2053,23 @@ class StoreICTrampolineTFStub : public TurboFanCodeStub {
protected:
StoreICState state() const { return StoreICState(GetExtraICState()); }
+ private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
- DEFINE_CODE_STUB(StoreICTrampolineTF, TurboFanCodeStub);
+ DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, PlatformCodeStub);
};
-class KeyedStoreICTrampolineStub : public StoreICTrampolineStub {
+class KeyedStoreICTrampolineTFStub : public StoreICTrampolineStub {
public:
- KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
+ KeyedStoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
: StoreICTrampolineStub(isolate, state) {}
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
- DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, StoreICTrampolineStub);
+ DEFINE_CODE_STUB(KeyedStoreICTrampolineTF, StoreICTrampolineStub);
};
-
class CallICTrampolineStub : public PlatformCodeStub {
public:
CallICTrampolineStub(Isolate* isolate, const CallICState& state)
@@ -2429,32 +2092,26 @@ class CallICTrampolineStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(CallICTrampoline, PlatformCodeStub);
};
-
-class LoadICStub : public PlatformCodeStub {
+class LoadICStub : public TurboFanCodeStub {
public:
- explicit LoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+ explicit LoadICStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- void GenerateForTrampoline(MacroAssembler* masm);
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_PLATFORM_CODE_STUB(LoadIC, PlatformCodeStub);
-
- protected:
- void GenerateImpl(MacroAssembler* masm, bool in_frame);
+ DEFINE_CODE_STUB(LoadIC, TurboFanCodeStub);
};
-class LoadICTFStub : public TurboFanCodeStub {
+class LoadICProtoArrayStub : public TurboFanCodeStub {
public:
- explicit LoadICTFStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
+ explicit LoadICProtoArrayStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
void GenerateAssembly(CodeStubAssembler* assembler) const override;
- Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_CODE_STUB(LoadICTF, TurboFanCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadICProtoArray);
+ DEFINE_CODE_STUB(LoadICProtoArray, TurboFanCodeStub);
};
class LoadGlobalICStub : public TurboFanCodeStub {
@@ -2476,57 +2133,20 @@ class LoadGlobalICStub : public TurboFanCodeStub {
DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
};
-class KeyedLoadICStub : public PlatformCodeStub {
+class KeyedLoadICTFStub : public LoadICStub {
public:
- explicit KeyedLoadICStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- void GenerateForTrampoline(MacroAssembler* masm);
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_PLATFORM_CODE_STUB(KeyedLoadIC, PlatformCodeStub);
-
- protected:
- void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class KeyedLoadICTFStub : public LoadICTFStub {
- public:
- explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICTFStub(isolate) {}
+ explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICStub(isolate) {}
void GenerateAssembly(CodeStubAssembler* assembler) const override;
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
- DEFINE_CODE_STUB(KeyedLoadICTF, LoadICTFStub);
+ DEFINE_CODE_STUB(KeyedLoadICTF, LoadICStub);
};
-class StoreICStub : public PlatformCodeStub {
+class StoreICStub : public TurboFanCodeStub {
public:
StoreICStub(Isolate* isolate, const StoreICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- void GenerateForTrampoline(MacroAssembler* masm);
-
- Code::Kind GetCodeKind() const final { return Code::STORE_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_PLATFORM_CODE_STUB(StoreIC, PlatformCodeStub);
-
- protected:
- void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class StoreICTFStub : public TurboFanCodeStub {
- public:
- StoreICTFStub(Isolate* isolate, const StoreICState& state)
: TurboFanCodeStub(isolate) {
minor_key_ = state.GetExtraICState();
}
@@ -2539,7 +2159,7 @@ class StoreICTFStub : public TurboFanCodeStub {
}
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_CODE_STUB(StoreICTF, TurboFanCodeStub);
+ DEFINE_CODE_STUB(StoreIC, TurboFanCodeStub);
};
class KeyedStoreICStub : public PlatformCodeStub {
@@ -2564,6 +2184,17 @@ class KeyedStoreICStub : public PlatformCodeStub {
void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
+class KeyedStoreICTFStub : public StoreICStub {
+ public:
+ KeyedStoreICTFStub(Isolate* isolate, const StoreICState& state)
+ : StoreICStub(isolate, state) {}
+
+ void GenerateAssembly(CodeStubAssembler* assembler) const override;
+
+ Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
+
+ DEFINE_CODE_STUB(KeyedStoreICTF, StoreICStub);
+};
class DoubleToIStub : public PlatformCodeStub {
public:
@@ -2938,45 +2569,15 @@ class StoreElementStub : public PlatformCodeStub {
class ToBooleanICStub : public HydrogenCodeStub {
public:
- enum Type {
- UNDEFINED,
- BOOLEAN,
- NULL_TYPE,
- SMI,
- SPEC_OBJECT,
- STRING,
- SYMBOL,
- HEAP_NUMBER,
- SIMD_VALUE,
- NUMBER_OF_TYPES
- };
-
- // At most 16 different types can be distinguished, because the Code object
- // only has room for two bytes to hold a set of these types. :-P
- STATIC_ASSERT(NUMBER_OF_TYPES <= 16);
-
- class Types : public EnumSet<Type, uint16_t> {
- public:
- Types() : EnumSet<Type, uint16_t>(0) {}
- explicit Types(uint16_t bits) : EnumSet<Type, uint16_t>(bits) {}
-
- bool UpdateStatus(Isolate* isolate, Handle<Object> object);
- bool NeedsMap() const;
- bool CanBeUndetectable() const {
- return Contains(ToBooleanICStub::SPEC_OBJECT);
- }
- bool IsGeneric() const { return ToIntegral() == Generic().ToIntegral(); }
-
- static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
- };
-
ToBooleanICStub(Isolate* isolate, ExtraICState state)
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(TypesBits::encode(static_cast<uint16_t>(state)));
+ set_sub_minor_key(HintsBits::encode(static_cast<uint16_t>(state)));
}
bool UpdateStatus(Handle<Object> object);
- Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
+ ToBooleanHints hints() const {
+ return ToBooleanHints(HintsBits::decode(sub_minor_key()));
+ }
Code::Kind GetCodeKind() const override { return Code::TO_BOOLEAN_IC; }
void PrintState(std::ostream& os) const override; // NOLINT
@@ -2987,10 +2588,10 @@ class ToBooleanICStub : public HydrogenCodeStub {
return ToBooleanICStub(isolate, UNINITIALIZED).GetCode();
}
- ExtraICState GetExtraICState() const override { return types().ToIntegral(); }
+ ExtraICState GetExtraICState() const override { return hints(); }
InlineCacheState GetICState() const {
- if (types().IsEmpty()) {
+ if (hints() == ToBooleanHint::kNone) {
return ::v8::internal::UNINITIALIZED;
} else {
return MONOMORPHIC;
@@ -3001,14 +2602,15 @@ class ToBooleanICStub : public HydrogenCodeStub {
ToBooleanICStub(Isolate* isolate, InitializationState init_state)
: HydrogenCodeStub(isolate, init_state) {}
- class TypesBits : public BitField<uint16_t, 0, NUMBER_OF_TYPES> {};
+ static const int kNumHints = 9;
+ STATIC_ASSERT(static_cast<int>(ToBooleanHint::kAny) ==
+ ((1 << kNumHints) - 1));
+ class HintsBits : public BitField<uint16_t, 0, kNumHints> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
DEFINE_HYDROGEN_CODE_STUB(ToBooleanIC, HydrogenCodeStub);
};
-std::ostream& operator<<(std::ostream& os, const ToBooleanICStub::Types& t);
-
class ElementsTransitionAndStoreStub : public TurboFanCodeStub {
public:
ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
@@ -3123,13 +2725,6 @@ class SubStringStub : public TurboFanCodeStub {
DEFINE_CODE_STUB(SubString, TurboFanCodeStub);
};
-class ToObjectStub final : public TurboFanCodeStub {
- public:
- explicit ToObjectStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(TypeConversion);
- DEFINE_TURBOFAN_CODE_STUB(ToObject, TurboFanCodeStub);
-};
#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
#undef DEFINE_PLATFORM_CODE_STUB
@@ -3138,8 +2733,6 @@ class ToObjectStub final : public TurboFanCodeStub {
#undef DEFINE_CODE_STUB
#undef DEFINE_CODE_STUB_BASE
-extern Representation RepresentationFromMachineType(MachineType type);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index d0b67f1f45..a17ad2a880 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -6,6 +6,7 @@
#define V8_CODEGEN_H_
#include "src/code-stubs.h"
+#include "src/globals.h"
#include "src/runtime/runtime.h"
// Include the declaration of the architecture defined class CodeGenerator.
@@ -97,8 +98,7 @@ typedef double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate);
-
-double modulo(double x, double y);
+V8_EXPORT_PRIVATE double modulo(double x, double y);
// Custom implementation of math functions.
double fast_sqrt(double input, Isolate* isolate);
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index a40eb74801..9a8229e029 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -28,8 +28,8 @@ class CompilationDependencies {
void AssumeInitialMapCantChange(Handle<Map> map) {
Insert(DependentCode::kInitialMapChangedGroup, map);
}
- void AssumeFieldType(Handle<Map> map) {
- Insert(DependentCode::kFieldTypeGroup, map);
+ void AssumeFieldOwner(Handle<Map> map) {
+ Insert(DependentCode::kFieldOwnerGroup, map);
}
void AssumeMapStable(Handle<Map> map);
void AssumePrototypeMapsStable(
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index 2e0934a2cd..5c9fa58367 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -9,6 +9,7 @@
#include "src/ast/scopes.h"
#include "src/isolate.h"
#include "src/parsing/parse-info.h"
+#include "src/source-position.h"
namespace v8 {
namespace internal {
@@ -32,6 +33,20 @@ PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
#undef PARSE_INFO_GETTER
#undef PARSE_INFO_GETTER_WITH_DEFAULT
+bool CompilationInfo::is_debug() const {
+ return parse_info() ? parse_info()->is_debug() : false;
+}
+
+void CompilationInfo::set_is_debug() {
+ CHECK(parse_info());
+ parse_info()->set_is_debug();
+}
+
+void CompilationInfo::PrepareForSerializing() {
+ if (parse_info()) parse_info()->set_will_serialize();
+ SetFlag(kSerializing);
+}
+
bool CompilationInfo::has_shared_info() const {
return parse_info_ && !parse_info_->shared_info().is_null();
}
@@ -51,8 +66,12 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info,
if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
- if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
+
+ if (FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
+ FLAG_turbo_profiling || isolate_->is_profiling()) {
+ MarkAsSourcePositionsEnabled();
+ }
}
CompilationInfo::CompilationInfo(Vector<const char> debug_name,
@@ -200,10 +219,12 @@ void CompilationInfo::SetOptimizing() {
code_flags_ = Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
}
-void CompilationInfo::AddInlinedFunction(
- Handle<SharedFunctionInfo> inlined_function) {
+int CompilationInfo::AddInlinedFunction(
+ Handle<SharedFunctionInfo> inlined_function, SourcePosition pos) {
+ int id = static_cast<int>(inlined_functions_.size());
inlined_functions_.push_back(InlinedFunctionHolder(
- inlined_function, handle(inlined_function->code())));
+ inlined_function, handle(inlined_function->code()), pos));
+ return id;
}
Code::Kind CompilationInfo::output_code_kind() const {
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index 88477ae75e..77b9e34306 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -37,22 +37,19 @@ class CompilationInfo final {
kNonDeferredCalling = 1 << 1,
kSavesCallerDoubles = 1 << 2,
kRequiresFrame = 1 << 3,
- kMustNotHaveEagerFrame = 1 << 4,
- kDeoptimizationSupport = 1 << 5,
- kDebug = 1 << 6,
- kSerializing = 1 << 7,
- kFunctionContextSpecializing = 1 << 8,
- kFrameSpecializing = 1 << 9,
- kNativeContextSpecializing = 1 << 10,
- kInliningEnabled = 1 << 11,
- kDisableFutureOptimization = 1 << 12,
- kSplittingEnabled = 1 << 13,
- kDeoptimizationEnabled = 1 << 14,
- kSourcePositionsEnabled = 1 << 15,
- kBailoutOnUninitialized = 1 << 16,
- kOptimizeFromBytecode = 1 << 17,
- kTypeFeedbackEnabled = 1 << 18,
- kAccessorInliningEnabled = 1 << 19,
+ kDeoptimizationSupport = 1 << 4,
+ kAccessorInliningEnabled = 1 << 5,
+ kSerializing = 1 << 6,
+ kFunctionContextSpecializing = 1 << 7,
+ kFrameSpecializing = 1 << 8,
+ kInliningEnabled = 1 << 9,
+ kDisableFutureOptimization = 1 << 10,
+ kSplittingEnabled = 1 << 11,
+ kDeoptimizationEnabled = 1 << 12,
+ kSourcePositionsEnabled = 1 << 13,
+ kBailoutOnUninitialized = 1 << 14,
+ kOptimizeFromBytecode = 1 << 15,
+ kTypeFeedbackEnabled = 1 << 16,
};
CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
@@ -112,23 +109,17 @@ class CompilationInfo final {
bool requires_frame() const { return GetFlag(kRequiresFrame); }
- void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
-
- bool GetMustNotHaveEagerFrame() const {
- return GetFlag(kMustNotHaveEagerFrame);
- }
-
// Compiles marked as debug produce unoptimized code with debug break slots.
// Inner functions that cannot be compiled w/o context are compiled eagerly.
// Always include deoptimization support to avoid having to recompile again.
void MarkAsDebug() {
- SetFlag(kDebug);
+ set_is_debug();
SetFlag(kDeoptimizationSupport);
}
- bool is_debug() const { return GetFlag(kDebug); }
+ bool is_debug() const;
- void PrepareForSerializing() { SetFlag(kSerializing); }
+ void PrepareForSerializing();
bool will_serialize() const { return GetFlag(kSerializing); }
@@ -144,14 +135,6 @@ class CompilationInfo final {
bool is_frame_specializing() const { return GetFlag(kFrameSpecializing); }
- void MarkAsNativeContextSpecializing() {
- SetFlag(kNativeContextSpecializing);
- }
-
- bool is_native_context_specializing() const {
- return GetFlag(kNativeContextSpecializing);
- }
-
void MarkAsDeoptimizationEnabled() { SetFlag(kDeoptimizationEnabled); }
bool is_deoptimization_enabled() const {
@@ -300,18 +283,29 @@ class CompilationInfo final {
// Do not remove.
Handle<Code> inlined_code_object_root;
+ InliningPosition position;
+
InlinedFunctionHolder(Handle<SharedFunctionInfo> inlined_shared_info,
- Handle<Code> inlined_code_object_root)
+ Handle<Code> inlined_code_object_root,
+ SourcePosition pos)
: shared_info(inlined_shared_info),
- inlined_code_object_root(inlined_code_object_root) {}
+ inlined_code_object_root(inlined_code_object_root) {
+ position.position = pos;
+ // initialized when generating the deoptimization literals
+ position.inlined_function_id = DeoptimizationInputData::kNotInlinedIndex;
+ }
+
+ void RegisterInlinedFunctionId(size_t inlined_function_id) {
+ position.inlined_function_id = static_cast<int>(inlined_function_id);
+ }
};
typedef std::vector<InlinedFunctionHolder> InlinedFunctionList;
- InlinedFunctionList const& inlined_functions() const {
- return inlined_functions_;
- }
+ InlinedFunctionList& inlined_functions() { return inlined_functions_; }
- void AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function);
+ // Returns the inlining id for source position tracking.
+ int AddInlinedFunction(Handle<SharedFunctionInfo> inlined_function,
+ SourcePosition pos);
std::unique_ptr<char[]> GetDebugName() const;
@@ -346,6 +340,8 @@ class CompilationInfo final {
bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
+ void set_is_debug();
+
unsigned flags_;
Code::Flags code_flags_;
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index 96956aec97..b87a4a5c32 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -6,6 +6,7 @@
#include "src/assert-scope.h"
#include "src/compilation-info.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler.h"
#include "src/global-handles.h"
#include "src/isolate.h"
@@ -23,6 +24,7 @@ CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
size_t max_stack_size)
: isolate_(isolate),
+ tracer_(isolate_->compiler_dispatcher_tracer()),
shared_(Handle<SharedFunctionInfo>::cast(
isolate_->global_handles()->Create(*shared))),
max_stack_size_(max_stack_size),
@@ -45,9 +47,10 @@ CompilerDispatcherJob::~CompilerDispatcherJob() {
void CompilerDispatcherJob::PrepareToParseOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kInitial);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToParse);
HandleScope scope(isolate_);
unicode_cache_.reset(new UnicodeCache());
- zone_.reset(new Zone(isolate_->allocator()));
+ zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
Handle<Script> script(Script::cast(shared_->script()), isolate_);
DCHECK(script->type() != Script::TYPE_NATIVE);
@@ -66,7 +69,6 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
parse_info_.reset(new ParseInfo(zone_.get()));
parse_info_->set_isolate(isolate_);
parse_info_->set_character_stream(character_stream_.get());
- parse_info_->set_lazy();
parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
parse_info_->set_is_named_expression(shared_->is_named_expression());
parse_info_->set_compiler_hints(shared_->compiler_hints());
@@ -93,6 +95,9 @@ void CompilerDispatcherJob::Parse() {
DCHECK(can_parse_on_background_thread_ ||
ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kReadyToParse);
+ COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
+ tracer_, kParse,
+ parse_info_->end_position() - parse_info_->start_position());
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
@@ -120,6 +125,7 @@ void CompilerDispatcherJob::Parse() {
bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kParsed);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeParsing);
if (!source_.is_null()) {
i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
@@ -144,17 +150,9 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
}
parse_info_->set_shared_info(shared_);
- {
- // Create a canonical handle scope if compiling ignition bytecode. This is
- // required by the constant array builder to de-duplicate objects without
- // dereferencing handles.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(isolate_));
-
- // Do the parsing tasks which need to be done on the main thread. This
- // will also handle parse errors.
- parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
- }
+ // Do the parsing tasks which need to be done on the main thread. This
+ // will also handle parse errors.
+ parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
parser_->HandleSourceURLComments(isolate_, script);
parse_info_->set_character_stream(nullptr);
@@ -171,6 +169,7 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
compile_info_.reset(
new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
@@ -198,6 +197,8 @@ void CompilerDispatcherJob::Compile() {
DCHECK(status() == CompileJobStatus::kReadyToCompile);
DCHECK(can_compile_on_background_thread_ ||
ThreadId::Current().Equals(isolate_->thread_id()));
+ COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
+ tracer_, kCompile, parse_info_->literal()->ast_node_count());
// Disallowing of handle dereference and heap access dealt with in
// CompilationJob::ExecuteJob.
@@ -216,6 +217,7 @@ void CompilerDispatcherJob::Compile() {
bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kCompiled);
+ COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeCompiling);
if (compile_job_->state() == CompilationJob::State::kFailed ||
!Compiler::FinalizeCompilationJob(compile_job_.release())) {
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index f3aaf939e0..7f4c6ced71 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -8,12 +8,14 @@
#include <memory>
#include "src/base/macros.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
namespace internal {
+class CompilerDispatcherTracer;
class CompilationInfo;
class CompilationJob;
class Isolate;
@@ -36,7 +38,7 @@ enum class CompileJobStatus {
kDone,
};
-class CompilerDispatcherJob {
+class V8_EXPORT_PRIVATE CompilerDispatcherJob {
public:
CompilerDispatcherJob(Isolate* isolate, Handle<SharedFunctionInfo> shared,
size_t max_stack_size);
@@ -81,6 +83,7 @@ class CompilerDispatcherJob {
CompileJobStatus status_ = CompileJobStatus::kInitial;
Isolate* isolate_;
+ CompilerDispatcherTracer* tracer_;
Handle<SharedFunctionInfo> shared_; // Global handle.
Handle<String> source_; // Global handle.
size_t max_stack_size_;
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
new file mode 100644
index 0000000000..f8af05feb9
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -0,0 +1,171 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+double MonotonicallyIncreasingTimeInMs() {
+ return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+}
+
+} // namespace
+
+CompilerDispatcherTracer::Scope::Scope(CompilerDispatcherTracer* tracer,
+ ScopeID scope_id, size_t num)
+ : tracer_(tracer), scope_id_(scope_id), num_(num) {
+ start_time_ = MonotonicallyIncreasingTimeInMs();
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ RuntimeCallStats::Enter(tracer_->runtime_call_stats_, &timer_,
+ &RuntimeCallStats::CompilerDispatcher);
+ }
+}
+
+CompilerDispatcherTracer::Scope::~Scope() {
+ double elapsed = MonotonicallyIncreasingTimeInMs() - start_time_;
+ switch (scope_id_) {
+ case ScopeID::kPrepareToParse:
+ tracer_->RecordPrepareToParse(elapsed);
+ break;
+ case ScopeID::kParse:
+ tracer_->RecordParse(elapsed, num_);
+ break;
+ case ScopeID::kFinalizeParsing:
+ tracer_->RecordFinalizeParsing(elapsed);
+ break;
+ case ScopeID::kPrepareToCompile:
+ tracer_->RecordPrepareToCompile(elapsed);
+ break;
+ case ScopeID::kCompile:
+ tracer_->RecordCompile(elapsed, num_);
+ break;
+ case ScopeID::kFinalizeCompiling:
+ tracer_->RecordFinalizeCompiling(elapsed);
+ break;
+ }
+ // TODO(cbruni): remove once we fully moved to a trace-based system.
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ RuntimeCallStats::Leave(tracer_->runtime_call_stats_, &timer_);
+ }
+}
+
+// static
+const char* CompilerDispatcherTracer::Scope::Name(ScopeID scope_id) {
+ switch (scope_id) {
+ case ScopeID::kPrepareToParse:
+ return "V8.BackgroundCompile_PrepareToParse";
+ case ScopeID::kParse:
+ return "V8.BackgroundCompile_Parse";
+ case ScopeID::kFinalizeParsing:
+ return "V8.BackgroundCompile_FinalizeParsing";
+ case ScopeID::kPrepareToCompile:
+ return "V8.BackgroundCompile_PrepareToCompile";
+ case ScopeID::kCompile:
+ return "V8.BackgroundCompile_Compile";
+ case ScopeID::kFinalizeCompiling:
+ return "V8.BackgroundCompile_FinalizeCompiling";
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+CompilerDispatcherTracer::CompilerDispatcherTracer(Isolate* isolate)
+ : runtime_call_stats_(nullptr) {
+ // isolate might be nullptr during unittests.
+ if (isolate) {
+ runtime_call_stats_ = isolate->counters()->runtime_call_stats();
+ }
+}
+
+CompilerDispatcherTracer::~CompilerDispatcherTracer() {}
+
+void CompilerDispatcherTracer::RecordPrepareToParse(double duration_ms) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ prepare_parse_events_.Push(duration_ms);
+}
+
+void CompilerDispatcherTracer::RecordParse(double duration_ms,
+ size_t source_length) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ parse_events_.Push(std::make_pair(source_length, duration_ms));
+}
+
+void CompilerDispatcherTracer::RecordFinalizeParsing(double duration_ms) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ finalize_parsing_events_.Push(duration_ms);
+}
+
+void CompilerDispatcherTracer::RecordPrepareToCompile(double duration_ms) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ prepare_compile_events_.Push(duration_ms);
+}
+
+void CompilerDispatcherTracer::RecordCompile(double duration_ms,
+ size_t ast_size_in_bytes) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ compile_events_.Push(std::make_pair(ast_size_in_bytes, duration_ms));
+}
+
+void CompilerDispatcherTracer::RecordFinalizeCompiling(double duration_ms) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ finalize_compiling_events_.Push(duration_ms);
+}
+
+double CompilerDispatcherTracer::EstimatePrepareToParseInMs() const {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Average(prepare_parse_events_);
+}
+
+double CompilerDispatcherTracer::EstimateParseInMs(size_t source_length) const {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Estimate(parse_events_, source_length);
+}
+
+double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Average(finalize_parsing_events_);
+}
+
+double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Average(prepare_compile_events_);
+}
+
+double CompilerDispatcherTracer::EstimateCompileInMs(size_t ast_size_in_bytes) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Estimate(compile_events_, ast_size_in_bytes);
+}
+
+double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ return Average(finalize_compiling_events_);
+}
+
+double CompilerDispatcherTracer::Average(
+ const base::RingBuffer<double>& buffer) {
+ if (buffer.Count() == 0) return 0.0;
+ double sum = buffer.Sum([](double a, double b) { return a + b; }, 0.0);
+ return sum / buffer.Count();
+}
+
+double CompilerDispatcherTracer::Estimate(
+ const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num) {
+ if (buffer.Count() == 0) return 0.0;
+ std::pair<size_t, double> sum = buffer.Sum(
+ [](std::pair<size_t, double> a, std::pair<size_t, double> b) {
+ return std::make_pair(a.first + b.first, a.second + b.second);
+ },
+ std::make_pair(0, 0.0));
+ return num * (sum.second / sum.first);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
new file mode 100644
index 0000000000..b505511eb9
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
@@ -0,0 +1,98 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
+#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
+
+#include <utility>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/ring-buffer.h"
+#include "src/counters.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class RuntimeCallStats;
+
+#define COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(tracer, scope_id, num) \
+ CompilerDispatcherTracer::ScopeID tracer_scope_id( \
+ CompilerDispatcherTracer::ScopeID::scope_id); \
+ CompilerDispatcherTracer::Scope trace_scope(tracer, tracer_scope_id, num); \
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), \
+ CompilerDispatcherTracer::Scope::Name(tracer_scope_id))
+
+#define COMPILER_DISPATCHER_TRACE_SCOPE(tracer, scope_id) \
+ COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(tracer, scope_id, 0)
+
+class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
+ public:
+ enum class ScopeID {
+ kPrepareToParse,
+ kParse,
+ kFinalizeParsing,
+ kPrepareToCompile,
+ kCompile,
+ kFinalizeCompiling
+ };
+
+ class Scope {
+ public:
+ Scope(CompilerDispatcherTracer* tracer, ScopeID scope_id, size_t num = 0);
+ ~Scope();
+
+ static const char* Name(ScopeID scoped_id);
+
+ private:
+ CompilerDispatcherTracer* tracer_;
+ ScopeID scope_id_;
+ size_t num_;
+ double start_time_;
+ RuntimeCallTimer timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(Scope);
+ };
+
+ explicit CompilerDispatcherTracer(Isolate* isolate);
+ ~CompilerDispatcherTracer();
+
+ void RecordPrepareToParse(double duration_ms);
+ void RecordParse(double duration_ms, size_t source_length);
+ void RecordFinalizeParsing(double duration_ms);
+ void RecordPrepareToCompile(double duration_ms);
+ void RecordCompile(double duration_ms, size_t ast_size_in_bytes);
+ void RecordFinalizeCompiling(double duration_ms);
+
+ double EstimatePrepareToParseInMs() const;
+ double EstimateParseInMs(size_t source_length) const;
+ double EstimateFinalizeParsingInMs();
+ double EstimatePrepareToCompileInMs();
+ double EstimateCompileInMs(size_t ast_size_in_bytes);
+ double EstimateFinalizeCompilingInMs();
+
+ private:
+ static double Average(const base::RingBuffer<double>& buffer);
+ static double Estimate(
+ const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num);
+
+ mutable base::Mutex mutex_;
+ base::RingBuffer<double> prepare_parse_events_;
+ base::RingBuffer<std::pair<size_t, double>> parse_events_;
+ base::RingBuffer<double> finalize_parsing_events_;
+ base::RingBuffer<double> prepare_compile_events_;
+ base::RingBuffer<std::pair<size_t, double>> compile_events_;
+ base::RingBuffer<double> finalize_compiling_events_;
+
+ RuntimeCallStats* runtime_call_stats_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherTracer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_TRACER_H_
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 75c50eec7d..1169506384 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -134,7 +134,23 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
}
}
-void OptimizingCompileDispatcher::Flush() {
+void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
+ if (FLAG_block_concurrent_recompilation) Unblock();
+ if (blocking_behavior == BlockingBehavior::kDontBlock) {
+ base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
+ while (input_queue_length_ > 0) {
+ CompilationJob* job = input_queue_[InputQueueIndex(0)];
+ DCHECK_NOT_NULL(job);
+ input_queue_shift_ = InputQueueIndex(1);
+ input_queue_length_--;
+ DisposeCompilationJob(job, true);
+ }
+ FlushOutputQueue(true);
+ if (FLAG_trace_concurrent_recompilation) {
+ PrintF(" ** Flushed concurrent recompilation queues (not blocking).\n");
+ }
+ return;
+ }
base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
{
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 8c032ab320..7e08161517 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -22,6 +22,8 @@ class SharedFunctionInfo;
class OptimizingCompileDispatcher {
public:
+ enum class BlockingBehavior { kBlock, kDontBlock };
+
explicit OptimizingCompileDispatcher(Isolate* isolate)
: isolate_(isolate),
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
@@ -38,7 +40,7 @@ class OptimizingCompileDispatcher {
void Run();
void Stop();
- void Flush();
+ void Flush(BlockingBehavior blocking_behavior);
void QueueForOptimization(CompilationJob* job);
void Unblock();
void InstallOptimizedFunctions();
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index ec402fa822..3435f530c2 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -20,7 +20,6 @@
#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/globals.h"
@@ -251,22 +250,6 @@ void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
namespace {
-bool IsEvalToplevel(Handle<SharedFunctionInfo> shared) {
- return shared->is_toplevel() && shared->script()->IsScript() &&
- Script::cast(shared->script())->compilation_type() ==
- Script::COMPILATION_TYPE_EVAL;
-}
-
-bool Parse(ParseInfo* info) {
- // Create a canonical handle scope if compiling ignition bytecode. This is
- // required by the constant array builder to de-duplicate objects without
- // dereferencing handles.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) canonical.reset(new CanonicalHandleScope(info->isolate()));
-
- return Parser::ParseStatic(info);
-}
-
void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
CompilationInfo* info) {
// Log the code generation. If source information is available include
@@ -320,21 +303,46 @@ void EnsureFeedbackMetadata(CompilationInfo* info) {
info->literal()->feedback_vector_spec()));
}
-bool ShouldUseIgnition(CompilationInfo* info) {
- if (!FLAG_ignition) return false;
+bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
+ bool optimization_disabled = shared->optimization_disabled();
+ bool dont_crankshaft = shared->dont_crankshaft();
+
+ // Check the enabling conditions for Turbofan.
+ // 1. "use asm" code.
+ bool is_turbofanable_asm =
+ FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
+ // 2. Fallback for features unsupported by Crankshaft.
+ bool is_unsupported_by_crankshaft_but_turbofanable =
+ dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+ !optimization_disabled;
+
+ // 3. Explicitly enabled by the command-line filter.
+ bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
+
+ return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
+ passes_turbo_filter;
+}
+
+bool ShouldUseIgnition(CompilationInfo* info) {
DCHECK(info->has_shared_info());
+ // Skip Ignition for asm.js functions.
+ if (info->shared_info()->asm_function()) {
+ return false;
+ }
+
// When requesting debug code as a replacement for existing code, we provide
// the same kind as the existing code (to prevent implicit tier-change).
if (info->is_debug() && info->shared_info()->is_compiled()) {
return !info->shared_info()->HasBaselineCode();
}
- // Since we can't OSR from Ignition, skip Ignition for asm.js functions.
- if (info->shared_info()->asm_function()) {
- return false;
- }
+ // Code destined for TurboFan should be compiled with Ignition first.
+ if (UseTurboFan(info->shared_info())) return true;
+
+ // Only use Ignition for any other function if FLAG_ignition is true.
+ if (!FLAG_ignition) return false;
// Checks whether top level functions should be passed by the filter.
if (info->shared_info()->is_toplevel()) {
@@ -360,38 +368,6 @@ CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
}
}
-bool GenerateUnoptimizedCode(CompilationInfo* info) {
- if (FLAG_validate_asm && info->scope()->asm_module() &&
- !info->shared_info()->is_asm_wasm_broken()) {
- EnsureFeedbackMetadata(info);
- MaybeHandle<FixedArray> wasm_data;
- wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
- if (!wasm_data.is_null()) {
- info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
- info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
- return true;
- }
- }
-
- std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
- if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
- if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
- if (job->FinalizeJob() != CompilationJob::SUCCEEDED) return false;
- job->RecordUnoptimizedCompilationStats();
- return true;
-}
-
-bool CompileUnoptimizedCode(CompilationInfo* info) {
- DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info->parse_info()) ||
- !GenerateUnoptimizedCode(info)) {
- Isolate* isolate = info->isolate();
- if (!isolate->has_pending_exception()) isolate->StackOverflow();
- return false;
- }
- return true;
-}
-
void InstallSharedScopeInfo(CompilationInfo* info,
Handle<SharedFunctionInfo> shared) {
Handle<ScopeInfo> scope_info = info->scope()->scope_info();
@@ -426,9 +402,50 @@ void InstallUnoptimizedCode(CompilationInfo* info) {
// Install compilation result on the shared function info
InstallSharedCompilationResult(info, shared);
+}
- // Record the function compilation event.
- RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
+CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
+ CompilationJob::Status status = job->FinalizeJob();
+ if (status == CompilationJob::SUCCEEDED) {
+ InstallUnoptimizedCode(job->info());
+ job->RecordUnoptimizedCompilationStats();
+ }
+ return status;
+}
+
+bool GenerateUnoptimizedCode(CompilationInfo* info) {
+ if (FLAG_validate_asm && info->scope()->asm_module() &&
+ !info->shared_info()->is_asm_wasm_broken() && !info->is_debug()) {
+ EnsureFeedbackMetadata(info);
+ MaybeHandle<FixedArray> wasm_data;
+ wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
+ if (!wasm_data.is_null()) {
+ info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
+ info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
+ InstallUnoptimizedCode(info);
+ return true;
+ }
+ }
+
+ std::unique_ptr<CompilationJob> job(GetUnoptimizedCompilationJob(info));
+ if (job->PrepareJob() != CompilationJob::SUCCEEDED) return false;
+ if (job->ExecuteJob() != CompilationJob::SUCCEEDED) return false;
+ if (FinalizeUnoptimizedCompilationJob(job.get()) !=
+ CompilationJob::SUCCEEDED) {
+ return false;
+ }
+ return true;
+}
+
+bool CompileUnoptimizedCode(CompilationInfo* info) {
+ DCHECK(AllowCompilation::IsAllowed(info->isolate()));
+ if (!Compiler::Analyze(info->parse_info()) ||
+ !GenerateUnoptimizedCode(info)) {
+ Isolate* isolate = info->isolate();
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
+ return true;
}
MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
@@ -436,28 +453,19 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
PostponeInterruptsScope postpone(info->isolate());
// Parse and update CompilationInfo with the results.
- if (!Parse(info->parse_info())) return MaybeHandle<Code>();
+ if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
DCHECK_EQ(info->shared_info()->language_mode(),
info->literal()->language_mode());
// Compile either unoptimized code or bytecode for the interpreter.
if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
- InstallUnoptimizedCode(info);
+ // Record the function compilation event.
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
return info->code();
}
-CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
- CompilationJob::Status status = job->FinalizeJob();
- if (status == CompilationJob::SUCCEEDED) {
- DCHECK(!job->info()->shared_info()->is_compiled());
- InstallUnoptimizedCode(job->info());
- job->RecordUnoptimizedCompilationStats();
- }
- return status;
-}
-
MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
Handle<JSFunction> function, BailoutId osr_ast_id) {
Handle<SharedFunctionInfo> shared(function->shared());
@@ -495,29 +503,9 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
literals, info->osr_ast_id());
-
- // Do not cache (native) context-independent code compiled for OSR.
- if (code->is_turbofanned() && info->is_osr()) return;
-
- // Cache optimized (native) context-independent code.
- if (FLAG_turbo_cache_shared_code && code->is_turbofanned() &&
- !info->is_native_context_specializing()) {
- DCHECK(!info->is_function_context_specializing());
- DCHECK(info->osr_ast_id().IsNone());
- Handle<SharedFunctionInfo> shared(function->shared());
- SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(shared, code);
- }
}
bool Renumber(ParseInfo* parse_info) {
- // Create a canonical handle scope if compiling ignition bytecode. This is
- // required by the constant array builder to de-duplicate objects without
- // dereferencing handles.
- std::unique_ptr<CanonicalHandleScope> canonical;
- if (FLAG_ignition) {
- canonical.reset(new CanonicalHandleScope(parse_info->isolate()));
- }
-
if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
parse_info->literal())) {
return false;
@@ -536,27 +524,6 @@ bool Renumber(ParseInfo* parse_info) {
return true;
}
-bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
- bool optimization_disabled = shared->optimization_disabled();
- bool dont_crankshaft = shared->dont_crankshaft();
-
- // Check the enabling conditions for Turbofan.
- // 1. "use asm" code.
- bool is_turbofanable_asm =
- FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
-
- // 2. Fallback for features unsupported by Crankshaft.
- bool is_unsupported_by_crankshaft_but_turbofanable =
- dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
- !optimization_disabled;
-
- // 3. Explicitly enabled by the command-line filter.
- bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
-
- return is_turbofanable_asm || is_unsupported_by_crankshaft_but_turbofanable ||
- passes_turbo_filter;
-}
-
bool GetOptimizedCodeNow(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
@@ -652,8 +619,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
- // Flag combination --ignition-osr --no-turbo-from-bytecode is unsupported.
- if (ignition_osr && !FLAG_turbo_from_bytecode) return MaybeHandle<Code>();
+ // Shared function no longer needs to be tiered up
+ shared->set_marked_for_tier_up(false);
Handle<Code> cached_code;
// TODO(4764): When compiling for OSR from bytecode, BailoutId might derive
@@ -673,8 +640,10 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
}
// Reset profiler ticks, function is no longer considered hot.
- if (shared->is_compiled()) {
+ if (shared->HasBaselineCode()) {
shared->code()->set_profiler_ticks(0);
+ } else if (shared->HasBytecodeArray()) {
+ shared->set_profiler_ticks(0);
}
VMState<COMPILER> state(isolate);
@@ -708,7 +677,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// TurboFan can optimize directly from existing bytecode.
- if (FLAG_turbo_from_bytecode && use_turbofan && ShouldUseIgnition(info)) {
+ if (use_turbofan && ShouldUseIgnition(info)) {
if (info->is_osr() && !ignition_osr) return MaybeHandle<Code>();
if (!Compiler::EnsureBytecode(info)) {
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
@@ -717,14 +686,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
info->MarkAsOptimizeFromBytecode();
}
- if (IsEvalToplevel(shared)) {
- parse_info->set_eval();
- if (function->context()->IsNativeContext()) parse_info->set_global();
- parse_info->set_toplevel();
- parse_info->set_allow_lazy_parsing(false);
- parse_info->set_lazy(false);
- }
-
// Verify that OSR compilations are delegated to the correct graph builder.
// Depending on the underlying frame the semantics of the {BailoutId} differ
// and the various graph builders hard-code a certain semantic:
@@ -775,7 +736,13 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
"V8.RecompileSynchronous");
Handle<SharedFunctionInfo> shared = info->shared_info();
- shared->code()->set_profiler_ticks(0);
+
+ // Reset profiler ticks, function is no longer considered hot.
+ if (shared->HasBaselineCode()) {
+ shared->code()->set_profiler_ticks(0);
+ } else if (shared->HasBytecodeArray()) {
+ shared->set_profiler_ticks(0);
+ }
DCHECK(!shared->HasDebugInfo());
@@ -818,84 +785,17 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
return CompilationJob::FAILED;
}
-class InterpreterActivationsFinder : public ThreadVisitor,
- public OptimizedFunctionVisitor {
- public:
- explicit InterpreterActivationsFinder(SharedFunctionInfo* shared)
- : shared_(shared), has_activations_(false) {}
-
- void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- Address* activation_pc_address = nullptr;
- JavaScriptFrameIterator it(isolate, top);
- for (; !it.done(); it.Advance()) {
- JavaScriptFrame* frame = it.frame();
- if (FLAG_turbo_from_bytecode && FLAG_ignition_osr &&
- frame->is_optimized() && frame->function()->shared() == shared_) {
- // If we are able to optimize functions directly from bytecode, then
- // there might be optimized OSR code active on the stack that is not
- // reachable through a function. We count this as an activation.
- has_activations_ = true;
- }
- if (frame->is_interpreted() && frame->function()->shared() == shared_) {
- has_activations_ = true;
- activation_pc_address = frame->pc_address();
- }
- }
-
- if (activation_pc_address) {
- activation_pc_addresses_.push_back(activation_pc_address);
- }
- }
-
- void VisitFunction(JSFunction* function) {
- if (function->Inlines(shared_)) has_activations_ = true;
- }
-
- void EnterContext(Context* context) {}
- void LeaveContext(Context* context) {}
-
- bool MarkActivationsForBaselineOnReturn(Isolate* isolate) {
- if (activation_pc_addresses_.empty()) return false;
-
- for (Address* activation_pc_address : activation_pc_addresses_) {
- DCHECK(isolate->inner_pointer_to_code_cache()
- ->GetCacheEntry(*activation_pc_address)
- ->code->is_interpreter_trampoline_builtin());
- *activation_pc_address =
- isolate->builtins()->InterpreterMarkBaselineOnReturn()->entry();
- }
- return true;
- }
-
- bool has_activations() { return has_activations_; }
-
- private:
- SharedFunctionInfo* shared_;
- bool has_activations_;
- std::vector<Address*> activation_pc_addresses_;
-};
-
-bool HasInterpreterActivations(
- Isolate* isolate, InterpreterActivationsFinder* activations_finder) {
- activations_finder->VisitThread(isolate, isolate->thread_local_top());
- isolate->thread_manager()->IterateArchivedThreads(activations_finder);
- if (FLAG_turbo_from_bytecode) {
- // If we are able to optimize functions directly from bytecode, then there
- // might be optimized functions that rely on bytecode being around. We need
- // to prevent switching the given function to baseline code in those cases.
- Deoptimizer::VisitAllOptimizedFunctions(isolate, activations_finder);
- }
- return activations_finder->has_activations();
-}
-
MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- Zone zone(isolate->allocator());
- ParseInfo parse_info(&zone, function);
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, handle(function->shared()));
CompilationInfo info(&parse_info, function);
+ // Function no longer needs to be tiered up
+ function->shared()->set_marked_for_tier_up(false);
+
// Reset profiler ticks, function is no longer considered hot.
if (function->shared()->HasBytecodeArray()) {
function->shared()->set_profiler_ticks(0);
@@ -920,31 +820,6 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
return MaybeHandle<Code>();
}
- // TODO(4280): For now we disable switching to baseline code in the presence
- // of interpreter activations of the given function. The reasons is that the
- // underlying bytecode is cleared below. Note that this only applies in case
- // the --ignition-preserve-bytecode flag is not passed.
- if (!FLAG_ignition_preserve_bytecode) {
- InterpreterActivationsFinder activations_finder(function->shared());
- if (HasInterpreterActivations(isolate, &activations_finder)) {
- if (FLAG_trace_opt) {
- OFStream os(stdout);
- os << "[unable to switch " << Brief(*function) << " due to activations]"
- << std::endl;
- }
-
- if (activations_finder.MarkActivationsForBaselineOnReturn(isolate)) {
- if (FLAG_trace_opt) {
- OFStream os(stdout);
- os << "[marking " << Brief(function->shared())
- << " for baseline recompilation on return]" << std::endl;
- }
- }
-
- return MaybeHandle<Code>();
- }
- }
-
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[switching method " << Brief(*function) << " to baseline code]"
@@ -952,7 +827,7 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
}
// Parse and update CompilationInfo with the results.
- if (!Parse(info.parse_info())) return MaybeHandle<Code>();
+ if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info.shared_info();
DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
@@ -963,12 +838,6 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
return MaybeHandle<Code>();
}
- // TODO(4280): For now we play it safe and remove the bytecode array when we
- // switch to baseline code. We might consider keeping around the bytecode so
- // that it can be used as the "source of truth" eventually. Note that this
- // only applies in case the --ignition-preserve-bytecode flag is not passed.
- if (!FLAG_ignition_preserve_bytecode) shared->ClearBytecodeArray();
-
// Update the shared function info with the scope info.
InstallSharedScopeInfo(&info, shared);
@@ -1003,6 +872,46 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
return cached_code;
}
+ if (function->shared()->marked_for_tier_up()) {
+ DCHECK(FLAG_mark_shared_functions_for_tier_up);
+
+ function->shared()->set_marked_for_tier_up(false);
+
+ switch (Compiler::NextCompilationTier(*function)) {
+ case Compiler::BASELINE: {
+ if (FLAG_trace_opt) {
+ PrintF("[recompiling function ");
+ function->ShortPrint();
+ PrintF(
+ " to baseline eagerly (shared function marked for tier up)]\n");
+ }
+
+ Handle<Code> code;
+ if (GetBaselineCode(function).ToHandle(&code)) {
+ return code;
+ }
+ break;
+ }
+ case Compiler::OPTIMIZED: {
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing method ");
+ function->ShortPrint();
+ PrintF(" eagerly (shared function marked for tier up)]\n");
+ }
+
+ Handle<Code> code;
+ // TODO(leszeks): Look into performing this compilation concurrently.
+ if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+ .ToHandle(&code)) {
+ return code;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
+
if (function->shared()->is_compiled()) {
return Handle<Code>(function->shared()->code());
}
@@ -1013,8 +922,8 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
return entry;
}
- Zone zone(isolate->allocator());
- ParseInfo parse_info(&zone, function);
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, handle(function->shared()));
CompilationInfo info(&parse_info, function);
Handle<Code> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
@@ -1059,50 +968,13 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
isolate->debug()->OnBeforeCompile(script);
- DCHECK(parse_info->is_eval() || parse_info->is_global() ||
- parse_info->is_module());
-
- parse_info->set_toplevel();
-
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
- if (parse_info->literal() == NULL) {
- // Parse the script if needed (if it's already parsed, literal() is
- // non-NULL). If compiling for debugging, we may eagerly compile inner
- // functions, so do not parse lazily in that case.
- ScriptCompiler::CompileOptions options = parse_info->compile_options();
- bool parse_allow_lazy = (options == ScriptCompiler::kConsumeParserCache ||
- String::cast(script->source())->length() >
- FLAG_min_preparse_length) &&
- !info->is_debug();
-
- // Consider parsing eagerly when targeting the code cache.
- parse_allow_lazy &= !(FLAG_serialize_eager && info->will_serialize());
-
- // Consider parsing eagerly when targeting Ignition.
- parse_allow_lazy &= !(FLAG_ignition && FLAG_ignition_eager &&
- !isolate->serializer_enabled());
-
- parse_info->set_allow_lazy_parsing(parse_allow_lazy);
- if (!parse_allow_lazy &&
- (options == ScriptCompiler::kProduceParserCache ||
- options == ScriptCompiler::kConsumeParserCache)) {
- // We are going to parse eagerly, but we either 1) have cached data
- // produced by lazy parsing or 2) are asked to generate cached data.
- // Eager parsing cannot benefit from cached data, and producing cached
- // data while parsing eagerly is not implemented.
- parse_info->set_cached_data(nullptr);
- parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
- }
-
- if (!Parse(parse_info)) {
- return Handle<SharedFunctionInfo>::null();
- }
+ if (parse_info->literal() == nullptr && !Parser::ParseStatic(parse_info)) {
+ return Handle<SharedFunctionInfo>::null();
}
- DCHECK(!info->is_debug() || !parse_info->allow_lazy_parsing());
-
FunctionLiteral* lit = parse_info->literal();
// Measure how long it takes to do the compilation; only take the
@@ -1122,10 +994,6 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
result->set_is_toplevel(true);
- if (parse_info->is_eval()) {
- // Eval scripts cannot be (re-)compiled without context.
- result->set_allows_lazy_compilation_without_context(false);
- }
parse_info->set_shared_info(result);
// Compile the code.
@@ -1133,12 +1001,6 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
return Handle<SharedFunctionInfo>::null();
}
- // Update the shared function info with the scope info.
- InstallSharedScopeInfo(info, result);
-
- // Install compilation result on the shared function info
- InstallSharedCompilationResult(info, result);
-
Handle<String> script_name =
script->name()->IsString()
? Handle<String>(String::cast(script->name()))
@@ -1173,7 +1035,7 @@ bool Compiler::Analyze(ParseInfo* info) {
}
bool Compiler::ParseAndAnalyze(ParseInfo* info) {
- if (!Parse(info)) return false;
+ if (!Parser::ParseStatic(info)) return false;
if (!Compiler::Analyze(info)) return false;
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
@@ -1246,8 +1108,8 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
code = isolate->builtins()->InterpreterEntryTrampoline();
function->shared()->ReplaceCode(*code);
} else {
- Zone zone(isolate->allocator());
- ParseInfo parse_info(&zone, function);
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, handle(function->shared()));
CompilationInfo info(&parse_info, function);
if (!GetUnoptimizedCode(&info).ToHandle(&code)) {
return false;
@@ -1266,44 +1128,14 @@ bool Compiler::CompileOptimized(Handle<JSFunction> function,
return true;
}
-bool Compiler::CompileDebugCode(Handle<JSFunction> function) {
- Isolate* isolate = function->GetIsolate();
- DCHECK(AllowCompilation::IsAllowed(isolate));
-
- // Start a compilation.
- Zone zone(isolate->allocator());
- ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- if (IsEvalToplevel(handle(function->shared()))) {
- parse_info.set_eval();
- if (function->context()->IsNativeContext()) parse_info.set_global();
- parse_info.set_toplevel();
- parse_info.set_allow_lazy_parsing(false);
- parse_info.set_lazy(false);
- }
- info.MarkAsDebug();
- if (GetUnoptimizedCode(&info).is_null()) {
- isolate->clear_pending_exception();
- return false;
- }
-
- // Check postconditions on success.
- DCHECK(!isolate->has_pending_exception());
- DCHECK(function->shared()->is_compiled());
- DCHECK(function->shared()->HasDebugCode());
- return true;
-}
-
bool Compiler::CompileDebugCode(Handle<SharedFunctionInfo> shared) {
Isolate* isolate = shared->GetIsolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
// Start a compilation.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, shared);
CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- DCHECK(shared->allows_lazy_compilation_without_context());
- DCHECK(!IsEvalToplevel(shared));
info.MarkAsDebug();
if (GetUnoptimizedCode(&info).is_null()) {
isolate->clear_pending_exception();
@@ -1325,13 +1157,12 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
// generated shared function infos, clear the script's list temporarily
// and restore it at the end of this method.
Handle<Object> old_function_infos(script->shared_function_infos(), isolate);
- script->set_shared_function_infos(Smi::FromInt(0));
+ script->set_shared_function_infos(Smi::kZero);
// Start a compilation.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- parse_info.set_global();
info.MarkAsDebug();
// TODO(635): support extensions.
@@ -1377,7 +1208,7 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
DCHECK_NOT_NULL(info->scope());
Handle<SharedFunctionInfo> shared = info->shared_info();
if (!shared->has_deoptimization_support()) {
- Zone zone(info->isolate()->allocator());
+ Zone zone(info->isolate()->allocator(), ZONE_NAME);
CompilationInfo unoptimized(info->parse_info(), info->closure());
unoptimized.EnableDeoptimizationSupport();
@@ -1387,18 +1218,9 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
// TurboFan in this case.
if (IsResumableFunction(shared->kind())) return false;
- // TODO(4280): For now we disable switching to baseline code in the presence
- // of interpreter activations of the given function. The reasons is that the
- // underlying bytecode is cleared below. The expensive check for activations
- // only needs to be done when the given function has bytecode, otherwise we
- // can be sure there are no activations. Note that this only applies in case
- // the --ignition-preserve-bytecode flag is not passed.
- if (!FLAG_ignition_preserve_bytecode && shared->HasBytecodeArray()) {
- InterpreterActivationsFinder activations_finder(*shared);
- if (HasInterpreterActivations(info->isolate(), &activations_finder)) {
- return false;
- }
- }
+ // When we call PrepareForSerializing below, we will change the shared
+ // ParseInfo. Make sure to reset it.
+ bool old_will_serialize_value = info->parse_info()->will_serialize();
// If the current code has reloc info for serialization, also include
// reloc info for serialization for the new code, so that deopt support
@@ -1410,13 +1232,7 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
EnsureFeedbackMetadata(&unoptimized);
if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
- // TODO(4280): For now we play it safe and remove the bytecode array when we
- // switch to baseline code. We might consider keeping around the bytecode so
- // that it can be used as the "source of truth" eventually. Note that this
- // only applies in case the --ignition-preserve-bytecode flag is not passed.
- if (!FLAG_ignition_preserve_bytecode && shared->HasBytecodeArray()) {
- shared->ClearBytecodeArray();
- }
+ info->parse_info()->set_will_serialize(old_will_serialize_value);
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
@@ -1437,8 +1253,8 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
// static
Compiler::CompilationTier Compiler::NextCompilationTier(JSFunction* function) {
Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
- if (shared->code()->is_interpreter_trampoline_builtin()) {
- if (FLAG_turbo_from_bytecode && UseTurboFan(shared)) {
+ if (shared->IsInterpreted()) {
+ if (UseTurboFan(shared)) {
return OPTIMIZED;
} else {
return BASELINE;
@@ -1468,6 +1284,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<Script> script;
if (!maybe_shared_info.ToHandle(&shared_info)) {
script = isolate->factory()->NewScript(source);
+ if (FLAG_trace_deopt) Script::InitLineEnds(script);
if (!script_name.is_null()) {
script->set_name(*script_name);
script->set_line_offset(line_offset);
@@ -1477,11 +1294,10 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
Script::SetEvalOrigin(script, outer_info, eval_position);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info, Handle<JSFunction>::null());
parse_info.set_eval();
- if (context->IsNativeContext()) parse_info.set_global();
parse_info.set_language_mode(language_mode);
parse_info.set_parse_restriction(restriction);
if (!context->IsNativeContext()) {
@@ -1628,6 +1444,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
+ if (FLAG_trace_deopt) Script::InitLineEnds(script);
if (natives == NATIVES_CODE) {
script->set_type(Script::TYPE_NATIVE);
script->set_hide_source(true);
@@ -1646,14 +1463,10 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
// Compile the function and add it to the cache.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- if (is_module) {
- parse_info.set_module();
- } else {
- parse_info.set_global();
- }
+ if (is_module) parse_info.set_module();
if (compile_options != ScriptCompiler::kNoCompileOptions) {
parse_info.set_cached_data(cached_data);
}
@@ -1766,59 +1579,49 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
result->set_never_compiled(outer_info->shared_info()->never_compiled());
}
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, script);
CompilationInfo info(&parse_info, Handle<JSFunction>::null());
parse_info.set_literal(literal);
parse_info.set_shared_info(result);
parse_info.set_language_mode(literal->scope()->language_mode());
+ parse_info.set_ast_value_factory(
+ outer_info->parse_info()->ast_value_factory());
+ parse_info.set_ast_value_factory_owned(false);
+
if (outer_info->will_serialize()) info.PrepareForSerializing();
if (outer_info->is_debug()) info.MarkAsDebug();
- // Determine if the function can be lazily compiled. This is necessary to
- // allow some of our builtin JS files to be lazily compiled. These
- // builtins cannot be handled lazily by the parser, since we have to know
- // if a function uses the special natives syntax, which is something the
- // parser records.
- // If the debugger requests compilation for break points, we cannot be
- // aggressive about lazy compilation, because it might trigger compilation
- // of functions without an outer context when setting a breakpoint through
- // Debug::FindSharedFunctionInfoInScript.
- bool allow_lazy = literal->AllowsLazyCompilation() && !info.is_debug();
- bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
-
- // Consider compiling eagerly when targeting the code cache.
- lazy &= !(FLAG_serialize_eager && info.will_serialize());
-
- // Consider compiling eagerly when compiling bytecode for Ignition.
- lazy &=
- !(FLAG_ignition && FLAG_ignition_eager && !isolate->serializer_enabled());
-
- // Generate code
- TimerEventScope<TimerEventCompileCode> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
-
- if (lazy) {
- info.SetCode(isolate->builtins()->CompileLazy());
- Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
- if (outer_scope) {
- result->set_outer_scope_info(*outer_scope->scope_info());
- }
- } else if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
- // Code generation will ensure that the feedback vector is present and
- // appropriately sized.
- DCHECK(!info.code().is_null());
- if (literal->should_eager_compile() &&
- literal->should_be_used_once_hint()) {
- info.code()->MarkToBeExecutedOnce(isolate);
+ // If this inner function is already compiled, we don't need to compile
+ // again. When compiling for debug, we are not interested in having debug
+ // break slots in inner functions, neither for setting break points nor
+ // for revealing inner functions.
+ // This is especially important for generators. We must not replace the
+ // code for generators, as there may be suspended generator objects.
+ if (!result->is_compiled()) {
+ if (!literal->ShouldEagerCompile()) {
+ info.SetCode(isolate->builtins()->CompileLazy());
+ Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
+ if (outer_scope) {
+ result->set_outer_scope_info(*outer_scope->scope_info());
+ }
+ } else {
+ // Generate code
+ TimerEventScope<TimerEventCompileCode> timer(isolate);
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::CompileCode);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
+ if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
+ // Code generation will ensure that the feedback vector is present and
+ // appropriately sized.
+ DCHECK(!info.code().is_null());
+ if (literal->should_be_used_once_hint()) {
+ info.code()->MarkToBeExecutedOnce(isolate);
+ }
+ } else {
+ return Handle<SharedFunctionInfo>::null();
+ }
}
- // Update the shared function info with the scope info.
- InstallSharedScopeInfo(&info, result);
- // Install compilation result on the shared function info.
- InstallSharedCompilationResult(&info, result);
- } else {
- return Handle<SharedFunctionInfo>::null();
}
if (maybe_existing.is_null()) {
@@ -1887,8 +1690,13 @@ bool Compiler::FinalizeCompilationJob(CompilationJob* raw_job) {
return FinalizeOptimizedCompilationJob(job.get()) ==
CompilationJob::SUCCEEDED;
} else {
- return FinalizeUnoptimizedCompilationJob(job.get()) ==
- CompilationJob::SUCCEEDED;
+ if (FinalizeUnoptimizedCompilationJob(job.get()) ==
+ CompilationJob::SUCCEEDED) {
+ RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
+ job->info());
+ return true;
+ }
+ return false;
}
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index bfeaa8e7c3..03c6f8199f 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -48,7 +48,6 @@ class Compiler : public AllStatic {
static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileBaseline(Handle<JSFunction> function);
static bool CompileOptimized(Handle<JSFunction> function, ConcurrencyMode);
- static bool CompileDebugCode(Handle<JSFunction> function);
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
static MaybeHandle<JSArray> CompileForLiveEdit(Handle<Script> script);
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 530143440d..540eb375b7 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -146,11 +146,10 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
- TypeCache const& type_cache = TypeCache::Get();
FieldAccess access = {kTaggedBase,
JSGeneratorObject::kContinuationOffset,
Handle<Name>(),
- type_cache.kSmi,
+ Type::SignedSmall(),
MachineType::TaggedSigned(),
kNoWriteBarrier};
return access;
@@ -180,10 +179,12 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
- TypeCache const& type_cache = TypeCache::Get();
- FieldAccess access = {
- kTaggedBase, JSGeneratorObject::kResumeModeOffset, Handle<Name>(),
- type_cache.kSmi, MachineType::TaggedSigned(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase,
+ JSGeneratorObject::kResumeModeOffset,
+ Handle<Name>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
@@ -356,7 +357,7 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
FieldAccess access = {kTaggedBase,
FixedTypedArrayBase::kExternalPointerOffset,
MaybeHandle<Name>(),
- Type::OtherInternal(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
return access;
@@ -433,6 +434,27 @@ FieldAccess AccessBuilder::ForMapPrototype() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForModuleRegularExports() {
+ FieldAccess access = {kTaggedBase,
+ Module::kRegularExportsOffset,
+ Handle<Name>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForModuleRegularImports() {
+ FieldAccess access = {kTaggedBase,
+ Module::kRegularImportsOffset,
+ Handle<Name>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForNameHashField() {
@@ -490,7 +512,7 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
FieldAccess access = {kTaggedBase,
ExternalString::kResourceDataOffset,
Handle<Name>(),
- Type::OtherInternal(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
return access;
@@ -549,6 +571,59 @@ FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
}
// static
+FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
+ FieldAccess access = {kTaggedBase,
+ JSArrayIterator::kIteratedObjectOffset,
+ Handle<Name>(),
+ Type::ReceiverOrUndefined(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
+ ElementsKind elements_kind) {
+ // In generic case, cap to 2^53-1 (per ToLength() in spec) via
+ // kPositiveSafeInteger
+ FieldAccess access = {kTaggedBase,
+ JSArrayIterator::kNextIndexOffset,
+ Handle<Name>(),
+ TypeCache::Get().kPositiveSafeInteger,
+ MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ if (instance_type == JS_ARRAY_TYPE) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ access.type = TypeCache::Get().kFixedDoubleArrayLengthType;
+ access.machine_type = MachineType::TaggedSigned();
+ access.write_barrier_kind = kNoWriteBarrier;
+ } else if (IsFastElementsKind(elements_kind)) {
+ access.type = TypeCache::Get().kFixedArrayLengthType;
+ access.machine_type = MachineType::TaggedSigned();
+ access.write_barrier_kind = kNoWriteBarrier;
+ } else {
+ access.type = TypeCache::Get().kJSArrayLengthType;
+ }
+ } else if (instance_type == JS_TYPED_ARRAY_TYPE) {
+ access.type = TypeCache::Get().kJSTypedArrayLengthType;
+ access.machine_type = MachineType::TaggedSigned();
+ access.write_barrier_kind = kNoWriteBarrier;
+ }
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
+ FieldAccess access = {kTaggedBase,
+ JSArrayIterator::kIteratedObjectMapOffset,
+ Handle<Name>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSStringIteratorString() {
FieldAccess access = {
kTaggedBase, JSStringIterator::kStringOffset, Handle<Name>(),
@@ -611,6 +686,14 @@ FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
// static
+FieldAccess AccessBuilder::ForCellValue() {
+ FieldAccess access = {
+ kTaggedBase, Cell::kValueOffset, Handle<Name>(),
+ Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
@@ -656,7 +739,7 @@ ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
case FAST_SMI_ELEMENTS:
- access.type = TypeCache::Get().kSmi;
+ access.type = Type::SignedSmall();
access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
break;
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 96f3200ee2..eb8e78fc36 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_ACCESS_BUILDER_H_
#define V8_COMPILER_ACCESS_BUILDER_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/simplified-operator.h"
#include "src/elements-kind.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -15,7 +17,8 @@ namespace compiler {
// This access builder provides a set of static methods constructing commonly
// used FieldAccess and ElementAccess descriptors. These descriptors serve as
// parameters to simplified load/store operators.
-class AccessBuilder final : public AllStatic {
+class V8_EXPORT_PRIVATE AccessBuilder final
+ : public NON_EXPORTED_BASE(AllStatic) {
public:
// ===========================================================================
// Access to external values (based on external references).
@@ -143,6 +146,12 @@ class AccessBuilder final : public AllStatic {
// Provides access to Map::prototype() field.
static FieldAccess ForMapPrototype();
+ // Provides access to Module::regular_exports() field.
+ static FieldAccess ForModuleRegularExports();
+
+ // Provides access to Module::regular_imports() field.
+ static FieldAccess ForModuleRegularImports();
+
// Provides access to Name::hash_field() field.
static FieldAccess ForNameHashField();
@@ -182,6 +191,16 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSGlobalObject::native_context() field.
static FieldAccess ForJSGlobalObjectNativeContext();
+ // Provides access to JSArrayIterator::object() field.
+ static FieldAccess ForJSArrayIteratorObject();
+
+ // Provides access to JSArrayIterator::index() field.
+ static FieldAccess ForJSArrayIteratorIndex(InstanceType type = JS_OBJECT_TYPE,
+ ElementsKind kind = NO_ELEMENTS);
+
+ // Provides access to JSArrayIterator::object_map() field.
+ static FieldAccess ForJSArrayIteratorObjectMap();
+
// Provides access to JSStringIterator::string() field.
static FieldAccess ForJSStringIteratorString();
@@ -191,6 +210,9 @@ class AccessBuilder final : public AllStatic {
// Provides access to JSValue::value() field.
static FieldAccess ForValue();
+ // Provides access to Cell::value() field.
+ static FieldAccess ForCellValue();
+
// Provides access to arguments object fields.
static FieldAccess ForArgumentsLength();
static FieldAccess ForArgumentsCallee();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 329cb93fe5..866b06086a 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -10,6 +10,7 @@
#include "src/compiler/type-cache.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -24,8 +25,6 @@ bool CanInlineElementAccess(Handle<Map> map) {
if (map->has_indexed_interceptor()) return false;
ElementsKind const elements_kind = map->elements_kind();
if (IsFastElementsKind(elements_kind)) return true;
- // TODO(bmeurer): Add support for other elements kind.
- if (elements_kind == UINT8_CLAMPED_ELEMENTS) return false;
if (IsFixedTypedArrayElementsKind(elements_kind)) return true;
return false;
}
@@ -95,6 +94,12 @@ PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
}
+// static
+PropertyAccessInfo PropertyAccessInfo::Generic(MapList const& receiver_maps) {
+ return PropertyAccessInfo(kGeneric, MaybeHandle<JSObject>(), Handle<Object>(),
+ receiver_maps);
+}
+
PropertyAccessInfo::PropertyAccessInfo()
: kind_(kInvalid),
field_representation_(MachineRepresentation::kNone),
@@ -168,6 +173,12 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
}
return false;
}
+ case kGeneric: {
+ this->receiver_maps_.insert(this->receiver_maps_.end(),
+ that->receiver_maps_.begin(),
+ that->receiver_maps_.end());
+ return true;
+ }
}
UNREACHABLE();
@@ -301,7 +312,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
MachineRepresentation::kTagged;
MaybeHandle<Map> field_map;
if (details_representation.IsSmi()) {
- field_type = type_cache_.kSmi;
+ field_type = Type::SignedSmall();
field_representation = MachineRepresentation::kTaggedSigned;
} else if (details_representation.IsDouble()) {
field_type = type_cache_.kFloat64;
@@ -322,7 +333,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// Add proper code dependencies in case of stable field map(s).
Handle<Map> field_owner_map(map->FindFieldOwner(number),
isolate());
- dependencies()->AssumeFieldType(field_owner_map);
+ dependencies()->AssumeFieldOwner(field_owner_map);
// Remember the field map, and try to infer a useful type.
field_type = Type::For(descriptors_field_type->AsClass());
@@ -343,8 +354,13 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
: Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
if (!accessor->IsJSFunction()) {
- // TODO(turbofan): Add support for API accessors.
- return false;
+ CallOptimization optimization(accessor);
+ if (!optimization.is_simple_api_call()) {
+ return false;
+ }
+ if (optimization.api_call_info()->fast_handler()->IsCode()) {
+ return false;
+ }
}
*access_info = PropertyAccessInfo::AccessorConstant(
MapList{receiver_map}, accessor, holder);
@@ -474,7 +490,10 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
MaybeHandle<JSObject> holder,
PropertyAccessInfo* access_info) {
// Check if the {map} has a data transition with the given {name}.
- if (map->unused_property_fields() == 0) return false;
+ if (map->unused_property_fields() == 0) {
+ *access_info = PropertyAccessInfo::Generic(MapList{map});
+ return true;
+ }
Handle<Map> transition_map;
if (TransitionArray::SearchTransition(map, kData, name, NONE)
.ToHandle(&transition_map)) {
@@ -493,7 +512,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
MaybeHandle<Map> field_map;
MachineRepresentation field_representation = MachineRepresentation::kTagged;
if (details_representation.IsSmi()) {
- field_type = type_cache_.kSmi;
+ field_type = Type::SignedSmall();
field_representation = MachineRepresentation::kTaggedSigned;
} else if (details_representation.IsDouble()) {
field_type = type_cache_.kFloat64;
@@ -512,7 +531,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Add proper code dependencies in case of stable field map(s).
Handle<Map> field_owner_map(transition_map->FindFieldOwner(number),
isolate());
- dependencies()->AssumeFieldType(field_owner_map);
+ dependencies()->AssumeFieldOwner(field_owner_map);
// Remember the field map, and try to infer a useful type.
field_type = Type::For(descriptors_field_type->AsClass());
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index ac186fb144..1d485dd0d4 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -61,7 +61,8 @@ class PropertyAccessInfo final {
kNotFound,
kDataConstant,
kDataField,
- kAccessorConstant
+ kAccessorConstant,
+ kGeneric
};
static PropertyAccessInfo NotFound(MapList const& receiver_maps,
@@ -78,6 +79,7 @@ class PropertyAccessInfo final {
static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
+ static PropertyAccessInfo Generic(MapList const& receiver_maps);
PropertyAccessInfo();
@@ -87,6 +89,7 @@ class PropertyAccessInfo final {
bool IsDataConstant() const { return kind() == kDataConstant; }
bool IsDataField() const { return kind() == kDataField; }
bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
+ bool IsGeneric() const { return kind() == kGeneric; }
bool HasTransitionMap() const { return !transition_map().is_null(); }
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index dbe182802a..c473b9b6aa 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -136,25 +136,13 @@ class ArmOperandConverter final : public InstructionOperandConverter {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
-
- FloatRegister InputFloat32Register(size_t index) {
- return ToFloat32Register(instr_->InputAt(index));
- }
-
- FloatRegister OutputFloat32Register() {
- return ToFloat32Register(instr_->Output());
- }
-
- FloatRegister ToFloat32Register(InstructionOperand* op) {
- return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()).low();
- }
};
namespace {
-class OutOfLineLoadFloat32 final : public OutOfLineCode {
+class OutOfLineLoadFloat final : public OutOfLineCode {
public:
- OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+ OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
@@ -679,8 +667,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -688,11 +675,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -759,7 +744,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStackPointer:
@@ -1084,83 +1069,92 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mla(i.OutputRegister(1), i.InputRegister(2), i.InputRegister(1),
i.OutputRegister(1));
break;
- case kArmLslPair:
+ case kArmLslPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
- __ LslPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ LslPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
- case kArmLsrPair:
+ }
+ case kArmLsrPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
- __ LsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ LsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
- case kArmAsrPair:
+ }
+ case kArmAsrPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), i.InputInt32(2));
} else {
- __ AsrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ AsrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
+ }
case kArmVcmpF32:
if (instr->InputAt(1)->IsFPRegister()) {
- __ VFPCompareAndSetFlags(i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ VFPCompareAndSetFlags(i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
DCHECK(i.InputFloat32(1) == 0.0f);
- __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
+ __ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF32:
- __ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF32:
- __ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF32:
- __ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF32:
- __ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
- i.InputFloat32Register(2));
+ __ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF32:
- __ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
- i.InputFloat32Register(2));
+ __ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
+ i.InputFloatRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF32:
- __ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
- i.InputFloat32Register(1));
+ __ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsqrtF32:
- __ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVabsF32:
- __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVnegF32:
- __ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
case kArmVcmpF64:
if (instr->InputAt(1)->IsFPRegister()) {
@@ -1229,7 +1223,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmVrintmF32: {
CpuFeatureScope scope(masm(), ARMv8);
- __ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintmF64: {
@@ -1239,7 +1233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintpF32: {
CpuFeatureScope scope(masm(), ARMv8);
- __ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintpF64: {
@@ -1249,7 +1243,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintzF32: {
CpuFeatureScope scope(masm(), ARMv8);
- __ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintzF64: {
@@ -1264,7 +1258,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVrintnF32: {
CpuFeatureScope scope(masm(), ARMv8);
- __ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ __ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintnF64: {
@@ -1273,26 +1267,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVcvtF32F64: {
- __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
+ __ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64F32: {
- __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloat32Register(0));
+ __ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+ __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+ __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1312,7 +1306,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVcvtS32F32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+ __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
@@ -1323,7 +1317,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVcvtU32F32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+ __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
@@ -1347,11 +1341,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVmovU32F32:
- __ vmov(i.OutputRegister(), i.InputFloat32Register(0));
+ __ vmov(i.OutputRegister(), i.InputFloatRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovF32U32:
- __ vmov(i.OutputFloat32Register(), i.InputRegister(0));
+ __ vmov(i.OutputFloatRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovLowU32F64:
@@ -1409,12 +1403,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- __ vldr(i.OutputFloat32Register(), i.InputOffset());
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVstrF32:
- __ vstr(i.InputFloat32Register(0), i.InputOffset(1));
+ __ vstr(i.InputFloatRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF64:
@@ -1426,9 +1420,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmFloat32Max: {
- SwVfpRegister result = i.OutputFloat32Register();
- SwVfpRegister left = i.InputFloat32Register(0);
- SwVfpRegister right = i.InputFloat32Register(1);
+ SwVfpRegister result = i.OutputFloatRegister();
+ SwVfpRegister left = i.InputFloatRegister(0);
+ SwVfpRegister right = i.InputFloatRegister(1);
if (left.is(right)) {
__ Move(result, left);
} else {
@@ -1454,9 +1448,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmFloat32Min: {
- SwVfpRegister result = i.OutputFloat32Register();
- SwVfpRegister left = i.InputFloat32Register(0);
- SwVfpRegister right = i.InputFloat32Register(1);
+ SwVfpRegister result = i.OutputFloatRegister();
+ SwVfpRegister left = i.InputFloatRegister(0);
+ SwVfpRegister right = i.InputFloatRegister(1);
if (left.is(right)) {
__ Move(result, left);
} else {
@@ -1495,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
- __ vpush(i.InputFloat32Register(0));
+ __ vpush(i.InputFloatRegister(0));
frame_access_state()->IncreaseSPDelta(1);
}
} else {
@@ -1526,7 +1520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
break;
case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FP(Float32);
+ ASSEMBLE_CHECKED_LOAD_FP(Float);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FP(Double);
@@ -1541,7 +1535,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_STORE_INTEGER(str);
break;
case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FP(Float32);
+ ASSEMBLE_CHECKED_STORE_FP(Float);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FP(Double);
@@ -1649,7 +1643,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
__ CheckConstPool(false, false);
return kSuccess;
@@ -1696,6 +1690,9 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
+ if (descriptor->PushArgumentCount()) {
+ __ Push(kJavaScriptCallArgCountRegister);
+ }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
@@ -1705,7 +1702,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1743,8 +1741,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -1768,21 +1765,34 @@ void CodeGenerator::AssembleReturn() {
unwinding_info_writer_.MarkBlockWillExit();
+ ArmOperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now unless they have an variable
+ // number of stack slot pops.
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
- __ Ret(pop_count);
-}
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_count += g.ToConstant(pop).ToInt32();
+ } else {
+ __ Drop(g.ToRegister(pop));
+ }
+ __ Drop(pop_count);
+ __ Ret();
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -1852,12 +1862,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
- if (destination->IsFPStackSlot()) {
+ if (destination->IsFloatStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
} else {
- SwVfpRegister dst = g.ToFloat32Register(destination);
+ SwVfpRegister dst = g.ToFloatRegister(destination);
__ vmov(dst, src.ToFloat32());
}
} else {
@@ -1866,28 +1876,60 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
- if (destination->IsFPStackSlot()) {
+ if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ Move(dst, src);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ __ vstr(src, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- __ vstr(src, g.ToMemOperand(destination));
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ // GapResolver may give us reg codes that don't map to actual s-registers.
+ // Generate code to work around those cases.
+ int src_code = LocationOperand::cast(source)->register_code();
+ if (destination->IsFloatRegister()) {
+ int dst_code = LocationOperand::cast(destination)->register_code();
+ __ VmovExtended(dst_code, src_code, kScratchReg);
+ } else {
+ DCHECK(destination->IsFloatStackSlot());
+ __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
+ }
}
} else if (source->IsFPStackSlot()) {
MemOperand src = g.ToMemOperand(source);
+ MachineRepresentation rep =
+ LocationOperand::cast(destination)->representation();
if (destination->IsFPRegister()) {
+ if (rep == MachineRepresentation::kFloat64) {
__ vldr(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ // GapResolver may give us reg codes that don't map to actual
+ // s-registers. Generate code to work around those cases.
+ int dst_code = LocationOperand::cast(destination)->register_code();
+ __ VmovExtended(dst_code, src, kScratchReg);
+ }
} else {
DCHECK(destination->IsFPStackSlot());
+ if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ SwVfpRegister temp = kScratchDoubleReg.low();
+ __ vldr(temp, src);
+ __ vstr(temp, g.ToMemOperand(destination));
+ }
}
} else {
UNREACHABLE();
@@ -1927,17 +1969,35 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ str(temp_0, dst);
__ vstr(temp_1, src);
} else if (source->IsFPRegister()) {
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
LowDwVfpRegister temp = kScratchDoubleReg;
- DwVfpRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ vswp(src, dst);
+ if (rep == MachineRepresentation::kFloat64) {
+ DwVfpRegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ DwVfpRegister dst = g.ToDoubleRegister(destination);
+ __ vswp(src, dst);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(temp, src);
+ __ vldr(src, dst);
+ __ vstr(temp, dst);
+ }
} else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ Move(temp, src);
- __ vldr(src, dst);
- __ vstr(temp, dst);
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ int src_code = LocationOperand::cast(source)->register_code();
+ if (destination->IsFPRegister()) {
+ int dst_code = LocationOperand::cast(destination)->register_code();
+ __ VmovExtended(temp.low().code(), src_code, kScratchReg);
+ __ VmovExtended(src_code, dst_code, kScratchReg);
+ __ VmovExtended(dst_code, temp.low().code(), kScratchReg);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ VmovExtended(temp.low().code(), src_code, kScratchReg);
+ __ VmovExtended(src_code, dst, kScratchReg);
+ __ vstr(temp.low(), dst);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
@@ -1945,21 +2005,29 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
+ MachineRepresentation rep = LocationOperand::cast(source)->representation();
+ if (rep == MachineRepresentation::kFloat64) {
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ vstr(temp_1.low(), src0);
+ }
} else {
// No other combinations are possible.
UNREACHABLE();
}
}
-
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
// On 32-bit ARM we emit the jump tables inline.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index ceb5b2507f..5279d1eec1 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -957,76 +957,83 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
void InstructionSelector::VisitInt32PairAdd(Node* node) {
ArmOperandGenerator g(this);
- // We use UseUniqueRegister here to avoid register sharing with the output
- // registers.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-
- Emit(kArmAddPair, 2, outputs, 4, inputs);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmAddPair, 2, outputs, 4, inputs);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
ArmOperandGenerator g(this);
- // We use UseUniqueRegister here to avoid register sharing with the output
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- Emit(kArmSubPair, 2, outputs, 4, inputs);
+ Emit(kArmSubPair, 2, outputs, 4, inputs);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairMul(Node* node) {
ArmOperandGenerator g(this);
- InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-
- Emit(kArmMulPair, 2, outputs, 4, inputs);
-}
-
-void InstructionSelector::VisitWord32PairShl(Node* node) {
- ArmOperandGenerator g(this);
- // We use g.UseUniqueRegister here for InputAt(0) to guarantee that there is
- // no register aliasing with output registers.
- Int32Matcher m(node->InputAt(2));
- InstructionOperand shift_operand;
- if (m.HasValue()) {
- shift_operand = g.UseImmediate(m.node());
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kArmMulPair, 2, outputs, 4, inputs);
} else {
- shift_operand = g.UseUniqueRegister(m.node());
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kArmMul | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
}
-
- InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)),
- shift_operand};
-
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-
- Emit(kArmLslPair, 2, outputs, 3, inputs);
}
-void InstructionSelector::VisitWord32PairShr(Node* node) {
- ArmOperandGenerator g(this);
- // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to to
- // guarantee that there is no register aliasing with output register.
+namespace {
+// Shared routine for multiple shift operations.
+void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ // We use g.UseUniqueRegister here to guarantee that there is
+ // no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
@@ -1035,38 +1042,37 @@ void InstructionSelector::VisitWord32PairShr(Node* node) {
shift_operand = g.UseUniqueRegister(m.node());
}
- InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)),
shift_operand};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
- Emit(kArmLsrPair, 2, outputs, 3, inputs);
-}
+ InstructionOperand outputs[2];
+ InstructionOperand temps[1];
+ int32_t output_count = 0;
+ int32_t temp_count = 0;
-void InstructionSelector::VisitWord32PairSar(Node* node) {
- ArmOperandGenerator g(this);
- // We use g.UseUniqueRegister here for InputAt(1) and InputAt(2) to to
- // guarantee that there is no register aliasing with output register.
- Int32Matcher m(node->InputAt(2));
- InstructionOperand shift_operand;
- if (m.HasValue()) {
- shift_operand = g.UseImmediate(m.node());
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (projection1) {
+ outputs[output_count++] = g.DefineAsRegister(projection1);
} else {
- shift_operand = g.UseUniqueRegister(m.node());
+ temps[temp_count++] = g.TempRegister();
}
- InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- shift_operand};
+ selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
+}
+} // namespace
+void InstructionSelector::VisitWord32PairShl(Node* node) {
+ VisitWord32PairShift(this, kArmLslPair, node);
+}
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+void InstructionSelector::VisitWord32PairShr(Node* node) {
+ VisitWord32PairShift(this, kArmLsrPair, node);
+}
- Emit(kArmAsrPair, 2, outputs, 3, inputs);
+void InstructionSelector::VisitWord32PairSar(Node* node) {
+ VisitWord32PairShift(this, kArmAsrPair, node);
}
void InstructionSelector::VisitWord32Ror(Node* node) {
@@ -1870,21 +1876,22 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWordCompare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, cont);
@@ -1968,7 +1975,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
if (user->opcode() == IrOpcode::kWord32Equal) {
diff --git a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc
index a950612190..579e5c7f0a 100644
--- a/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc
+++ b/deps/v8/src/compiler/arm/unwinding-info-writer-arm.cc
@@ -15,7 +15,8 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
block_will_exit_ = false;
- DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+ DCHECK_LT(block->rpo_number().ToInt(),
+ static_cast<int>(block_initial_states_.size()));
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
if (initial_state) {
@@ -42,7 +43,7 @@ void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
for (const RpoNumber& successor : block->successors()) {
int successor_index = successor.ToInt();
- DCHECK_LT(successor_index, block_initial_states_.size());
+ DCHECK_LT(successor_index, static_cast<int>(block_initial_states_.size()));
const BlockInitialState* existing_state =
block_initial_states_[successor_index];
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index f543b18682..8b1cb578e0 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -709,8 +709,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -720,11 +719,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, temp);
__ Assert(eq, kWrongFunctionContext);
}
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(x10);
frame_access_state()->ClearSPDelta();
@@ -786,7 +783,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), masm()->StackPointer());
@@ -1759,7 +1756,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -1798,43 +1795,57 @@ void CodeGenerator::AssembleConstructFrame() {
__ AssertCspAligned();
}
+ int fixed_frame_size = descriptor->CalculateFixedFrameSize();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
+
if (frame_access_state()->has_frame()) {
+ // Link the frame
if (descriptor->IsJSFunctionCall()) {
DCHECK(!descriptor->UseNativeStack());
__ Prologue(this->info()->GeneratePreagedPrologue());
} else {
- if (descriptor->IsCFunctionCall()) {
- __ Push(lr, fp);
- __ Mov(fp, masm_.StackPointer());
- __ Claim(frame()->GetSpillSlotCount());
- } else {
- __ StubPrologue(info()->GetOutputStackFrameType(),
- frame()->GetTotalFrameSlotCount());
- }
+ __ Push(lr, fp);
+ __ Mov(fp, masm_.StackPointer());
}
-
if (!info()->GeneratePreagedPrologue()) {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- }
- int shrink_slots = frame()->GetSpillSlotCount();
-
- if (info()->is_osr()) {
- // TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
-
- // Unoptimized code jumps directly to this entrypoint while the unoptimized
- // frame is still on the stack. Optimized code uses OSR values directly from
- // the unoptimized frame. Thus, all that needs to be done is to allocate the
- // remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
- osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
- }
+ // Create OSR entry if applicable
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the
+ // unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly
+ // from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate
+ // the
+ // remaining stack slots.
+ if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ }
- if (descriptor->IsJSFunctionCall()) {
- __ Claim(shrink_slots);
+ // Build remainder of frame, including accounting for and filling-in
+ // frame-specific header information, e.g. claiming the extra slot that
+ // other platforms explicitly push for STUB frames and frames recording
+ // their argument count.
+ __ Claim(shrink_slots + (fixed_frame_size & 1));
+ if (descriptor->PushArgumentCount()) {
+ __ Str(kJavaScriptCallArgCountRegister,
+ MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
+ }
+ bool is_stub_frame =
+ !descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
+ if (is_stub_frame) {
+ UseScratchRegisterScope temps(masm());
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(info()->GetOutputStackFrameType()));
+ __ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ }
}
// Save FP registers.
@@ -1857,8 +1868,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
// Restore registers.
@@ -1877,16 +1887,25 @@ void CodeGenerator::AssembleReturn() {
unwinding_info_writer_.MarkBlockWillExit();
+ Arm64OperandConverter g(this, nullptr);
int pop_count = static_cast<int>(descriptor->StackParameterCount());
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ B(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now unless they have an variable
+ // number of stack slot pops.
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ B(&return_label_);
+ return;
+ } else {
+ __ Bind(&return_label_);
+ AssembleDeconstructFrame();
+ if (descriptor->UseNativeStack()) {
+ pop_count += (pop_count & 1); // align
+ }
+ }
} else {
- __ Bind(&return_label_);
AssembleDeconstructFrame();
if (descriptor->UseNativeStack()) {
pop_count += (pop_count & 1); // align
@@ -1895,7 +1914,16 @@ void CodeGenerator::AssembleReturn() {
} else if (descriptor->UseNativeStack()) {
pop_count += (pop_count & 1); // align
}
- __ Drop(pop_count);
+
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_count += g.ToConstant(pop).ToInt32();
+ __ Drop(pop_count);
+ } else {
+ Register pop_reg = g.ToRegister(pop);
+ __ Add(pop_reg, pop_reg, pop_count);
+ __ Drop(pop_reg);
+ }
if (descriptor->UseNativeStack()) {
__ AssertCspAligned();
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index da27be8626..0eef53c6d5 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -2152,6 +2152,20 @@ FlagsCondition MapForCbz(FlagsCondition cond) {
}
}
+void EmitBranchOrDeoptimize(InstructionSelector* selector,
+ InstructionCode opcode, InstructionOperand value,
+ FlagsContinuation* cont) {
+ Arm64OperandGenerator g(selector);
+ if (cont->IsBranch()) {
+ selector->Emit(cont->Encode(opcode), g.NoOutput(), value,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsDeoptimize());
+ selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
+ cont->reason(), cont->frame_state());
+ }
+}
+
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
// against zero, depending on the condition.
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
@@ -2160,12 +2174,16 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
USE(m_user);
DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
- // Only handle branches.
- if (!cont->IsBranch()) return false;
+ // Only handle branches and deoptimisations.
+ if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
switch (cond) {
case kSignedLessThan:
case kSignedGreaterThanOrEqual: {
+ // We don't generate TBZ/TBNZ for deoptimisations, as they have a
+ // shorter range than conditional branches and generating them for
+ // deoptimisations results in more veneers.
+ if (cont->IsDeoptimize()) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForTbz(cond));
Int32Matcher m(node);
@@ -2192,9 +2210,8 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
case kUnsignedGreaterThan: {
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForCbz(cond));
- selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
- g.UseRegister(node), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
+ g.UseRegister(node), cont);
return true;
}
default:
@@ -2336,21 +2353,22 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord32Compare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
@@ -2380,10 +2398,10 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
kLogical64Imm);
}
// Merge the Word64Equal(x, 0) comparison into a cbz instruction.
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArm64CompareAndBranch), g.NoOutput(),
- g.UseRegister(left), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
+ if (cont->IsBranch() || cont->IsDeoptimize()) {
+ EmitBranchOrDeoptimize(selector,
+ cont->Encode(kArm64CompareAndBranch),
+ g.UseRegister(left), cont);
return;
}
}
@@ -2488,7 +2506,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Branch could not be combined with a compare, compare against 0 and branch.
diff --git a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
index f4b732bf77..3095423854 100644
--- a/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
+++ b/deps/v8/src/compiler/arm64/unwinding-info-writer-arm64.cc
@@ -15,7 +15,8 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
block_will_exit_ = false;
- DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+ DCHECK_LT(block->rpo_number().ToInt(),
+ static_cast<int>(block_initial_states_.size()));
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
if (initial_state) {
@@ -42,7 +43,7 @@ void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
for (const RpoNumber& successor : block->successors()) {
int successor_index = successor.ToInt();
- DCHECK_LT(successor_index, block_initial_states_.size());
+ DCHECK_LT(successor_index, static_cast<int>(block_initial_states_.size()));
const BlockInitialState* existing_state =
block_initial_states_[successor_index];
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index b292a2e49e..1b7d1169dd 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -433,7 +433,7 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
type_hint_analysis_(type_hint_analysis),
state_values_cache_(jsgraph),
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
- local_zone),
+ false, local_zone),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
info->scope()->num_stack_slots(), info->shared_info())) {
@@ -613,7 +613,7 @@ void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
NonLiveFrameStateSlotReplacer replacer(
&state_values_cache_, jsgraph()->OptimizedOutConstant(),
- liveness_analyzer()->local_count(), local_zone());
+ liveness_analyzer()->local_count(), false, local_zone());
Variable* arguments = info()->scope()->arguments();
if (arguments != nullptr && arguments->IsStackAllocated()) {
replacer.MarkPermanentlyLive(arguments->index());
@@ -788,8 +788,10 @@ AstGraphBuilder::Environment::CopyAsUnreachable() {
}
AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForOsrEntry() {
- return new (zone())
- Environment(this, builder_->liveness_analyzer()->NewBlock());
+ LivenessAnalyzerBlock* copy_block =
+ liveness_block() == nullptr ? nullptr
+ : builder_->liveness_analyzer()->NewBlock();
+ return new (zone()) Environment(this, copy_block);
}
AstGraphBuilder::Environment*
@@ -839,13 +841,6 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
-void AstGraphBuilder::Environment::UpdateStateValuesWithCache(
- Node** state_values, int offset, int count) {
- Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
- *state_values = builder_->state_values_cache_.GetNodeForValues(
- env_values, static_cast<size_t>(count));
-}
-
Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
OutputFrameStateCombine combine,
bool owner_has_exception) {
@@ -854,7 +849,7 @@ Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
}
UpdateStateValues(&parameters_node_, 0, parameters_count());
- UpdateStateValuesWithCache(&locals_node_, parameters_count(), locals_count());
+ UpdateStateValues(&locals_node_, parameters_count(), locals_count());
UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
stack_height());
@@ -2283,21 +2278,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
Node* receiver_value = nullptr;
Node* callee_value = nullptr;
- bool possibly_eval = false;
- switch (call_type) {
- case Call::GLOBAL_CALL: {
- VariableProxy* proxy = callee->AsVariableProxy();
- VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
- receiver_hint = ConvertReceiverMode::kNullOrUndefined;
- receiver_value = jsgraph()->UndefinedConstant();
- break;
- }
- case Call::LOOKUP_SLOT_CALL: {
+ if (expr->is_possibly_eval()) {
+ if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
Variable* variable = callee->AsVariableProxy()->var();
- DCHECK(variable->location() == VariableLocation::LOOKUP);
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
@@ -2306,89 +2289,26 @@ void AstGraphBuilder::VisitCall(Call* expr) {
receiver_value = NewNode(common()->Projection(1), pair);
PrepareFrameState(pair, expr->LookupId(),
OutputFrameStateCombine::Push(2));
- break;
- }
- case Call::NAMED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VectorSlotPair feedback =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- VisitForValue(property->obj());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* object = environment()->Top();
- callee_value = BuildNamedLoad(object, name, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. However the receiver is guaranteed
- // not to be null or undefined at this point.
- receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
- receiver_value = environment()->Pop();
- break;
- }
- case Call::KEYED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VectorSlotPair feedback =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- VisitForValue(property->obj());
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* object = environment()->Top();
- callee_value = BuildKeyedLoad(object, key, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. However the receiver is guaranteed
- // not to be null or undefined at this point.
- receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
- receiver_value = environment()->Pop();
- break;
- }
- case Call::NAMED_SUPER_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- SuperPropertyReference* super_ref =
- property->obj()->AsSuperPropertyReference();
- VisitForValue(super_ref->home_object());
- VisitForValue(super_ref->this_var());
- Node* home = environment()->Peek(1);
- Node* object = environment()->Top();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value = BuildNamedSuperLoad(object, home, name, VectorSlotPair());
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. Since the receiver is not the target of
- // the load, it could very well be null or undefined at this point.
- receiver_value = environment()->Pop();
- environment()->Drop(1);
- break;
- }
- case Call::KEYED_SUPER_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- SuperPropertyReference* super_ref =
- property->obj()->AsSuperPropertyReference();
- VisitForValue(super_ref->home_object());
- VisitForValue(super_ref->this_var());
- environment()->Push(environment()->Top()); // Duplicate this_var.
- environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* home = environment()->Pop();
- Node* object = environment()->Pop();
- callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. Since the receiver is not the target of
- // the load, it could very well be null or undefined at this point.
- receiver_value = environment()->Pop();
- environment()->Drop(1);
- break;
+ } else {
+ VisitForValue(callee);
+ callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+ receiver_value = jsgraph()->UndefinedConstant();
}
- case Call::SUPER_CALL:
- return VisitCallSuper(expr);
- case Call::POSSIBLY_EVAL_CALL:
- possibly_eval = true;
- if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
+ } else {
+ switch (call_type) {
+ case Call::GLOBAL_CALL: {
+ VariableProxy* proxy = callee->AsVariableProxy();
+ VectorSlotPair pair =
+ CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+ receiver_value = jsgraph()->UndefinedConstant();
+ break;
+ }
+ case Call::WITH_CALL: {
Variable* variable = callee->AsVariableProxy()->var();
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
@@ -2400,13 +2320,92 @@ void AstGraphBuilder::VisitCall(Call* expr) {
OutputFrameStateCombine::Push(2));
break;
}
- // Fall through.
- case Call::OTHER_CALL:
- VisitForValue(callee);
- callee_value = environment()->Pop();
- receiver_hint = ConvertReceiverMode::kNullOrUndefined;
- receiver_value = jsgraph()->UndefinedConstant();
- break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* object = environment()->Top();
+ callee_value = BuildNamedLoad(object, name, feedback);
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. However the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Top();
+ callee_value = BuildKeyedLoad(object, key, feedback);
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. However the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ Node* home = environment()->Peek(1);
+ Node* object = environment()->Top();
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ callee_value =
+ BuildNamedSuperLoad(object, home, name, VectorSlotPair());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
+ break;
+ }
+ case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ SuperPropertyReference* super_ref =
+ property->obj()->AsSuperPropertyReference();
+ VisitForValue(super_ref->home_object());
+ VisitForValue(super_ref->this_var());
+ environment()->Push(environment()->Top()); // Duplicate this_var.
+ environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* home = environment()->Pop();
+ Node* object = environment()->Pop();
+ callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+ // an object for sloppy callees. Since the receiver is not the target of
+ // the load, it could very well be null or undefined at this point.
+ receiver_value = environment()->Pop();
+ environment()->Drop(1);
+ break;
+ }
+ case Call::SUPER_CALL:
+ return VisitCallSuper(expr);
+ case Call::OTHER_CALL:
+ VisitForValue(callee);
+ callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+ receiver_value = jsgraph()->UndefinedConstant();
+ break;
+ }
}
// The callee and the receiver both have to be pushed onto the operand stack
@@ -2420,7 +2419,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Resolve callee for a potential direct eval call. This block will mutate the
// callee value pushed onto the environment.
- if (possibly_eval && args->length() > 0) {
+ if (expr->is_possibly_eval() && args->length() > 0) {
int arg_count = args->length();
// Extract callee and source string from the environment.
@@ -2451,7 +2450,8 @@ void AstGraphBuilder::VisitCall(Call* expr) {
const Operator* call =
javascript()->CallFunction(args->length() + 2, frequency, feedback,
receiver_hint, expr->tail_call_mode());
- PrepareEagerCheckpoint(possibly_eval ? expr->EvalId() : expr->CallId());
+ PrepareEagerCheckpoint(expr->is_possibly_eval() ? expr->EvalId()
+ : expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
// The callee passed to the call, we just need to push something here to
// satisfy the bailout location contract. The fullcodegen code will not
@@ -2893,8 +2893,7 @@ void AstGraphBuilder::VisitCaseClause(CaseClause* expr) {
UNREACHABLE();
}
-
-void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void AstGraphBuilder::VisitDeclarations(Declaration::List* declarations) {
DCHECK(globals()->empty());
AstVisitor<AstGraphBuilder>::VisitDeclarations(declarations);
if (globals()->empty()) return;
@@ -3357,7 +3356,11 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
case VariableLocation::CONTEXT: {
// Context variable (potentially up the context chain).
int depth = current_scope()->ContextChainLength(variable->scope());
- bool immutable = variable->maybe_assigned() == kNotAssigned;
+ // TODO(mstarzinger): The {maybe_assigned} flag computed during variable
+ // resolution is highly inaccurate and cannot be trusted. We are only
+ // taking this information into account when asm.js compilation is used.
+ bool immutable = variable->maybe_assigned() == kNotAssigned &&
+ info()->is_function_context_specializing();
const Operator* op =
javascript()->LoadContext(depth, variable->index(), immutable);
Node* value = NewNode(op, current_context());
@@ -3775,7 +3778,8 @@ Node* AstGraphBuilder::BuildReturn(Node* return_value) {
return_value =
NewNode(javascript()->CallRuntime(Runtime::kTraceExit), return_value);
}
- Node* control = NewNode(common()->Return(), return_value);
+ Node* pop_node = jsgraph()->ZeroConstant();
+ Node* control = NewNode(common()->Return(), pop_node, return_value);
UpdateControlDependencyToLeaveFunction(control);
return control;
}
@@ -4202,27 +4206,49 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
graph->start(), graph->start());
UpdateControlDependency(osr_loop_entry);
UpdateEffectDependency(osr_loop_entry);
+
// Set OSR values.
for (int i = 0; i < size; ++i) {
values()->at(i) =
graph->NewNode(builder_->common()->OsrValue(i), osr_loop_entry);
}
- // Set the contexts.
+ // Set the innermost context.
+ const Operator* op_inner =
+ builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
+ contexts()->back() = graph->NewNode(op_inner, osr_loop_entry);
+
+ // Create a checkpoint.
+ Node* frame_state = Checkpoint(builder_->info()->osr_ast_id());
+ Node* checkpoint = graph->NewNode(common()->Checkpoint(), frame_state,
+ osr_loop_entry, osr_loop_entry);
+ UpdateEffectDependency(checkpoint);
+
+ // Create the OSR guard nodes.
+ const Operator* guard_op =
+ builder_->info()->is_deoptimization_enabled()
+ ? builder_->common()->OsrGuard(OsrGuardType::kUninitialized)
+ : builder_->common()->OsrGuard(OsrGuardType::kAny);
+ Node* effect = checkpoint;
+ for (int i = 0; i < size; ++i) {
+ values()->at(i) = effect =
+ graph->NewNode(guard_op, values()->at(i), effect, osr_loop_entry);
+ }
+ contexts()->back() = effect =
+ graph->NewNode(guard_op, contexts()->back(), effect, osr_loop_entry);
+
// The innermost context is the OSR value, and the outer contexts are
// reconstructed by dynamically walking up the context chain.
- Node* osr_context = nullptr;
- const Operator* op =
+ const Operator* load_op =
builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
- const Operator* op_inner =
- builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
+ Node* osr_context = effect = contexts()->back();
int last = static_cast<int>(contexts()->size() - 1);
- for (int i = last; i >= 0; i--) {
- osr_context = (i == last) ? graph->NewNode(op_inner, osr_loop_entry)
- : graph->NewNode(op, osr_context, osr_context,
- osr_loop_entry);
+ for (int i = last - 1; i >= 0; i--) {
+ osr_context = effect =
+ graph->NewNode(load_op, osr_context, osr_context, effect);
contexts()->at(i) = osr_context;
}
+ UpdateEffectDependency(effect);
}
void AstGraphBuilder::Environment::PrepareForLoop(BitVector* assigned) {
@@ -4335,6 +4361,16 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
return value;
}
+AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
+ Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+ float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
+ TypeHintAnalysis* type_hint_analysis, SourcePositionTable* source_positions,
+ int inlining_id)
+ : AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
+ loop_assignment, type_hint_analysis),
+ source_positions_(source_positions),
+ start_position_(info->shared_info()->start_position(), inlining_id) {}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 27f2c9b4c6..2013f5053b 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_AST_GRAPH_BUILDER_H_
#include "src/ast/ast.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
@@ -61,7 +62,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
#undef DECLARE_VISIT
// Visiting function for declarations list is overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
private:
class AstContext;
@@ -601,7 +602,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
LivenessAnalyzerBlock* liveness_block);
Environment* CopyAndShareLiveness();
void UpdateStateValues(Node** state_values, int offset, int count);
- void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
AstGraphBuilder* builder() const { return builder_; }
@@ -617,6 +617,35 @@ class AstGraphBuilder::Environment : public ZoneObject {
void PrepareForOsrEntry();
};
+class AstGraphBuilderWithPositions final : public AstGraphBuilder {
+ public:
+ AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
+ JSGraph* jsgraph, float invocation_frequency,
+ LoopAssignmentAnalysis* loop_assignment,
+ TypeHintAnalysis* type_hint_analysis,
+ SourcePositionTable* source_positions,
+ int inlining_id = SourcePosition::kNotInlined);
+
+ bool CreateGraph(bool stack_check = true) {
+ SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
+ return AstGraphBuilder::CreateGraph(stack_check);
+ }
+
+#define DEF_VISIT(type) \
+ void Visit##type(type* node) override { \
+ SourcePositionTable::Scope pos( \
+ source_positions_, \
+ SourcePosition(node->position(), start_position_.InliningId())); \
+ AstGraphBuilder::Visit##type(node); \
+ }
+ AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+ SourcePositionTable* const source_positions_;
+ SourcePosition const start_position_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 7abeecaf61..3c2cdb25a8 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
#define V8_COMPILER_BRANCH_CONDITION_ELIMINATION_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -15,8 +17,8 @@ namespace compiler {
class CommonOperatorBuilder;
class JSGraph;
-
-class BranchElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE BranchElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
~BranchElimination() final;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index d26ff93e3f..34b50df308 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -8,6 +8,7 @@
#include "src/ast/scopes.h"
#include "src/compilation-info.h"
#include "src/compiler/bytecode-branch-analysis.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
#include "src/interpreter/bytecodes.h"
@@ -25,6 +26,11 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Environment(BytecodeGraphBuilder* builder, int register_count,
int parameter_count, Node* control_dependency, Node* context);
+ // Specifies whether environment binding methods should attach frame state
+ // inputs to nodes representing the value being bound. This is done because
+ // the {OutputFrameStateCombine} is closely related to the binding method.
+ enum FrameStateAttachmentMode { kAttachFrameState, kDontAttachFrameState };
+
int parameter_count() const { return parameter_count_; }
int register_count() const { return register_count_; }
@@ -32,12 +38,15 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* LookupRegister(interpreter::Register the_register) const;
void MarkAllRegistersLive();
- void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+ void BindAccumulator(Node* node,
+ FrameStateAttachmentMode mode = kDontAttachFrameState);
void BindRegister(interpreter::Register the_register, Node* node,
- FrameStateBeforeAndAfter* states = nullptr);
- void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
- FrameStateBeforeAndAfter* states = nullptr);
- void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
+ FrameStateAttachmentMode mode = kDontAttachFrameState);
+ void BindRegistersToProjections(
+ interpreter::Register first_reg, Node* node,
+ FrameStateAttachmentMode mode = kDontAttachFrameState);
+ void RecordAfterState(Node* node,
+ FrameStateAttachmentMode mode = kDontAttachFrameState);
// Effect dependency tracked by this environment.
Node* GetEffectDependency() { return effect_dependency_; }
@@ -50,10 +59,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
bool owner_has_exception);
- // Returns true if the state values are up to date with the current
- // environment.
- bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
-
// Control dependency tracked by this environment.
Node* GetControlDependency() const { return control_dependency_; }
void UpdateControlDependency(Node* dependency) {
@@ -75,14 +80,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
void PrepareForLoop();
- enum { kNotCached, kCached };
-
- bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
- int output_poke_start, int output_poke_end,
- int cached = kNotCached);
bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
void UpdateStateValues(Node** state_values, int offset, int count);
- void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
int RegisterToValuesIndex(interpreter::Register the_register) const;
@@ -113,69 +112,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
int accumulator_base_;
};
-// Helper for generating frame states for before and after a bytecode.
-class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
- public:
- explicit FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder)
- : builder_(builder),
- id_after_(BailoutId::None()),
- added_to_node_(false),
- frame_states_unused_(false),
- output_poke_offset_(0),
- output_poke_count_(0) {
- BailoutId id_before(builder->bytecode_iterator().current_offset());
- frame_state_before_ = builder_->environment()->Checkpoint(
- id_before, OutputFrameStateCombine::Ignore(), false);
- id_after_ = BailoutId(id_before.ToInt() +
- builder->bytecode_iterator().current_bytecode_size());
- // Create an explicit checkpoint node for before the operation.
- Node* node = builder_->NewNode(builder_->common()->Checkpoint());
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node)->opcode());
- NodeProperties::ReplaceFrameStateInput(node, frame_state_before_);
- }
-
- ~FrameStateBeforeAndAfter() {
- DCHECK(added_to_node_);
- DCHECK(frame_states_unused_ ||
- builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
- output_poke_count_));
- }
-
- private:
- friend class Environment;
-
- void AddToNode(Node* node, OutputFrameStateCombine combine) {
- DCHECK(!added_to_node_);
- bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
- if (has_frame_state) {
- // Add the frame state for after the operation.
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node)->opcode());
- bool has_exception = NodeProperties::IsExceptionalCall(node);
- Node* frame_state_after = builder_->environment()->Checkpoint(
- id_after_, combine, has_exception);
- NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
- }
-
- if (!combine.IsOutputIgnored()) {
- output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
- output_poke_count_ = node->op()->ValueOutputCount();
- }
- frame_states_unused_ = !has_frame_state;
- added_to_node_ = true;
- }
-
- BytecodeGraphBuilder* builder_;
- Node* frame_state_before_;
- BailoutId id_after_;
-
- bool added_to_node_;
- bool frame_states_unused_;
- int output_poke_offset_;
- int output_poke_count_;
-};
-
// Issues:
// - Scopes - intimately tied to AST. Need to eval what is needed.
@@ -259,6 +195,10 @@ bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
}
Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
+ liveness_block()->LookupAccumulator();
+ }
return values()->at(accumulator_base_);
}
@@ -291,21 +231,24 @@ void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
}
void BytecodeGraphBuilder::Environment::BindAccumulator(
- Node* node, FrameStateBeforeAndAfter* states) {
- if (states) {
- states->AddToNode(node, OutputFrameStateCombine::PokeAt(0));
+ Node* node, FrameStateAttachmentMode mode) {
+ if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+ builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(0));
+ }
+ DCHECK(IsLivenessBlockConsistent());
+ if (liveness_block() != nullptr) {
+ liveness_block()->BindAccumulator();
}
values()->at(accumulator_base_) = node;
}
-
void BytecodeGraphBuilder::Environment::BindRegister(
interpreter::Register the_register, Node* node,
- FrameStateBeforeAndAfter* states) {
+ FrameStateAttachmentMode mode) {
int values_index = RegisterToValuesIndex(the_register);
- if (states) {
- states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
- values_index));
+ if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+ builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(
+ accumulator_base_ - values_index));
}
values()->at(values_index) = node;
if (liveness_block() != nullptr && !the_register.is_parameter()) {
@@ -314,14 +257,13 @@ void BytecodeGraphBuilder::Environment::BindRegister(
}
}
-
void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
interpreter::Register first_reg, Node* node,
- FrameStateBeforeAndAfter* states) {
+ FrameStateAttachmentMode mode) {
int values_index = RegisterToValuesIndex(first_reg);
- if (states) {
- states->AddToNode(node, OutputFrameStateCombine::PokeAt(accumulator_base_ -
- values_index));
+ if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+ builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(
+ accumulator_base_ - values_index));
}
for (int i = 0; i < node->op()->ValueOutputCount(); i++) {
values()->at(values_index + i) =
@@ -329,10 +271,11 @@ void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
}
}
-
void BytecodeGraphBuilder::Environment::RecordAfterState(
- Node* node, FrameStateBeforeAndAfter* states) {
- states->AddToNode(node, OutputFrameStateCombine::Ignore());
+ Node* node, FrameStateAttachmentMode mode) {
+ if (mode == FrameStateAttachmentMode::kAttachFrameState) {
+ builder()->PrepareFrameState(node, OutputFrameStateCombine::Ignore());
+ }
}
@@ -438,6 +381,24 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
values()->at(i) = graph()->NewNode(common()->OsrValue(idx), entry);
}
+
+ BailoutId loop_id(builder_->bytecode_iterator().current_offset());
+ Node* frame_state =
+ Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false);
+ Node* checkpoint =
+ graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
+ UpdateEffectDependency(checkpoint);
+
+ // Create the OSR guard nodes.
+ const Operator* guard_op = common()->OsrGuard(OsrGuardType::kUninitialized);
+ Node* effect = checkpoint;
+ for (int i = 0; i < size; i++) {
+ values()->at(i) = effect =
+ graph()->NewNode(guard_op, values()->at(i), effect, entry);
+ }
+ Node* context = effect = graph()->NewNode(guard_op, Context(), effect, entry);
+ SetContext(context);
+ UpdateEffectDependency(effect);
}
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
@@ -491,19 +452,12 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
}
-void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
- Node** state_values, int offset, int count) {
- Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
- *state_values = builder_->state_values_cache_.GetNodeForValues(
- env_values, static_cast<size_t>(count));
-}
-
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine,
bool owner_has_exception) {
UpdateStateValues(&parameters_state_values_, 0, parameter_count());
- UpdateStateValuesWithCache(&registers_state_values_, register_base(),
- register_count());
+ UpdateStateValues(&registers_state_values_, register_base(),
+ register_count());
UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
const Operator* op = common()->FrameState(
@@ -528,51 +482,10 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
return result;
}
-bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
- Node** state_values, int offset, int count, int output_poke_start,
- int output_poke_end, int cached) {
- DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
- if (cached == kNotCached) {
- for (int i = 0; i < count; i++, offset++) {
- if (offset < output_poke_start || offset >= output_poke_end) {
- if ((*state_values)->InputAt(i) != values()->at(offset)) {
- return false;
- }
- }
- }
- } else {
- for (StateValuesAccess::TypedNode state_value :
- StateValuesAccess(*state_values)) {
- if (offset < output_poke_start || offset >= output_poke_end) {
- if (state_value.node != values()->at(offset)) {
- return false;
- }
- }
- ++offset;
- }
- }
- return true;
-}
-
-
-bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
- int output_poke_offset, int output_poke_count) {
- // Poke offset is relative to the top of the stack (i.e., the accumulator).
- int output_poke_start = accumulator_base() - output_poke_offset;
- int output_poke_end = output_poke_start + output_poke_count;
- return StateValuesAreUpToDate(&parameters_state_values_, 0, parameter_count(),
- output_poke_start, output_poke_end) &&
- StateValuesAreUpToDate(&registers_state_values_, register_base(),
- register_count(), output_poke_start,
- output_poke_end, kCached) &&
- StateValuesAreUpToDate(&accumulator_state_values_, accumulator_base(),
- 1, output_poke_start, output_poke_end);
-}
-
-BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
- CompilationInfo* info,
- JSGraph* jsgraph,
- float invocation_frequency)
+BytecodeGraphBuilder::BytecodeGraphBuilder(
+ Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+ float invocation_frequency, SourcePositionTable* source_positions,
+ int inlining_id)
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
@@ -595,8 +508,10 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
info->is_deoptimization_enabled()),
state_values_cache_(jsgraph),
liveness_analyzer_(
- static_cast<size_t>(bytecode_array()->register_count()), local_zone) {
-}
+ static_cast<size_t>(bytecode_array()->register_count()), true,
+ local_zone),
+ source_positions_(source_positions),
+ start_position_(info->shared_info()->start_position(), inlining_id) {}
Node* BytecodeGraphBuilder::GetNewTarget() {
if (!new_target_.is_set()) {
@@ -649,7 +564,9 @@ VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
return VectorSlotPair(feedback_vector(), slot);
}
-bool BytecodeGraphBuilder::CreateGraph() {
+bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+ SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
+
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
@@ -661,7 +578,7 @@ bool BytecodeGraphBuilder::CreateGraph() {
GetFunctionContext());
set_environment(&env);
- VisitBytecodes();
+ VisitBytecodes(stack_check);
// Finish the basic structure of the graph.
DCHECK_NE(0u, exit_controls_.size());
@@ -675,13 +592,44 @@ bool BytecodeGraphBuilder::CreateGraph() {
return true;
}
+void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
+ if (environment()->GetEffectDependency()->opcode() != IrOpcode::kCheckpoint) {
+ // Create an explicit checkpoint node for before the operation. This only
+ // needs to happen if we aren't effect-dominated by a {Checkpoint} already.
+ Node* node = NewNode(common()->Checkpoint());
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node)->opcode());
+ BailoutId bailout_id(bytecode_iterator().current_offset());
+ Node* frame_state_before = environment()->Checkpoint(
+ bailout_id, OutputFrameStateCombine::Ignore(), false);
+ NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
+ }
+}
+
+void BytecodeGraphBuilder::PrepareFrameState(Node* node,
+ OutputFrameStateCombine combine) {
+ if (OperatorProperties::HasFrameStateInput(node->op())) {
+ // Add the frame state for after the operation. The node in question has
+ // already been created and had a {Dead} frame state input up until now.
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node)->opcode());
+ BailoutId bailout_id(bytecode_iterator().current_offset());
+ bool has_exception = NodeProperties::IsExceptionalCall(node);
+ Node* frame_state_after =
+ environment()->Checkpoint(bailout_id, combine, has_exception);
+ NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
+ }
+}
+
void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
if (!IsLivenessAnalysisEnabled()) {
return;
}
NonLiveFrameStateSlotReplacer replacer(
&state_values_cache_, jsgraph()->OptimizedOutConstant(),
- liveness_analyzer()->local_count(), local_zone());
+ liveness_analyzer()->local_count(), true, local_zone());
liveness_analyzer()->Run(&replacer);
if (FLAG_trace_environment_liveness) {
OFStream os(stdout);
@@ -689,24 +637,36 @@ void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
}
}
-void BytecodeGraphBuilder::VisitBytecodes() {
+void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
analysis.Analyze();
loop_analysis.Analyze();
set_branch_analysis(&analysis);
set_loop_analysis(&loop_analysis);
+
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
+ SourcePositionTableIterator source_position_iterator(
+ bytecode_array()->source_position_table());
+
BuildOSRNormalEntryPoint();
- while (!iterator.done()) {
+ for (; !iterator.done(); iterator.Advance()) {
int current_offset = iterator.current_offset();
+ UpdateCurrentSourcePosition(&source_position_iterator, current_offset);
EnterAndExitExceptionHandlers(current_offset);
SwitchToMergeEnvironment(current_offset);
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
BuildOSRLoopEntryPoint(current_offset);
+ // Skip the first stack check if stack_check is false
+ if (!stack_check &&
+ iterator.current_bytecode() == interpreter::Bytecode::kStackCheck) {
+ stack_check = true;
+ continue;
+ }
+
switch (iterator.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
@@ -716,8 +676,8 @@ void BytecodeGraphBuilder::VisitBytecodes() {
#undef BYTECODE_CODE
}
}
- iterator.Advance();
}
+
set_branch_analysis(nullptr);
set_bytecode_iterator(nullptr);
DCHECK(exception_handlers_.empty());
@@ -744,11 +704,6 @@ void BytecodeGraphBuilder::VisitLdaUndefined() {
environment()->BindAccumulator(node);
}
-void BytecodeGraphBuilder::VisitLdrUndefined() {
- Node* node = jsgraph()->UndefinedConstant();
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node);
-}
-
void BytecodeGraphBuilder::VisitLdaNull() {
Node* node = jsgraph()->NullConstant();
environment()->BindAccumulator(node);
@@ -797,29 +752,21 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
- FrameStateBeforeAndAfter states(this);
- Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
- TypeofMode::NOT_INSIDE_TYPEOF);
- environment()->BindAccumulator(node, &states);
-}
-
-void BytecodeGraphBuilder::VisitLdrGlobal() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
TypeofMode::NOT_INSIDE_TYPEOF);
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), node,
- &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
TypeofMode::INSIDE_TYPEOF);
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Handle<Name> name =
Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
VectorSlotPair feedback =
@@ -828,7 +775,7 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
Node* node = NewNode(op, value, GetFunctionClosure());
- environment()->RecordAfterState(node, &states);
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
@@ -839,7 +786,7 @@ void BytecodeGraphBuilder::VisitStaGlobalStrict() {
BuildStoreGlobal(LanguageMode::STRICT);
}
-Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
// TODO(mythria): immutable flag is also set to false. This information is not
// available in bytecode array. update this code when the implementation
// changes.
@@ -848,17 +795,19 @@ Node* BytecodeGraphBuilder::BuildLoadContextSlot() {
bytecode_iterator().GetIndexOperand(1), false);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- return NewNode(op, context);
-}
-
-void BytecodeGraphBuilder::VisitLdaContextSlot() {
- Node* node = BuildLoadContextSlot();
+ Node* node = NewNode(op, context);
environment()->BindAccumulator(node);
}
-void BytecodeGraphBuilder::VisitLdrContextSlot() {
- Node* node = BuildLoadContextSlot();
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node);
+void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
+ // TODO(mythria): immutable flag is also set to false. This information is not
+ // available in bytecode array. update this code when the implementation
+ // changes.
+ const Operator* op = javascript()->LoadContext(
+ 0, bytecode_iterator().GetIndexOperand(0), false);
+ Node* context = environment()->Context();
+ Node* node = NewNode(op, context);
+ environment()->BindAccumulator(node);
}
void BytecodeGraphBuilder::VisitStaContextSlot() {
@@ -871,8 +820,16 @@ void BytecodeGraphBuilder::VisitStaContextSlot() {
NewNode(op, context, value);
}
+void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
+ const Operator* op =
+ javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(0));
+ Node* context = environment()->Context();
+ Node* value = environment()->LookupAccumulator();
+ NewNode(op, context, value);
+}
+
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* name =
jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
const Operator* op =
@@ -880,7 +837,7 @@ void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotInsideTypeof);
Node* value = NewNode(op, name);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLdaLookupSlot() {
@@ -960,8 +917,6 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- FrameStateBeforeAndAfter states(this);
-
Node* name = jsgraph()->Constant(
bytecode_iterator().GetConstantForIndexOperand(0));
@@ -970,7 +925,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotInsideTypeof);
Node* value = NewNode(op, name);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
fast_environment->Merge(environment());
@@ -994,10 +949,10 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* node =
BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
// Only build the slow path if there were any slow-path checks.
@@ -1009,8 +964,6 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- FrameStateBeforeAndAfter states(this);
-
Node* name = jsgraph()->Constant(
bytecode_iterator().GetConstantForIndexOperand(0));
@@ -1019,7 +972,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotInsideTypeof);
Node* value = NewNode(op, name);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
fast_environment->Merge(environment());
@@ -1036,7 +989,7 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
}
void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* name =
jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -1044,7 +997,7 @@ void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
: Runtime::kStoreLookupSlot_Sloppy);
Node* store = NewNode(op, name, value);
- environment()->BindAccumulator(store, &states);
+ environment()->BindAccumulator(store, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
@@ -1055,7 +1008,8 @@ void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
BuildStaLookupSlot(LanguageMode::STRICT);
}
-Node* BytecodeGraphBuilder::BuildNamedLoad() {
+void BytecodeGraphBuilder::VisitLdaNamedProperty() {
+ PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Handle<Name> name =
@@ -1064,23 +1018,12 @@ Node* BytecodeGraphBuilder::BuildNamedLoad() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
- return NewNode(op, object, GetFunctionClosure());
+ Node* node = NewNode(op, object, GetFunctionClosure());
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitLdaNamedProperty() {
- FrameStateBeforeAndAfter states(this);
- Node* node = BuildNamedLoad();
- environment()->BindAccumulator(node, &states);
-}
-
-void BytecodeGraphBuilder::VisitLdrNamedProperty() {
- FrameStateBeforeAndAfter states(this);
- Node* node = BuildNamedLoad();
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3), node,
- &states);
-}
-
-Node* BytecodeGraphBuilder::BuildKeyedLoad() {
+void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
+ PrepareEagerCheckpoint();
Node* key = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1088,24 +1031,12 @@ Node* BytecodeGraphBuilder::BuildKeyedLoad() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadProperty(feedback);
- return NewNode(op, object, key, GetFunctionClosure());
-}
-
-void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
- FrameStateBeforeAndAfter states(this);
- Node* node = BuildKeyedLoad();
- environment()->BindAccumulator(node, &states);
-}
-
-void BytecodeGraphBuilder::VisitLdrKeyedProperty() {
- FrameStateBeforeAndAfter states(this);
- Node* node = BuildKeyedLoad();
- environment()->BindRegister(bytecode_iterator().GetRegisterOperand(2), node,
- &states);
+ Node* node = NewNode(op, object, key, GetFunctionClosure());
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1116,7 +1047,7 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
Node* node = NewNode(op, object, value, GetFunctionClosure());
- environment()->RecordAfterState(node, &states);
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
@@ -1128,7 +1059,7 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
}
void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
@@ -1139,7 +1070,7 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = NewNode(op, object, key, value, GetFunctionClosure());
- environment()->RecordAfterState(node, &states);
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitStaKeyedPropertySloppy() {
@@ -1150,6 +1081,26 @@ void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
BuildKeyedStore(LanguageMode::STRICT);
}
+void BytecodeGraphBuilder::VisitLdaModuleVariable() {
+ int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
+ uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
+ Node* module =
+ NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ environment()->Context());
+ Node* value = NewNode(javascript()->LoadModule(cell_index), module);
+ environment()->BindAccumulator(value);
+}
+
+void BytecodeGraphBuilder::VisitStaModuleVariable() {
+ int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
+ uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
+ Node* module =
+ NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
+ environment()->Context());
+ Node* value = environment()->LookupAccumulator();
+ NewNode(javascript()->StoreModule(cell_index), module, value);
+}
+
void BytecodeGraphBuilder::VisitPushContext() {
Node* new_context = environment()->LookupAccumulator();
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
@@ -1218,10 +1169,9 @@ void BytecodeGraphBuilder::VisitCreateWithContext() {
}
void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
- FrameStateBeforeAndAfter states(this);
const Operator* op = javascript()->CreateArguments(type);
Node* object = NewNode(op, GetFunctionClosure());
- environment()->BindAccumulator(object, &states);
+ environment()->BindAccumulator(object, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCreateMappedArguments() {
@@ -1236,20 +1186,15 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
BuildCreateArguments(CreateArgumentsType::kRestParameter);
}
-void BytecodeGraphBuilder::BuildCreateLiteral(const Operator* op) {
- FrameStateBeforeAndAfter states(this);
- Node* literal = NewNode(op, GetFunctionClosure());
- environment()->BindAccumulator(literal, &states);
-}
-
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
Handle<String> constant_pattern =
Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
- const Operator* op = javascript()->CreateLiteralRegExp(
- constant_pattern, literal_flags, literal_index);
- BuildCreateLiteral(op);
+ Node* literal = NewNode(javascript()->CreateLiteralRegExp(
+ constant_pattern, literal_flags, literal_index),
+ GetFunctionClosure());
+ environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
@@ -1263,13 +1208,15 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
// code. We can revisit this when we have data to the contrary.
literal_flags |= ArrayLiteral::kDisableMementos;
int number_of_elements = constant_elements->length();
- const Operator* op = javascript()->CreateLiteralArray(
- constant_elements, literal_flags, literal_index, number_of_elements);
- BuildCreateLiteral(op);
+ Node* literal = NewNode(
+ javascript()->CreateLiteralArray(constant_elements, literal_flags,
+ literal_index, number_of_elements),
+ GetFunctionClosure());
+ environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
@@ -1283,7 +1230,7 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
literal_index, number_of_properties),
GetFunctionClosure());
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(3),
- literal, &states);
+ literal, Environment::kAttachFrameState);
}
Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
@@ -1302,9 +1249,10 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
return value;
}
-void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
- FrameStateBeforeAndAfter states(this);
- ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
+void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
+ ConvertReceiverMode receiver_hint) {
+ PrepareEagerCheckpoint();
+
Node* callee =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
@@ -1320,21 +1268,27 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
const Operator* call = javascript()->CallFunction(
arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitCall() {
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
}
-void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
+void BytecodeGraphBuilder::VisitCallProperty() {
+ BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined);
+}
void BytecodeGraphBuilder::VisitTailCall() {
TailCallMode tail_call_mode =
bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
? TailCallMode::kAllow
: TailCallMode::kDisallow;
- BuildCall(tail_call_mode);
+ BuildCall(tail_call_mode, ConvertReceiverMode::kAny);
}
void BytecodeGraphBuilder::VisitCallJSRuntime() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* callee =
BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
@@ -1343,7 +1297,7 @@ void BytecodeGraphBuilder::VisitCallJSRuntime() {
// Create node to perform the JS runtime call.
const Operator* call = javascript()->CallFunction(arg_count + 1);
Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
@@ -1360,7 +1314,7 @@ Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
}
void BytecodeGraphBuilder::VisitCallRuntime() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1368,11 +1322,11 @@ void BytecodeGraphBuilder::VisitCallRuntime() {
// Create node to perform the runtime call.
const Operator* call = javascript()->CallRuntime(functionId, arg_count);
Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetRuntimeIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1382,11 +1336,12 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
// Create node to perform the runtime call.
const Operator* call = javascript()->CallRuntime(functionId, arg_count);
Node* return_pair = ProcessCallRuntimeArguments(call, first_arg, arg_count);
- environment()->BindRegistersToProjections(first_return, return_pair, &states);
+ environment()->BindRegistersToProjections(first_return, return_pair,
+ Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1395,7 +1350,7 @@ void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
// lowering.
const Operator* call = javascript()->CallRuntime(functionId, arg_count);
Node* value = ProcessCallRuntimeArguments(call, first_arg, arg_count);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
Node* BytecodeGraphBuilder::ProcessCallNewArguments(
@@ -1414,7 +1369,7 @@ Node* BytecodeGraphBuilder::ProcessCallNewArguments(
}
void BytecodeGraphBuilder::VisitNew() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
@@ -1432,14 +1387,14 @@ void BytecodeGraphBuilder::VisitNew() {
static_cast<int>(arg_count) + 2, frequency, feedback);
Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
arg_count + 2);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::BuildThrow() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
- environment()->BindAccumulator(call, &states);
+ environment()->BindAccumulator(call, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitThrow() {
@@ -1459,12 +1414,12 @@ void BytecodeGraphBuilder::VisitReThrow() {
}
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
Node* node = NewNode(js_op, left, right);
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
// Helper function to create binary operation hint from the recorded type
@@ -1495,11 +1450,8 @@ CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
}
float BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
- if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
- CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
- return nexus.ComputeCallFrequency() * invocation_frequency_;
- }
- return 0.0f;
+ CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return nexus.ComputeCallFrequency() * invocation_frequency_;
}
void BytecodeGraphBuilder::VisitAdd() {
@@ -1558,12 +1510,12 @@ void BytecodeGraphBuilder::VisitShiftRightLogical() {
}
void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
Node* node = NewNode(js_op, left, right);
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitAddSmi() {
@@ -1597,23 +1549,23 @@ void BytecodeGraphBuilder::VisitShiftRightSmi() {
}
void BytecodeGraphBuilder::VisitInc() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
// Note: Use subtract -1 here instead of add 1 to ensure we always convert to
// a number, not a string.
const Operator* js_op =
javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
jsgraph()->Constant(-1));
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitDec() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
const Operator* js_op =
javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
jsgraph()->OneConstant());
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLogicalNot() {
@@ -1638,13 +1590,13 @@ void BytecodeGraphBuilder::VisitTypeOf() {
}
void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* key = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* node =
NewNode(javascript()->DeleteProperty(language_mode), object, key);
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitDeletePropertyStrict() {
@@ -1656,12 +1608,12 @@ void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
}
void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* left =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
Node* node = NewNode(js_op, left, right);
- environment()->BindAccumulator(node, &states);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitTestEqual() {
@@ -1701,10 +1653,10 @@ void BytecodeGraphBuilder::VisitTestInstanceOf() {
}
void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* value = NewNode(js_op, environment()->LookupAccumulator());
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
- &states);
+ Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitToName() {
@@ -1772,23 +1724,24 @@ void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
void BytecodeGraphBuilder::VisitStackCheck() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* node = NewNode(javascript()->StackCheck());
- environment()->RecordAfterState(node, &states);
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitReturn() {
BuildLoopExitsForFunctionExit();
+ Node* pop_node = jsgraph()->ZeroConstant();
Node* control =
- NewNode(common()->Return(), environment()->LookupAccumulator());
+ NewNode(common()->Return(), pop_node, environment()->LookupAccumulator());
MergeControlToLeaveFunction(control);
}
void BytecodeGraphBuilder::VisitDebugger() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* call =
NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
- environment()->BindAccumulator(call, &states);
+ environment()->BindAccumulator(call, Environment::kAttachFrameState);
environment()->MarkAllRegistersLive();
}
@@ -1799,18 +1752,19 @@ DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
void BytecodeGraphBuilder::BuildForInPrepare() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
environment()->BindRegistersToProjections(
- bytecode_iterator().GetRegisterOperand(1), prepare, &states);
+ bytecode_iterator().GetRegisterOperand(1), prepare,
+ Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
void BytecodeGraphBuilder::VisitForInContinue() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* index =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* cache_length =
@@ -1818,11 +1772,11 @@ void BytecodeGraphBuilder::VisitForInContinue() {
Node* exit_cond =
NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall), index,
cache_length);
- environment()->BindAccumulator(exit_cond, &states);
+ environment()->BindAccumulator(exit_cond, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::BuildForInNext() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* index =
@@ -1835,18 +1789,18 @@ void BytecodeGraphBuilder::BuildForInNext() {
Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
cache_type, index);
- environment()->BindAccumulator(value, &states);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
void BytecodeGraphBuilder::VisitForInStep() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* index =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
jsgraph()->OneConstant());
- environment()->BindAccumulator(index, &states);
+ environment()->BindAccumulator(index, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitSuspendGenerator() {
@@ -1876,7 +1830,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
}
void BytecodeGraphBuilder::VisitResumeGenerator() {
- FrameStateBeforeAndAfter states(this);
+ PrepareEagerCheckpoint();
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
@@ -1891,7 +1845,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* state =
NewNode(javascript()->GeneratorRestoreContinuation(), generator);
- environment()->BindAccumulator(state, &states);
+ environment()->BindAccumulator(state, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitWide() {
@@ -2114,9 +2068,9 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
*current_input++ = environment()->Context();
}
if (has_frame_state) {
- // The frame state will be inserted later. Here we misuse
- // the {Dead} node as a sentinel to be later overwritten
- // with the real frame state.
+ // The frame state will be inserted later. Here we misuse the {Dead} node
+ // as a sentinel to be later overwritten with the real frame state by the
+ // calls to {PrepareFrameState} within individual visitor methods.
*current_input++ = jsgraph()->Dead();
}
if (has_effect) {
@@ -2238,6 +2192,19 @@ Node* BytecodeGraphBuilder::MergeValue(Node* value, Node* other,
return value;
}
+void BytecodeGraphBuilder::UpdateCurrentSourcePosition(
+ SourcePositionTableIterator* it, int offset) {
+ if (it->done()) return;
+
+ if (it->code_offset() == offset) {
+ source_positions_->SetCurrentPosition(SourcePosition(
+ it->source_position().ScriptOffset(), start_position_.InliningId()));
+ it->Advance();
+ } else {
+ DCHECK_GT(it->code_offset(), offset);
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 53582f73d7..6994226dc3 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -14,6 +14,7 @@
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
+#include "src/source-position-table.h"
namespace v8 {
namespace internal {
@@ -22,21 +23,24 @@ class CompilationInfo;
namespace compiler {
+class SourcePositionTable;
+
// The BytecodeGraphBuilder produces a high-level IR graph based on
// interpreter bytecodes.
class BytecodeGraphBuilder {
public:
BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency);
+ JSGraph* jsgraph, float invocation_frequency,
+ SourcePositionTable* source_positions,
+ int inlining_id = SourcePosition::kNotInlined);
// Creates a graph by visiting bytecodes.
- bool CreateGraph();
+ bool CreateGraph(bool stack_check = true);
private:
class Environment;
- class FrameStateBeforeAndAfter;
- void VisitBytecodes();
+ void VisitBytecodes(bool stack_check);
// Get or create the node that represents the outer function closure.
Node* GetFunctionClosure();
@@ -117,24 +121,31 @@ class BytecodeGraphBuilder {
interpreter::Register first_arg,
size_t arity);
+ // Prepare information for eager deoptimization. This information is carried
+ // by dedicated {Checkpoint} nodes that are wired into the effect chain.
+ // Conceptually this frame state is "before" a given operation.
+ void PrepareEagerCheckpoint();
+
+ // Prepare information for lazy deoptimization. This information is attached
+ // to the given node and the output value produced by the node is combined.
+ // Conceptually this frame state is "after" a given operation.
+ void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
+
// Computes register liveness and replaces dead ones in frame states with the
// undefined values.
void ClearNonLiveSlotsInFrameStates();
- void BuildCreateLiteral(const Operator* op);
void BuildCreateArguments(CreateArgumentsType type);
- Node* BuildLoadContextSlot();
Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
- Node* BuildNamedLoad();
void BuildNamedStore(LanguageMode language_mode);
- Node* BuildKeyedLoad();
void BuildKeyedStore(LanguageMode language_mode);
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
void BuildStaLookupSlot(LanguageMode language_mode);
- void BuildCall(TailCallMode tail_call_mode);
+ void BuildCall(TailCallMode tail_call_mode,
+ ConvertReceiverMode receiver_hint);
void BuildThrow();
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
@@ -301,6 +312,15 @@ class BytecodeGraphBuilder {
// Analyzer of register liveness.
LivenessAnalyzer liveness_analyzer_;
+ // The Turbofan source position table, to be populated.
+ SourcePositionTable* source_positions_;
+
+ SourcePosition const start_position_;
+
+ // Update [source_positions_]'s current position to that of the bytecode at
+ // [offset], if any.
+ void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
+
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/deps/v8/src/compiler/checkpoint-elimination.h b/deps/v8/src/compiler/checkpoint-elimination.h
index edaa0e7734..f30eec0f55 100644
--- a/deps/v8/src/compiler/checkpoint-elimination.h
+++ b/deps/v8/src/compiler/checkpoint-elimination.h
@@ -5,14 +5,17 @@
#ifndef V8_COMPILER_CHECKPOINT_ELIMINATION_H_
#define V8_COMPILER_CHECKPOINT_ELIMINATION_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
namespace compiler {
// Performs elimination of redundant checkpoints within the graph.
-class CheckpointElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE CheckpointElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
explicit CheckpointElimination(Editor* editor);
~CheckpointElimination() final {}
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 46dc84dc7f..3431098446 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -41,8 +41,11 @@ CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
Code::Flags flags, const char* name)
: CodeAssembler(isolate, zone,
- Linkage::GetJSCallDescriptor(zone, false, parameter_count,
- CallDescriptor::kNoFlags),
+ Linkage::GetJSCallDescriptor(
+ zone, false, parameter_count,
+ Code::ExtractKindFromFlags(flags) == Code::BUILTIN
+ ? CallDescriptor::kPushArgumentCount
+ : CallDescriptor::kNoFlags),
flags, name) {}
CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
@@ -111,7 +114,11 @@ Node* CodeAssembler::NumberConstant(double value) {
}
Node* CodeAssembler::SmiConstant(Smi* value) {
- return IntPtrConstant(bit_cast<intptr_t>(value));
+ return BitcastWordToTaggedSigned(IntPtrConstant(bit_cast<intptr_t>(value)));
+}
+
+Node* CodeAssembler::SmiConstant(int value) {
+ return SmiConstant(Smi::FromInt(value));
}
Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
@@ -152,6 +159,20 @@ bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
return m.HasValue();
}
+bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
+ if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned) {
+ node = node->InputAt(0);
+ } else {
+ return false;
+ }
+ IntPtrMatcher m(node);
+ if (m.HasValue()) {
+ out_value = Smi::cast(bit_cast<Object*>(m.Value()));
+ return true;
+ }
+ return false;
+}
+
bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
IntPtrMatcher m(node);
if (m.HasValue()) out_value = m.Value();
@@ -166,6 +187,10 @@ void CodeAssembler::Return(Node* value) {
return raw_assembler_->Return(value);
}
+void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
+ return raw_assembler_->PopAndReturn(pop, value);
+}
+
void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
void CodeAssembler::Comment(const char* format, ...) {
@@ -220,7 +245,7 @@ Node* CodeAssembler::WordShr(Node* value, int shift) {
}
Node* CodeAssembler::Word32Shr(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->Word32Shr(value, IntPtrConstant(shift))
+ return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
: value;
}
@@ -318,15 +343,6 @@ Node* CodeAssembler::Projection(int index, Node* value) {
return raw_assembler_->Projection(index, value);
}
-void CodeAssembler::BranchIf(Node* condition, Label* if_true, Label* if_false) {
- Label if_condition_is_true(this), if_condition_is_false(this);
- Branch(condition, &if_condition_is_true, &if_condition_is_false);
- Bind(&if_condition_is_true);
- Goto(if_true);
- Bind(&if_condition_is_false);
- Goto(if_false);
-}
-
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) {
Label success(this), exception(this, Label::kDeferred);
@@ -404,6 +420,16 @@ Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
return return_value;
}
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime5(function_id, arg1, arg2,
+ arg3, arg4, arg5, context);
+ CallEpilogue();
+ return return_value;
+}
+
Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
Node* context) {
return raw_assembler_->TailCallRuntime0(function_id, context);
@@ -710,6 +736,14 @@ Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
arg4, result_size);
}
+Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* arg5, size_t result_size) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
+ arg4, arg5, result_size);
+}
+
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, Node* arg1,
size_t result_size) {
@@ -798,6 +832,27 @@ Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
}
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, Node* arg6, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(7);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = arg6;
+ args[6] = context;
+
+ return raw_assembler_->TailCallN(call_descriptor, target, args);
+}
+
+Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
Node* target, Node* context, const Arg& arg1,
const Arg& arg2, const Arg& arg3,
const Arg& arg4, size_t result_size) {
@@ -900,6 +955,24 @@ Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
}
+Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
+ Node* function, Node* receiver, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size) {
+ const int argc = 3;
+ Node* target = HeapConstant(callable.code());
+
+ Node** args = zone()->NewArray<Node*>(argc + 4);
+ args[0] = function;
+ args[1] = Int32Constant(argc);
+ args[2] = receiver;
+ args[3] = arg1;
+ args[4] = arg2;
+ args[5] = arg3;
+ args[6] = context;
+
+ return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+}
+
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
@@ -1010,16 +1083,15 @@ bool CodeAssembler::Variable::IsBound() const {
return impl_->value_ != nullptr;
}
-CodeAssembler::Label::Label(CodeAssembler* assembler, int merged_value_count,
- CodeAssembler::Variable** merged_variables,
- CodeAssembler::Label::Type type)
+CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
+ Variable** vars, CodeAssembler::Label::Type type)
: bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
label_ = new (buffer)
RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
: RawMachineLabel::kNonDeferred);
- for (int i = 0; i < merged_value_count; ++i) {
- variable_phis_[merged_variables[i]->impl_] = nullptr;
+ for (size_t i = 0; i < vars_count; ++i) {
+ variable_phis_[vars[i]->impl_] = nullptr;
}
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 646a6d1c65..1f364d99e3 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -12,6 +12,7 @@
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/builtins/builtins.h"
+#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
@@ -57,6 +58,7 @@ class RawMachineLabel;
V(Uint32LessThanOrEqual) \
V(Uint32GreaterThanOrEqual) \
V(UintPtrLessThan) \
+ V(UintPtrLessThanOrEqual) \
V(UintPtrGreaterThan) \
V(UintPtrGreaterThanOrEqual) \
V(WordEqual) \
@@ -173,7 +175,7 @@ class RawMachineLabel;
// clients, CodeAssembler also provides an abstraction for creating variables
// and enhanced Label functionality to merge variable values along paths where
// they have differing values, including loops.
-class CodeAssembler {
+class V8_EXPORT_PRIVATE CodeAssembler {
public:
// Create with CallStub linkage.
// |result_size| specifies the number of results returned by the stub.
@@ -213,6 +215,8 @@ class CodeAssembler {
CodeAssembler* assembler_;
};
+ typedef ZoneList<Variable*> VariableList;
+
// ===========================================================================
// Base Assembler
// ===========================================================================
@@ -223,6 +227,7 @@ class CodeAssembler {
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
Node* SmiConstant(Smi* value);
+ Node* SmiConstant(int value);
Node* HeapConstant(Handle<HeapObject> object);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
@@ -231,10 +236,12 @@ class CodeAssembler {
bool ToInt32Constant(Node* node, int32_t& out_value);
bool ToInt64Constant(Node* node, int64_t& out_value);
+ bool ToSmiConstant(Node* node, Smi*& out_value);
bool ToIntPtrConstant(Node* node, intptr_t& out_value);
Node* Parameter(int value);
void Return(Node* value);
+ void PopAndReturn(Node* pop, Node* value);
void DebugBreak();
void Comment(const char* format, ...);
@@ -403,6 +410,10 @@ class CodeAssembler {
Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
Node* arg2, Node* arg3, Node* arg4,
size_t result_size = 1);
+ Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5,
+ size_t result_size = 1);
+
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, size_t result_size = 1);
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
@@ -417,6 +428,10 @@ class CodeAssembler {
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, Node* arg1, Node* arg2, Node* arg3,
Node* arg4, Node* arg5, size_t result_size = 1);
+ Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* arg5, Node* arg6,
+ size_t result_size = 1);
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
Node* context, const Arg& arg1, const Arg& arg2,
@@ -435,6 +450,9 @@ class CodeAssembler {
Node* receiver, Node* arg1, size_t result_size = 1);
Node* CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
+ Node* CallJS(Callable const& callable, Node* context, Node* function,
+ Node* receiver, Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size = 1);
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
@@ -445,16 +463,6 @@ class CodeAssembler {
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
- // Branching helpers.
- void BranchIf(Node* condition, Label* if_true, Label* if_false);
-
-#define BRANCH_HELPER(name) \
- void BranchIf##name(Node* a, Node* b, Label* if_true, Label* if_false) { \
- BranchIf(name(a, b), if_true, if_false); \
- }
- CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(BRANCH_HELPER)
-#undef BRANCH_HELPER
-
// Helpers which delegate to RawMachineAssembler.
Factory* factory() const;
Isolate* isolate() const;
@@ -489,12 +497,15 @@ class CodeAssembler::Label {
CodeAssembler* assembler,
CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
: CodeAssembler::Label(assembler, 0, nullptr, type) {}
- Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
+ Label(CodeAssembler* assembler, const VariableList& merged_variables,
CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : CodeAssembler::Label(assembler, 1, &merged_variable, type) {}
- Label(CodeAssembler* assembler, int merged_variable_count,
- CodeAssembler::Variable** merged_variables,
+ : CodeAssembler::Label(assembler, merged_variables.length(),
+ &(merged_variables[0]), type) {}
+ Label(CodeAssembler* assembler, size_t count, Variable** vars,
CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
+ Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
+ CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
+ : Label(assembler, 1, &merged_variable, type) {}
~Label() {}
private:
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 043582b17e..c69e86e0a5 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -88,10 +88,11 @@ Handle<Code> CodeGenerator::GenerateCode() {
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
- for (const CompilationInfo::InlinedFunctionHolder& inlined :
+ for (CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
if (!inlined.shared_info.is_identical_to(info->shared_info())) {
- DefineDeoptimizationLiteral(inlined.shared_info);
+ int index = DefineDeoptimizationLiteral(inlined.shared_info);
+ inlined.RegisterInlinedFunctionId(index);
}
}
inlined_function_count_ = deoptimization_literals_.size();
@@ -469,29 +470,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
- SourcePosition source_position;
+ SourcePosition source_position = SourcePosition::Unknown();
if (!code()->GetSourcePosition(instr, &source_position)) return;
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
- if (source_position.IsUnknown()) return;
- int code_pos = source_position.raw();
- source_position_table_builder_.AddPosition(masm()->pc_offset(), code_pos,
- false);
+ if (!source_position.IsKnown()) return;
+ source_position_table_builder_.AddPosition(masm()->pc_offset(),
+ source_position, false);
if (FLAG_code_comments) {
CompilationInfo* info = this->info();
if (!info->parse_info()) return;
- Vector<char> buffer = Vector<char>::New(256);
- int ln = Script::GetLineNumber(info->script(), code_pos);
- int cn = Script::GetColumnNumber(info->script(), code_pos);
- if (info->script()->name()->IsString()) {
- Handle<String> file(String::cast(info->script()->name()));
- base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
- file->ToCString().get(), ln, cn);
- } else {
- base::OS::SNPrintF(buffer.start(), buffer.length(),
- "-- <unknown>:%d:%d --", ln, cn);
- }
- masm()->RecordComment(buffer.start());
+ std::ostringstream buffer;
+ buffer << "-- " << source_position.InliningStack(info) << " --";
+ masm()->RecordComment(StrDup(buffer.str().c_str()));
}
}
@@ -516,6 +507,26 @@ void CodeGenerator::AssembleGaps(Instruction* instr) {
}
}
+namespace {
+
+Handle<PodArray<InliningPosition>> CreateInliningPositions(
+ CompilationInfo* info) {
+ const CompilationInfo::InlinedFunctionList& inlined_functions =
+ info->inlined_functions();
+ if (inlined_functions.size() == 0) {
+ return Handle<PodArray<InliningPosition>>::cast(
+ info->isolate()->factory()->empty_byte_array());
+ }
+ Handle<PodArray<InliningPosition>> inl_positions =
+ PodArray<InliningPosition>::New(
+ info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
+ for (size_t i = 0; i < inlined_functions.size(); ++i) {
+ inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
+ }
+ return inl_positions;
+}
+
+} // namespace
void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
CompilationInfo* info = this->info();
@@ -535,7 +546,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
if (info->has_shared_info()) {
data->SetSharedFunctionInfo(*info->shared_info());
} else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
+ data->SetSharedFunctionInfo(Smi::kZero);
}
Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
@@ -548,6 +559,9 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
data->SetLiteralArray(*literals);
}
+ Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info);
+ data->SetInliningPositions(*inl_pos);
+
if (info->is_osr()) {
DCHECK(osr_pc_offset_ >= 0);
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
@@ -565,7 +579,7 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
CHECK(deoptimization_states_[i]);
data->SetTranslationIndex(
i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
- data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+ data->SetArgumentsStackHeight(i, Smi::kZero);
data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
}
@@ -858,10 +872,19 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
constant_object =
handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
DCHECK(constant_object->IsSmi());
+ } else if (type.representation() == MachineRepresentation::kBit) {
+ if (constant.ToInt32() == 0) {
+ constant_object = isolate()->factory()->false_value();
+ } else {
+ DCHECK_EQ(1, constant.ToInt32());
+ constant_object = isolate()->factory()->true_value();
+ }
} else {
+ // TODO(jarin,bmeurer): We currently pass in raw pointers to the
+ // JSFunction::entry here. We should really consider fixing this.
DCHECK(type == MachineType::Int32() ||
type == MachineType::Uint32() ||
- type.representation() == MachineRepresentation::kBit ||
+ type.representation() == MachineRepresentation::kWord32 ||
type.representation() == MachineRepresentation::kNone);
DCHECK(type.representation() != MachineRepresentation::kNone ||
constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
@@ -873,7 +896,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kInt64:
// When pointers are 8 bytes, we can use int64 constants to represent
// Smis.
- DCHECK(type.representation() == MachineRepresentation::kTagged ||
+ // TODO(jarin,bmeurer): We currently pass in raw pointers to the
+ // JSFunction::entry here. We should really consider fixing this.
+ DCHECK(type.representation() == MachineRepresentation::kWord64 ||
+ type.representation() == MachineRepresentation::kTagged ||
type.representation() == MachineRepresentation::kTaggedSigned);
DCHECK_EQ(8, kPointerSize);
constant_object =
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 3032163d34..7aed85a37f 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -129,7 +129,7 @@ class CodeGenerator final : public GapResolver::Assembler {
// Generates an architecture-specific, descriptor-specific return sequence
// to tear down a stack frame.
- void AssembleReturn();
+ void AssembleReturn(InstructionOperand* pop);
void AssembleDeconstructFrame();
diff --git a/deps/v8/src/compiler/common-node-cache.h b/deps/v8/src/compiler/common-node-cache.h
index 1f07703e72..bce8d0f62e 100644
--- a/deps/v8/src/compiler/common-node-cache.h
+++ b/deps/v8/src/compiler/common-node-cache.h
@@ -45,6 +45,10 @@ class CommonNodeCache final {
Node** FindExternalConstant(ExternalReference value);
+ Node** FindPointerConstant(intptr_t value) {
+ return pointer_constants_.Find(zone(), value);
+ }
+
Node** FindNumberConstant(double value) {
// We canonicalize double constants at the bit representation level.
return number_constants_.Find(zone(), bit_cast<int64_t>(value));
@@ -73,6 +77,7 @@ class CommonNodeCache final {
Int32NodeCache float32_constants_;
Int64NodeCache float64_constants_;
IntPtrNodeCache external_constants_;
+ IntPtrNodeCache pointer_constants_;
Int64NodeCache number_constants_;
IntPtrNodeCache heap_constants_;
RelocInt32NodeCache relocatable_int32_constants_;
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index c5ced20373..9a368162ef 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -284,7 +284,7 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(IrOpcode::kReturn, node->opcode());
- Node* const value = node->InputAt(0);
+ Node* const value = node->InputAt(1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
bool changed = false;
@@ -311,8 +311,9 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
// {end} as revisit, because we mark {node} as {Dead} below, which was
// previously connected to {end}, so we know for sure that at some point
// the reducer logic will visit {end} again.
- Node* ret = graph()->NewNode(common()->Return(), value->InputAt(i),
- effect->InputAt(i), control->InputAt(i));
+ Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
+ value->InputAt(i), effect->InputAt(i),
+ control->InputAt(i));
NodeProperties::MergeControlToEnd(graph(), common(), ret);
}
// Mark the merge {control} and return {node} as {dead}.
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index b7aeeb7e3e..acc2092f5d 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
#define V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -19,7 +21,8 @@ class Operator;
// Performs strength reduction on nodes that have common operators.
-class CommonOperatorReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE CommonOperatorReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index e57160a3f8..9ce6f71a0f 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -210,6 +210,37 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
+int OsrValueIndexOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kOsrValue, op->opcode());
+ return OpParameter<int>(op);
+}
+
+size_t hash_value(OsrGuardType type) { return static_cast<size_t>(type); }
+
+std::ostream& operator<<(std::ostream& os, OsrGuardType type) {
+ switch (type) {
+ case OsrGuardType::kUninitialized:
+ return os << "Uninitialized";
+ case OsrGuardType::kSignedSmall:
+ return os << "SignedSmall";
+ case OsrGuardType::kAny:
+ return os << "Any";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+OsrGuardType OsrGuardTypeOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kOsrGuard, op->opcode());
+ return OpParameter<OsrGuardType>(op);
+}
+
+ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kTypedObjectState ||
+ op->opcode() == IrOpcode::kTypedStateValues);
+ return OpParameter<const ZoneVector<MachineType>*>(op);
+}
+
#define CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@@ -231,8 +262,8 @@ std::ostream& operator<<(std::ostream& os,
#define CACHED_RETURN_LIST(V) \
V(1) \
V(2) \
- V(3)
-
+ V(3) \
+ V(4)
#define CACHED_END_LIST(V) \
V(1) \
@@ -293,7 +324,7 @@ std::ostream& operator<<(std::ostream& os,
V(LostPrecisionOrNaN) \
V(NoReason) \
V(NotAHeapNumber) \
- V(NotAHeapNumberUndefinedBoolean) \
+ V(NotANumberOrOddball) \
V(NotASmi) \
V(OutOfBounds) \
V(WrongInstanceType) \
@@ -371,16 +402,16 @@ struct CommonOperatorGlobalCache final {
CACHED_END_LIST(CACHED_END)
#undef CACHED_END
- template <size_t kInputCount>
+ template <size_t kValueInputCount>
struct ReturnOperator final : public Operator {
ReturnOperator()
- : Operator( // --
- IrOpcode::kReturn, Operator::kNoThrow, // opcode
- "Return", // name
- kInputCount, 1, 1, 0, 0, 1) {} // counts
+ : Operator( // --
+ IrOpcode::kReturn, Operator::kNoThrow, // opcode
+ "Return", // name
+ kValueInputCount + 1, 1, 1, 0, 0, 1) {} // counts
};
-#define CACHED_RETURN(input_count) \
- ReturnOperator<input_count> kReturn##input_count##Operator;
+#define CACHED_RETURN(value_input_count) \
+ ReturnOperator<value_input_count> kReturn##value_input_count##Operator;
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
@@ -607,7 +638,6 @@ const Operator* CommonOperatorBuilder::End(size_t control_input_count) {
0, 0, control_input_count, 0, 0, 0); // counts
}
-
const Operator* CommonOperatorBuilder::Return(int value_input_count) {
switch (value_input_count) {
#define CACHED_RETURN(input_count) \
@@ -622,7 +652,7 @@ const Operator* CommonOperatorBuilder::Return(int value_input_count) {
return new (zone()) Operator( //--
IrOpcode::kReturn, Operator::kNoThrow, // opcode
"Return", // name
- value_input_count, 1, 1, 0, 0, 1); // counts
+ value_input_count + 1, 1, 1, 0, 0, 1); // counts
}
@@ -780,7 +810,6 @@ const Operator* CommonOperatorBuilder::Parameter(int index,
ParameterInfo(index, debug_name)); // parameter info
}
-
const Operator* CommonOperatorBuilder::OsrValue(int index) {
return new (zone()) Operator1<int>( // --
IrOpcode::kOsrValue, Operator::kNoProperties, // opcode
@@ -789,6 +818,13 @@ const Operator* CommonOperatorBuilder::OsrValue(int index) {
index); // parameter
}
+const Operator* CommonOperatorBuilder::OsrGuard(OsrGuardType type) {
+ return new (zone()) Operator1<OsrGuardType>( // --
+ IrOpcode::kOsrGuard, Operator::kNoThrow, // opcode
+ "OsrGuard", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ type); // parameter
+}
const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
return new (zone()) Operator1<int32_t>( // --
@@ -844,6 +880,13 @@ const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
value); // parameter
}
+const Operator* CommonOperatorBuilder::PointerConstant(intptr_t value) {
+ return new (zone()) Operator1<intptr_t>( // --
+ IrOpcode::kPointerConstant, Operator::kPure, // opcode
+ "PointerConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
+}
const Operator* CommonOperatorBuilder::HeapConstant(
const Handle<HeapObject>& value) {
@@ -974,24 +1017,32 @@ const Operator* CommonOperatorBuilder::StateValues(int arguments) {
arguments, 0, 0, 1, 0, 0); // counts
}
+const Operator* CommonOperatorBuilder::TypedStateValues(
+ const ZoneVector<MachineType>* types) {
+ return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
+ IrOpcode::kTypedStateValues, Operator::kPure, // opcode
+ "TypedStateValues", // name
+ static_cast<int>(types->size()), 0, 0, 1, 0, 0, // counts
+ types); // parameter
+}
-const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots, int id) {
+const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
return new (zone()) Operator1<int>( // --
IrOpcode::kObjectState, Operator::kPure, // opcode
"ObjectState", // name
- pointer_slots, 0, 0, 1, 0, 0, id); // counts
+ pointer_slots, 0, 0, 1, 0, 0, // counts
+ pointer_slots); // parameter
}
-
-const Operator* CommonOperatorBuilder::TypedStateValues(
+const Operator* CommonOperatorBuilder::TypedObjectState(
const ZoneVector<MachineType>* types) {
return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
- IrOpcode::kTypedStateValues, Operator::kPure, // opcode
- "TypedStateValues", // name
- static_cast<int>(types->size()), 0, 0, 1, 0, 0, types); // counts
+ IrOpcode::kTypedObjectState, Operator::kPure, // opcode
+ "TypedObjectState", // name
+ static_cast<int>(types->size()), 0, 0, 1, 0, 0, // counts
+ types); // parameter
}
-
const Operator* CommonOperatorBuilder::FrameState(
BailoutId bailout_id, OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info) {
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 2db0bfa7d1..1f258a0ec0 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -6,8 +6,10 @@
#define V8_COMPILER_COMMON_OPERATOR_H_
#include "src/assembler.h"
+#include "src/base/compiler-specific.h"
#include "src/compiler/frame-states.h"
#include "src/deoptimize-reason.h"
+#include "src/globals.h"
#include "src/machine-type.h"
#include "src/zone/zone-containers.h"
@@ -39,9 +41,9 @@ inline BranchHint NegateBranchHint(BranchHint hint) {
inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
-std::ostream& operator<<(std::ostream&, BranchHint);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
-BranchHint BranchHintOf(const Operator* const);
+V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
@@ -98,14 +100,15 @@ size_t hash_value(SelectParameters const& p);
std::ostream& operator<<(std::ostream&, SelectParameters const& p);
-SelectParameters const& SelectParametersOf(const Operator* const);
+V8_EXPORT_PRIVATE SelectParameters const& SelectParametersOf(
+ const Operator* const);
-CallDescriptor const* CallDescriptorOf(const Operator* const);
+V8_EXPORT_PRIVATE CallDescriptor const* CallDescriptorOf(const Operator* const);
-size_t ProjectionIndexOf(const Operator* const);
-
-MachineRepresentation PhiRepresentationOf(const Operator* const);
+V8_EXPORT_PRIVATE size_t ProjectionIndexOf(const Operator* const);
+V8_EXPORT_PRIVATE MachineRepresentation
+PhiRepresentationOf(const Operator* const);
// The {IrOpcode::kParameter} opcode represents an incoming parameter to the
// function. This class bundles the index and a debug name for such operators.
@@ -124,7 +127,7 @@ class ParameterInfo final {
std::ostream& operator<<(std::ostream&, ParameterInfo const&);
-int ParameterIndexOf(const Operator* const);
+V8_EXPORT_PRIVATE int ParameterIndexOf(const Operator* const);
const ParameterInfo& ParameterInfoOf(const Operator* const);
class RelocatablePtrConstantInfo final {
@@ -171,9 +174,20 @@ std::ostream& operator<<(std::ostream& os,
Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
+int OsrValueIndexOf(Operator const*);
+
+enum class OsrGuardType { kUninitialized, kSignedSmall, kAny };
+size_t hash_value(OsrGuardType type);
+std::ostream& operator<<(std::ostream&, OsrGuardType);
+OsrGuardType OsrGuardTypeOf(Operator const*);
+
+ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
+ WARN_UNUSED_RESULT;
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
-class CommonOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE CommonOperatorBuilder final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit CommonOperatorBuilder(Zone* zone);
@@ -202,6 +216,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* OsrNormalEntry();
const Operator* OsrLoopEntry();
const Operator* OsrValue(int index);
+ const Operator* OsrGuard(OsrGuardType type);
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
@@ -209,6 +224,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Float64Constant(volatile double);
const Operator* ExternalConstant(const ExternalReference&);
const Operator* NumberConstant(volatile double);
+ const Operator* PointerConstant(intptr_t);
const Operator* HeapConstant(const Handle<HeapObject>&);
const Operator* RelocatableInt32Constant(int32_t value,
@@ -228,8 +244,9 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* BeginRegion(RegionObservability);
const Operator* FinishRegion();
const Operator* StateValues(int arguments);
- const Operator* ObjectState(int pointer_slots, int id);
const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+ const Operator* ObjectState(int pointer_slots);
+ const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info);
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/compiler-source-position-table.cc
index 80f180076d..c5520e7407 100644
--- a/deps/v8/src/compiler/source-position.cc
+++ b/deps/v8/src/compiler/compiler-source-position-table.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/source-position.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-aux-data.h"
@@ -24,28 +24,24 @@ class SourcePositionTable::Decorator final : public GraphDecorator {
SourcePositionTable* source_positions_;
};
-
SourcePositionTable::SourcePositionTable(Graph* graph)
: graph_(graph),
decorator_(nullptr),
current_position_(SourcePosition::Unknown()),
table_(graph->zone()) {}
-
void SourcePositionTable::AddDecorator() {
DCHECK_NULL(decorator_);
decorator_ = new (graph_->zone()) Decorator(this);
graph_->AddDecorator(decorator_);
}
-
void SourcePositionTable::RemoveDecorator() {
DCHECK_NOT_NULL(decorator_);
graph_->RemoveDecorator(decorator_);
decorator_ = nullptr;
}
-
SourcePosition SourcePositionTable::GetSourcePosition(Node* node) const {
return table_.Get(node);
}
@@ -65,7 +61,7 @@ void SourcePositionTable::Print(std::ostream& os) const {
os << ",";
}
os << "\"" << i.first << "\""
- << ":" << pos.raw();
+ << ":" << pos.ScriptOffset();
needs_comma = true;
}
}
diff --git a/deps/v8/src/compiler/source-position.h b/deps/v8/src/compiler/compiler-source-position-table.h
index d4df7835ef..4d14ae2811 100644
--- a/deps/v8/src/compiler/source-position.h
+++ b/deps/v8/src/compiler/compiler-source-position-table.h
@@ -2,43 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_SOURCE_POSITION_H_
-#define V8_COMPILER_SOURCE_POSITION_H_
+#ifndef V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
+#define V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/node-aux-data.h"
#include "src/globals.h"
+#include "src/source-position.h"
namespace v8 {
namespace internal {
namespace compiler {
-// Encapsulates encoding and decoding of sources positions from which Nodes
-// originated.
-class SourcePosition final {
- public:
- explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
-
- static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
- bool IsUnknown() const { return raw() == kUnknownPosition; }
- bool IsKnown() const { return raw() != kUnknownPosition; }
-
- int raw() const { return raw_; }
-
- private:
- static const int kUnknownPosition = kNoSourcePosition;
- int raw_;
-};
-
-
-inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
- return lhs.raw() == rhs.raw();
-}
-
-inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
- return !(lhs == rhs);
-}
-
-class SourcePositionTable final : public ZoneObject {
+class V8_EXPORT_PRIVATE SourcePositionTable final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
class Scope final {
public:
@@ -72,6 +49,10 @@ class SourcePositionTable final : public ZoneObject {
SourcePosition GetSourcePosition(Node* node) const;
void SetSourcePosition(Node* node, SourcePosition position);
+ void SetCurrentPosition(const SourcePosition& pos) {
+ current_position_ = pos;
+ }
+
void Print(std::ostream& os) const;
private:
@@ -80,7 +61,7 @@ class SourcePositionTable final : public ZoneObject {
Graph* const graph_;
Decorator* decorator_;
SourcePosition current_position_;
- NodeAuxData<SourcePosition> table_;
+ NodeAuxData<SourcePosition, SourcePosition::Unknown> table_;
DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
};
@@ -89,4 +70,4 @@ class SourcePositionTable final : public ZoneObject {
} // namespace internal
} // namespace v8
-#endif // V8_COMPILER_SOURCE_POSITION_H_
+#endif // V8_COMPILER_COMPILER_SOURCE_POSITION_TABLE_H_
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
index 4fb9c2718d..b76e04fe43 100644
--- a/deps/v8/src/compiler/control-equivalence.h
+++ b/deps/v8/src/compiler/control-equivalence.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_CONTROL_EQUIVALENCE_H_
#define V8_COMPILER_CONTROL_EQUIVALENCE_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -28,7 +30,8 @@ namespace compiler {
// control regions in linear time" by Johnson, Pearson & Pingali (PLDI94) which
// also contains proofs for the aforementioned equivalence. References to line
// numbers in the algorithm from figure 4 have been added [line:x].
-class ControlEquivalence final : public ZoneObject {
+class V8_EXPORT_PRIVATE ControlEquivalence final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
ControlEquivalence(Zone* zone, Graph* graph)
: zone_(zone),
@@ -121,7 +124,11 @@ class ControlEquivalence final : public ZoneObject {
void DetermineParticipation(Node* exit);
private:
- NodeData* GetData(Node* node) { return &node_data_[node->id()]; }
+ NodeData* GetData(Node* node) {
+ size_t const index = node->id();
+ if (index >= node_data_.size()) node_data_.resize(index + 1, EmptyData());
+ return &node_data_[index];
+ }
int NewClassNumber() { return class_number_++; }
int NewDFSNumber() { return dfs_number_++; }
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 61785a0fc0..577c40d96d 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_CONTROL_FLOW_OPTIMIZER_H_
#include "src/compiler/node-marker.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -18,8 +19,7 @@ class Graph;
class MachineOperatorBuilder;
class Node;
-
-class ControlFlowOptimizer final {
+class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
public:
ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
MachineOperatorBuilder* machine, Zone* zone);
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 8e18561b4b..1cf9b22833 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_DEAD_CODE_ELIMINATION_H_
#define V8_COMPILER_DEAD_CODE_ELIMINATION_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -19,7 +21,8 @@ class CommonOperatorBuilder;
// Note that this does not include trimming dead uses from the graph, and it
// also does not include detecting dead code by any other means than seeing a
// {Dead} control input; that is left to other reducers.
-class DeadCodeElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE DeadCodeElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 4e53e5dcec..d4b0576f79 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -6,6 +6,7 @@
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -17,10 +18,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-EffectControlLinearizer::EffectControlLinearizer(JSGraph* js_graph,
- Schedule* schedule,
- Zone* temp_zone)
- : js_graph_(js_graph), schedule_(schedule), temp_zone_(temp_zone) {}
+EffectControlLinearizer::EffectControlLinearizer(
+ JSGraph* js_graph, Schedule* schedule, Zone* temp_zone,
+ SourcePositionTable* source_positions)
+ : js_graph_(js_graph),
+ schedule_(schedule),
+ temp_zone_(temp_zone),
+ source_positions_(source_positions) {}
Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -74,7 +78,8 @@ void UpdateEffectPhi(Node* node, BasicBlock* block,
// Update all inputs to an effect phi with the effects from the given
// block->effect map.
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
- DCHECK_EQ(node->op()->EffectInputCount(), block->PredecessorCount());
+ DCHECK_EQ(static_cast<size_t>(node->op()->EffectInputCount()),
+ block->PredecessorCount());
for (int i = 0; i < node->op()->EffectInputCount(); i++) {
Node* input = node->InputAt(i);
BasicBlock* predecessor = block->PredecessorAt(static_cast<size_t>(i));
@@ -96,8 +101,10 @@ void UpdateBlockControl(BasicBlock* block,
// Update all inputs to the given control node with the correct control.
DCHECK(control->opcode() == IrOpcode::kMerge ||
- control->op()->ControlInputCount() == block->PredecessorCount());
- if (control->op()->ControlInputCount() != block->PredecessorCount()) {
+ static_cast<size_t>(control->op()->ControlInputCount()) ==
+ block->PredecessorCount());
+ if (static_cast<size_t>(control->op()->ControlInputCount()) !=
+ block->PredecessorCount()) {
return; // We already re-wired the control inputs of this node.
}
for (int i = 0; i < control->op()->ControlInputCount(); i++) {
@@ -141,7 +148,8 @@ void RemoveRegionNode(Node* node) {
void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
CommonOperatorBuilder* common,
- BlockEffectControlMap* block_effects) {
+ BlockEffectControlMap* block_effects,
+ SourcePositionTable* source_positions) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
// This optimization is a special case of (super)block cloning. It takes an
@@ -193,6 +201,8 @@ void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
// ^ ^
// | |
+ SourcePositionTable::Scope scope(source_positions,
+ source_positions->GetSourcePosition(node));
Node* branch = node;
Node* cond = NodeProperties::GetValueInput(branch, 0);
if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return;
@@ -246,7 +256,7 @@ void TryCloneBranch(Node* node, BasicBlock* block, Graph* graph,
merge_true->AppendInput(graph->zone(), merge_true_inputs[i]);
merge_false->AppendInput(graph->zone(), merge_false_inputs[i]);
}
- DCHECK_EQ(2, block->SuccessorCount());
+ DCHECK_EQ(2u, block->SuccessorCount());
NodeProperties::ChangeOp(matcher.IfTrue(), common->Merge(input_count));
NodeProperties::ChangeOp(matcher.IfFalse(), common->Merge(input_count));
int const true_index =
@@ -445,7 +455,7 @@ void EffectControlLinearizer::Run() {
case BasicBlock::kBranch:
ProcessNode(block->control_input(), &frame_state, &effect, &control);
TryCloneBranch(block->control_input(), block, graph(), common(),
- &block_effects);
+ &block_effects, source_positions_);
break;
}
@@ -491,6 +501,9 @@ void TryScheduleCallIfSuccess(Node* node, Node** control) {
void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
Node** effect, Node** control) {
+ SourcePositionTable::Scope scope(source_positions_,
+ source_positions_->GetSourcePosition(node));
+
// If the node needs to be wired into the effect/control chain, do this
// here. Pass current frame state for lowering to eager deoptimization.
if (TryWireInStateEffect(node, *frame_state, effect, control)) {
@@ -600,6 +613,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeFloat64ToTagged:
state = LowerChangeFloat64ToTagged(node, *effect, *control);
break;
+ case IrOpcode::kChangeFloat64ToTaggedPointer:
+ state = LowerChangeFloat64ToTaggedPointer(node, *effect, *control);
+ break;
case IrOpcode::kChangeTaggedSignedToInt32:
state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
break;
@@ -636,9 +652,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckIf:
state = LowerCheckIf(node, frame_state, *effect, *control);
break;
- case IrOpcode::kCheckHeapObject:
- state = LowerCheckHeapObject(node, frame_state, *effect, *control);
- break;
case IrOpcode::kCheckedInt32Add:
state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
break;
@@ -688,6 +701,10 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
*control);
break;
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ state = LowerCheckedTaggedToTaggedPointer(node, frame_state, *effect,
+ *control);
+ break;
case IrOpcode::kTruncateTaggedToWord32:
state = LowerTruncateTaggedToWord32(node, *effect, *control);
break;
@@ -776,6 +793,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kFloat64RoundTruncate:
state = LowerFloat64RoundTruncate(node, *effect, *control);
break;
+ case IrOpcode::kFloat64RoundTiesEven:
+ state = LowerFloat64RoundTiesEven(node, *effect, *control);
+ break;
default:
return false;
}
@@ -793,6 +813,14 @@ EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+ return AllocateHeapNumberWithValue(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
Node* control) {
Node* value = node->InputAt(0);
@@ -901,15 +929,14 @@ EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
Node* control) {
Node* value = node->InputAt(0);
- Node* one = jsgraph()->Int32Constant(1);
Node* zero = jsgraph()->Int32Constant(0);
Node* fzero = jsgraph()->Float64Constant(0.0);
// Collect effect/control/value triples.
int count = 0;
- Node* values[7];
- Node* effects[7];
- Node* controls[6];
+ Node* values[6];
+ Node* effects[6];
+ Node* controls[5];
// Check if {value} is a Smi.
Node* check_smi = ObjectIsSmi(value);
@@ -925,7 +952,7 @@ EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
values[count] =
graph()->NewNode(machine()->Word32Equal(),
graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->ZeroConstant()),
+ jsgraph()->IntPtrConstant(0)),
zero);
count++;
}
@@ -978,7 +1005,7 @@ EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
values[count] =
graph()->NewNode(machine()->Word32Equal(),
graph()->NewNode(machine()->WordEqual(), value_length,
- jsgraph()->ZeroConstant()),
+ jsgraph()->IntPtrConstant(0)),
zero);
count++;
}
@@ -1000,20 +1027,12 @@ EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
eheapnumber, if_heapnumber);
- // Check if {value} is either less than 0.0 or greater than 0.0.
- Node* check =
- graph()->NewNode(machine()->Float64LessThan(), fzero, value_value);
- Node* branch = graph()->NewNode(common()->Branch(), check, if_heapnumber);
-
- controls[count] = graph()->NewNode(common()->IfTrue(), branch);
+ // Check if {value} is not one of 0, -0, or NaN.
+ controls[count] = if_heapnumber;
effects[count] = eheapnumber;
- values[count] = one;
- count++;
-
- controls[count] = graph()->NewNode(common()->IfFalse(), branch);
- effects[count] = eheapnumber;
- values[count] =
- graph()->NewNode(machine()->Float64LessThan(), value_value, fzero);
+ values[count] = graph()->NewNode(
+ machine()->Float64LessThan(), fzero,
+ graph()->NewNode(machine()->Float64Abs(), value_value));
count++;
}
control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
@@ -1274,19 +1293,6 @@ EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
}
EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckHeapObject(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* value = node->InputAt(0);
-
- Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
Node* effect, Node* control) {
Node* lhs = node->InputAt(0);
@@ -1818,8 +1824,7 @@ EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
graph()->NewNode(machine()->Word32Equal(), instance_type,
jsgraph()->Int32Constant(ODDBALL_TYPE));
if_false = efalse = graph()->NewNode(
- common()->DeoptimizeUnless(
- DeoptimizeReason::kNotAHeapNumberUndefinedBoolean),
+ common()->DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball),
check_oddball, frame_state, efalse, if_false);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
@@ -1884,6 +1889,21 @@ EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control) {
+ Node* value = node->InputAt(0);
+
+ Node* check = ObjectIsSmi(value);
+ control = effect =
+ graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
+ frame_state, effect, control);
+
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
Node* control) {
Node* value = node->InputAt(0);
@@ -2447,104 +2467,63 @@ EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
Node* branch0 =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+
Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
Node* etrue0 = effect;
- Node* vtrue0;
- {
- // Load the isolate wide single character string cache.
- Node* cache =
- jsgraph()->HeapConstant(factory()->single_character_string_cache());
-
- // Compute the {cache} index for {code}.
- Node* index =
- machine()->Is32() ? code : graph()->NewNode(
- machine()->ChangeUint32ToUint64(), code);
-
- // Check if we have an entry for the {code} in the single character string
- // cache already.
- Node* entry = etrue0 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
- index, etrue0, if_true0);
-
- Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
- jsgraph()->UndefinedConstant());
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_true0);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1;
- {
- // Allocate a new SeqOneByteString for {code}.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue1,
- if_true1);
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vtrue1,
- jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue1,
- if_true1);
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue1,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue1, if_true1);
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue1,
- jsgraph()->SmiConstant(1), etrue1, if_true1);
- etrue1 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
- kNoWriteBarrier)),
- vtrue1, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag),
- code, etrue1, if_true1);
-
- // Remember it in the {cache}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
- cache, index, vtrue1, etrue1, if_true1);
- }
+ // Load the isolate wide single character string cache.
+ Node* cache =
+ jsgraph()->HeapConstant(factory()->single_character_string_cache());
- // Use the {entry} from the {cache}.
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = entry;
+ // Compute the {cache} index for {code}.
+ Node* index = machine()->Is32()
+ ? code
+ : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_true0);
- }
+ // Check if we have an entry for the {code} in the single character string
+ // cache already.
+ Node* entry = etrue0 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
+ index, etrue0, if_true0);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
+ Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
+ jsgraph()->UndefinedConstant());
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_true0);
+
+ // Use the {entry} from the {cache}.
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = etrue0;
+ Node* vfalse1 = entry;
+
+ // Let %StringFromCharCode handle this case.
+ // TODO(turbofan): At some point we may consider adding a stub for this
+ // deferred case, so that we don't need to call to C++ here.
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = etrue0;
+ Node* vtrue1;
{
- // Allocate a new SeqTwoByteString for {code}.
- vfalse0 = efalse0 =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)),
- efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
- jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
- jsgraph()->SmiConstant(1), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
- kNoWriteBarrier)),
- vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- code, efalse0, if_false0);
+ if_true1 = graph()->NewNode(common()->Merge(2), if_true1, if_false0);
+ etrue1 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse0, if_true1);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kStringCharFromCode;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ vtrue1 = etrue1 = graph()->NewNode(
+ common()->Call(desc), jsgraph()->CEntryStubConstant(1),
+ ChangeInt32ToSmi(code),
+ jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
+ jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(), etrue1,
+ if_true1);
}
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
+ vtrue1, vfalse1, control);
return ValueEffectControl(value, effect, control);
}
@@ -3412,6 +3391,137 @@ EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
}
EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::BuildFloat64RoundDown(Node* value, Node* effect,
+ Node* control) {
+ if (machine()->Float64RoundDown().IsSupported()) {
+ value = graph()->NewNode(machine()->Float64RoundDown().op(), value);
+ } else {
+ Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const zero = jsgraph()->Float64Constant(0.0);
+ Node* const minus_one = jsgraph()->Float64Constant(-1.0);
+ Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
+ Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
+ Node* const minus_two_52 =
+ jsgraph()->Float64Constant(-4503599627370496.0E0);
+ Node* const input = value;
+
+ // General case for floor.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // if temp2 < temp1 then
+ // -1 - temp2
+ // else
+ // -0 - temp2
+ //
+ // Note: We do not use the Diamond helper class here, because it really
+ // hurts
+ // readability with nested diamonds.
+
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0;
+ {
+ Node* check1 =
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* temp1 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
+ vfalse1 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, temp1),
+ graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ }
+
+ if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vtrue0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = input;
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
+ input, minus_two_52);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = input;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2;
+ {
+ Node* temp1 =
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
+ Node* temp2 = graph()->NewNode(
+ machine()->Float64Sub(),
+ graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
+ vfalse2 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
+ graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
+ graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
+ }
+
+ if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ vfalse1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue2, vfalse2, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, control);
+ }
+ return ValueEffectControl(value, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
Node* control) {
// Nothing to be done if a fast hardware instruction is available.
@@ -3419,108 +3529,78 @@ EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
return ValueEffectControl(node, effect, control);
}
+ Node* const input = node->InputAt(0);
+ return BuildFloat64RoundDown(input, effect, control);
+}
+
+EffectControlLinearizer::ValueEffectControl
+EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
+ Node* control) {
+ // Nothing to be done if a fast hardware instruction is available.
+ if (machine()->Float64RoundTiesEven().IsSupported()) {
+ return ValueEffectControl(node, effect, control);
+ }
+
Node* const one = jsgraph()->Float64Constant(1.0);
+ Node* const two = jsgraph()->Float64Constant(2.0);
+ Node* const half = jsgraph()->Float64Constant(0.5);
Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_one = jsgraph()->Float64Constant(-1.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
Node* const input = node->InputAt(0);
- // General case for floor.
+ // Generate case for round ties to even:
//
- // if 0.0 < input then
- // if 2^52 <= input then
- // input
- // else
- // let temp1 = (2^52 + input) - 2^52 in
- // if input < temp1 then
- // temp1 - 1
- // else
- // temp1
+ // let value = floor(input) in
+ // let temp1 = input - value in
+ // if temp1 < 0.5 then
+ // value
+ // else if 0.5 < temp1 then
+ // value + 1.0
// else
- // if input == 0 then
- // input
+ // let temp2 = value % 2.0 in
+ // if temp2 == 0.0 then
+ // value
// else
- // if input <= -2^52 then
- // input
- // else
- // let temp1 = -0 - input in
- // let temp2 = (2^52 + temp1) - 2^52 in
- // if temp2 < temp1 then
- // -1 - temp2
- // else
- // -0 - temp2
+ // value + 1.0
//
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ ValueEffectControl continuation =
+ BuildFloat64RoundDown(input, effect, control);
+ Node* value = continuation.value;
+ effect = continuation.effect;
+ control = continuation.control;
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* temp1 = graph()->NewNode(machine()->Float64Sub(), input, value);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
- }
+ Node* check0 = graph()->NewNode(machine()->Float64LessThan(), temp1, half);
+ Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
- }
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0 = value;
Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
Node* vfalse0;
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* check1 = graph()->NewNode(machine()->Float64LessThan(), half, temp1);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* vtrue1 = graph()->NewNode(machine()->Float64Add(), value, one);
Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
Node* vfalse1;
{
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
+ Node* temp2 = graph()->NewNode(machine()->Float64Mod(), value, two);
+
+ Node* check2 = graph()->NewNode(machine()->Float64Equal(), temp2, zero);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
+ Node* vtrue2 = value;
Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- vfalse2 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
- graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
- graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
- }
+ Node* vfalse2 = graph()->NewNode(machine()->Float64Add(), value, one);
if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
vfalse1 =
@@ -3534,11 +3614,11 @@ EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
vtrue1, vfalse1, if_false0);
}
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
- return ValueEffectControl(value, effect, merge0);
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
+ vtrue0, vfalse0, control);
+
+ return ValueEffectControl(value, effect, control);
}
EffectControlLinearizer::ValueEffectControl
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 0199fd0886..4ed03c6815 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -8,6 +8,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -24,10 +25,12 @@ class MachineOperatorBuilder;
class JSGraph;
class Graph;
class Schedule;
+class SourcePositionTable;
-class EffectControlLinearizer {
+class V8_EXPORT_PRIVATE EffectControlLinearizer {
public:
- EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone);
+ EffectControlLinearizer(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
+ SourcePositionTable* source_positions);
void Run();
@@ -55,6 +58,8 @@ class EffectControlLinearizer {
Node* control);
ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerChangeFloat64ToTaggedPointer(Node* node, Node* effect,
+ Node* control);
ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
@@ -73,8 +78,6 @@ class EffectControlLinearizer {
Node* effect, Node* control);
ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
Node* control);
- ValueEffectControl LowerCheckHeapObject(Node* node, Node* frame_state,
- Node* effect, Node* control);
ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
Node* effect, Node* control);
ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
@@ -113,6 +116,10 @@ class EffectControlLinearizer {
Node* frame_state,
Node* effect,
Node* control);
+ ValueEffectControl LowerCheckedTaggedToTaggedPointer(Node* node,
+ Node* frame_state,
+ Node* effect,
+ Node* control);
ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
Node* control);
ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
@@ -177,6 +184,8 @@ class EffectControlLinearizer {
Node* control);
ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
Node* control);
+ ValueEffectControl LowerFloat64RoundTiesEven(Node* node, Node* effect,
+ Node* control);
ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
Node* control);
@@ -188,6 +197,8 @@ class EffectControlLinearizer {
ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
Node* control);
+ ValueEffectControl BuildFloat64RoundDown(Node* value, Node* effect,
+ Node* control);
ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
Node* effect, Node* control);
@@ -217,6 +228,7 @@ class EffectControlLinearizer {
Schedule* schedule_;
Zone* temp_zone_;
RegionObservability region_observability_ = RegionObservability::kObservable;
+ SourcePositionTable* source_positions_;
SetOncePointer<Operator const> to_number_operator_;
};
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index d997813d01..f7708f85da 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -327,7 +327,8 @@ Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
if (escape_analysis()->IsCyclicObjectState(effect, input)) {
// TODO(mstarzinger): Represent cyclic object states differently to
// ensure the scheduler can properly handle such object states.
- FATAL("Cyclic object state detected by escape analysis.");
+ compilation_failed_ = true;
+ return nullptr;
}
if (Node* object_state =
escape_analysis()->GetOrCreateObjectState(effect, input)) {
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index ad6747929c..61e7607a36 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -5,9 +5,11 @@
#ifndef V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
#define V8_COMPILER_ESCAPE_ANALYSIS_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/bit-vector.h"
#include "src/compiler/escape-analysis.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -16,7 +18,8 @@ namespace compiler {
// Forward declarations.
class JSGraph;
-class EscapeAnalysisReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysis* escape_analysis, Zone* zone);
@@ -27,6 +30,8 @@ class EscapeAnalysisReducer final : public AdvancedReducer {
// after this reducer has been applied. Has no effect in release mode.
void VerifyReplacement() const;
+ bool compilation_failed() const { return compilation_failed_; }
+
private:
Reduction ReduceLoad(Node* node);
Reduction ReduceStore(Node* node);
@@ -52,6 +57,7 @@ class EscapeAnalysisReducer final : public AdvancedReducer {
// and nodes that do not need a visit from ReduceDeoptState etc.
BitVector fully_reduced_;
bool exists_virtual_allocate_;
+ bool compilation_failed_ = false;
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
};
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 3f889ccbac..0218045971 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -420,7 +420,7 @@ bool IsEquivalentPhi(Node* node1, Node* node2) {
bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
if (phi->opcode() != IrOpcode::kPhi) return false;
- if (phi->op()->ValueInputCount() != inputs.size()) {
+ if (static_cast<size_t>(phi->op()->ValueInputCount()) != inputs.size()) {
return false;
}
for (size_t i = 0; i < inputs.size(); ++i) {
@@ -481,9 +481,9 @@ bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
SetField(i, field);
TRACE(" Field %zu agree on rep #%d\n", i, field->id());
} else {
- int arity = at->opcode() == IrOpcode::kEffectPhi
- ? at->op()->EffectInputCount()
- : at->op()->ValueInputCount();
+ size_t arity = at->opcode() == IrOpcode::kEffectPhi
+ ? at->op()->EffectInputCount()
+ : at->op()->ValueInputCount();
if (cache->fields().size() == arity) {
changed = MergeFields(i, at, cache, graph, common) || changed;
} else {
@@ -798,6 +798,7 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
+ case IrOpcode::kTypeGuard:
case IrOpcode::kPlainPrimitiveToNumber:
case IrOpcode::kPlainPrimitiveToWord32:
case IrOpcode::kPlainPrimitiveToFloat64:
@@ -1134,7 +1135,17 @@ VirtualObject* EscapeAnalysis::CopyForModificationAt(VirtualObject* obj,
Node* node) {
if (obj->NeedCopyForModification()) {
state = CopyForModificationAt(state, node);
- return state->Copy(obj, status_analysis_->GetAlias(obj->id()));
+ // TODO(tebbi): this copies the complete virtual state. Replace with a more
+ // precise analysis of which objects are actually affected by the change.
+ Alias changed_alias = status_analysis_->GetAlias(obj->id());
+ for (Alias alias = 0; alias < state->size(); ++alias) {
+ if (VirtualObject* next_obj = state->VirtualObjectFromAlias(alias)) {
+ if (alias != changed_alias && next_obj->NeedCopyForModification()) {
+ state->Copy(next_obj, alias);
+ }
+ }
+ }
+ return state->Copy(obj, changed_alias);
}
return obj;
}
@@ -1338,9 +1349,19 @@ bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
namespace {
+bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
+#if V8_TARGET_LITTLE_ENDIAN
+ return (access.offset % kPointerSize) == 0;
+#else
+ return ((access.offset +
+ (1 << ElementSizeLog2Of(access.machine_type.representation()))) %
+ kPointerSize) == 0;
+#endif
+}
+
int OffsetForFieldAccess(Node* node) {
FieldAccess access = FieldAccessOf(node->op());
- DCHECK_EQ(access.offset % kPointerSize, 0);
+ DCHECK(IsOffsetForFieldAccessCorrect(access));
return access.offset / kPointerSize;
}
@@ -1398,7 +1419,20 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
if (VirtualObject* object = GetVirtualObject(state, from)) {
if (!object->IsTracked()) return;
int offset = OffsetForFieldAccess(node);
- if (static_cast<size_t>(offset) >= object->field_count()) return;
+ if (static_cast<size_t>(offset) >= object->field_count()) {
+ // We have a load from a field that is not inside the {object}. This
+ // can only happen with conflicting type feedback and for dead {node}s.
+ // For now, we just mark the {object} as escaping.
+ // TODO(turbofan): Consider introducing an Undefined or None operator
+ // that we can replace this load with, since we know it's dead code.
+ if (status_analysis_->SetEscaped(from)) {
+ TRACE(
+ "Setting #%d (%s) to escaped because load field #%d from "
+ "offset %d outside of object\n",
+ from->id(), from->op()->mnemonic(), node->id(), offset);
+ }
+ return;
+ }
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
@@ -1406,7 +1440,7 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
// Record that the load has this alias.
UpdateReplacement(state, node, value);
} else if (from->opcode() == IrOpcode::kPhi &&
- FieldAccessOf(node->op()).offset % kPointerSize == 0) {
+ IsOffsetForFieldAccessCorrect(FieldAccessOf(node->op()))) {
int offset = OffsetForFieldAccess(node);
// Only binary phis are supported for now.
ProcessLoadFromPhi(offset, from, node, state);
@@ -1463,7 +1497,20 @@ void EscapeAnalysis::ProcessStoreField(Node* node) {
if (VirtualObject* object = GetVirtualObject(state, to)) {
if (!object->IsTracked()) return;
int offset = OffsetForFieldAccess(node);
- if (static_cast<size_t>(offset) >= object->field_count()) return;
+ if (static_cast<size_t>(offset) >= object->field_count()) {
+ // We have a store to a field that is not inside the {object}. This
+ // can only happen with conflicting type feedback and for dead {node}s.
+ // For now, we just mark the {object} as escaping.
+ // TODO(turbofan): Consider just eliminating the store in the reducer
+ // pass, as it's dead code anyways.
+ if (status_analysis_->SetEscaped(to)) {
+ TRACE(
+ "Setting #%d (%s) to escaped because store field #%d to "
+ "offset %d outside of object\n",
+ to->id(), to->op()->mnemonic(), node->id(), offset);
+ }
+ return;
+ }
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
// TODO(mstarzinger): The following is a workaround to not track some well
// known raw fields. We only ever store default initial values into these
@@ -1541,8 +1588,8 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
}
int input_count = static_cast<int>(cache_->fields().size());
Node* new_object_state =
- graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
- input_count, &cache_->fields().front());
+ graph()->NewNode(common()->ObjectState(input_count), input_count,
+ &cache_->fields().front());
vobj->SetObjectState(new_object_state);
TRACE(
"Creating object state #%d for vobj %p (from node #%d) at effect "
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index ec5154e8b1..b85efe7349 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_ESCAPE_ANALYSIS_H_
#include "src/compiler/graph.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -20,7 +21,7 @@ class VirtualObject;
// EscapeObjectAnalysis simulates stores to determine values of loads if
// an object is virtual and eliminated.
-class EscapeAnalysis {
+class V8_EXPORT_PRIVATE EscapeAnalysis {
public:
EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
~EscapeAnalysis();
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index 5ad4aad41e..bb17d1215f 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -24,7 +24,8 @@ void FrameElider::MarkBlocks() {
for (int i = block->code_start(); i < block->code_end(); ++i) {
const Instruction* instr = InstructionAt(i);
if (instr->IsCall() || instr->IsDeoptimizeCall() ||
- instr->arch_opcode() == ArchOpcode::kArchStackPointer) {
+ instr->arch_opcode() == ArchOpcode::kArchStackPointer ||
+ instr->arch_opcode() == ArchOpcode::kArchFramePointer) {
block->mark_needs_frame();
break;
}
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 7b04198e81..1ba1044eab 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -14,27 +14,124 @@ namespace compiler {
namespace {
+#define REP_BIT(rep) (1 << static_cast<int>(rep))
+
+const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
+const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);
+
inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
- return move->Blocks(destination);
+ return !move->IsEliminated() && move->source().InterferesWith(destination);
}
+// Splits a FP move between two location operands into the equivalent series of
+// moves between smaller sub-operands, e.g. a double move to two single moves.
+// This helps reduce the number of cycles that would normally occur under FP
+// aliasing, and makes swaps much easier to implement.
+MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
+ ParallelMove* moves) {
+ DCHECK(!kSimpleFPAliasing);
+ // Splitting is only possible when the slot size is the same as float size.
+ DCHECK_EQ(kPointerSize, kFloatSize);
+ const LocationOperand& src_loc = LocationOperand::cast(move->source());
+ const LocationOperand& dst_loc = LocationOperand::cast(move->destination());
+ MachineRepresentation dst_rep = dst_loc.representation();
+ DCHECK_NE(smaller_rep, dst_rep);
+ auto src_kind = src_loc.location_kind();
+ auto dst_kind = dst_loc.location_kind();
+
+ int aliases =
+ 1 << (ElementSizeLog2Of(dst_rep) - ElementSizeLog2Of(smaller_rep));
+ int base = -1;
+ USE(base);
+ DCHECK_EQ(aliases, RegisterConfiguration::Turbofan()->GetAliases(
+ dst_rep, 0, smaller_rep, &base));
+
+ int src_index = -1;
+ int slot_size = (1 << ElementSizeLog2Of(smaller_rep)) / kPointerSize;
+ int src_step = 1;
+ if (src_kind == LocationOperand::REGISTER) {
+ src_index = src_loc.register_code() * aliases;
+ } else {
+ src_index = src_loc.index();
+ // For operands that occuply multiple slots, the index refers to the last
+ // slot. On little-endian architectures, we start at the high slot and use a
+ // negative step so that register-to-slot moves are in the correct order.
+ src_step = -slot_size;
+ }
+ int dst_index = -1;
+ int dst_step = 1;
+ if (dst_kind == LocationOperand::REGISTER) {
+ dst_index = dst_loc.register_code() * aliases;
+ } else {
+ dst_index = dst_loc.index();
+ dst_step = -slot_size;
+ }
-inline bool IsRedundant(MoveOperands* move) { return move->IsRedundant(); }
+ // Reuse 'move' for the first fragment. It is not pending.
+ move->set_source(AllocatedOperand(src_kind, smaller_rep, src_index));
+ move->set_destination(AllocatedOperand(dst_kind, smaller_rep, dst_index));
+ // Add the remaining fragment moves.
+ for (int i = 1; i < aliases; ++i) {
+ src_index += src_step;
+ dst_index += dst_step;
+ moves->AddMove(AllocatedOperand(src_kind, smaller_rep, src_index),
+ AllocatedOperand(dst_kind, smaller_rep, dst_index));
+ }
+ // Return the first fragment.
+ return move;
+}
} // namespace
+void GapResolver::Resolve(ParallelMove* moves) {
+ // Clear redundant moves, and collect FP move representations if aliasing
+ // is non-simple.
+ int reps = 0;
+ for (size_t i = 0; i < moves->size();) {
+ MoveOperands* move = (*moves)[i];
+ if (move->IsRedundant()) {
+ (*moves)[i] = moves->back();
+ moves->pop_back();
+ continue;
+ }
+ i++;
+ if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
+ reps |=
+ REP_BIT(LocationOperand::cast(move->destination()).representation());
+ }
+ }
+
+ if (!kSimpleFPAliasing) {
+ if (reps && !base::bits::IsPowerOfTwo32(reps)) {
+ // Start with the smallest FP moves, so we never encounter smaller moves
+ // in the middle of a cycle of larger moves.
+ if ((reps & kFloat32Bit) != 0) {
+ split_rep_ = MachineRepresentation::kFloat32;
+ for (size_t i = 0; i < moves->size(); ++i) {
+ auto move = (*moves)[i];
+ if (!move->IsEliminated() && move->destination().IsFloatRegister())
+ PerformMove(moves, move);
+ }
+ }
+ if ((reps & kFloat64Bit) != 0) {
+ split_rep_ = MachineRepresentation::kFloat64;
+ for (size_t i = 0; i < moves->size(); ++i) {
+ auto move = (*moves)[i];
+ if (!move->IsEliminated() && move->destination().IsDoubleRegister())
+ PerformMove(moves, move);
+ }
+ }
+ }
+ split_rep_ = MachineRepresentation::kSimd128;
+ }
-void GapResolver::Resolve(ParallelMove* moves) const {
- // Clear redundant moves.
- auto it =
- std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
- moves->erase(it, moves->end());
- for (MoveOperands* move : *moves) {
+ for (size_t i = 0; i < moves->size(); ++i) {
+ auto move = (*moves)[i];
if (!move->IsEliminated()) PerformMove(moves, move);
}
}
-void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
+void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We mark a
// move as "pending" on entry to PerformMove in order to detect cycles in the
@@ -45,15 +142,32 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
- DCHECK(!move->source().IsInvalid()); // Or else it will look eliminated.
+ InstructionOperand source = move->source();
+ DCHECK(!source.IsInvalid()); // Or else it will look eliminated.
InstructionOperand destination = move->destination();
move->SetPending();
+ // We may need to split moves between FP locations differently.
+ bool is_fp_loc_move = !kSimpleFPAliasing && destination.IsFPLocationOperand();
+
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
// destination blocks this one so recursively perform all such moves.
- for (MoveOperands* other : *moves) {
- if (other->Blocks(destination) && !other->IsPending()) {
+ for (size_t i = 0; i < moves->size(); ++i) {
+ auto other = (*moves)[i];
+ if (other->IsEliminated()) continue;
+ if (other->IsPending()) continue;
+ if (other->source().InterferesWith(destination)) {
+ if (!kSimpleFPAliasing && is_fp_loc_move &&
+ LocationOperand::cast(other->source()).representation() >
+ split_rep_) {
+ // 'other' must also be an FP location move. Break it into fragments
+ // of the same size as 'move'. 'other' is set to one of the fragments,
+ // and the rest are appended to 'moves'.
+ other = Split(other, split_rep_, moves);
+ // 'other' may not block destination now.
+ if (!other->source().InterferesWith(destination)) continue;
+ }
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does not
// miss any). Assume there is a non-blocking move with source A and this
@@ -67,18 +181,18 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
}
}
- // We are about to resolve this move and don't need it marked as pending, so
- // restore its destination.
- move->set_destination(destination);
-
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
- InstructionOperand source = move->source();
- if (source.InterferesWith(destination)) {
+ source = move->source();
+ if (source.EqualsCanonicalized(destination)) {
move->Eliminate();
return;
}
+ // We are about to resolve this move and don't need it marked as pending, so
+ // restore its destination.
+ move->set_destination(destination);
+
// The move may be blocked on a (at most one) pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
@@ -91,7 +205,6 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
return;
}
- DCHECK((*blocker)->IsPending());
// Ensure source is a register or both are stack slots, to limit swap cases.
if (source.IsStackSlot() || source.IsFPStackSlot()) {
std::swap(source, destination);
@@ -99,14 +212,36 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
assembler_->AssembleSwap(&source, &destination);
move->Eliminate();
- // Any unperformed (including pending) move with a source of either this
- // move's source or destination needs to have their source changed to
- // reflect the state of affairs after the swap.
- for (MoveOperands* other : *moves) {
- if (other->Blocks(source)) {
- other->set_source(destination);
- } else if (other->Blocks(destination)) {
- other->set_source(source);
+ // Update outstanding moves whose source may now have been moved.
+ if (!kSimpleFPAliasing && is_fp_loc_move) {
+ // We may have to split larger moves.
+ for (size_t i = 0; i < moves->size(); ++i) {
+ auto other = (*moves)[i];
+ if (other->IsEliminated()) continue;
+ if (source.InterferesWith(other->source())) {
+ if (LocationOperand::cast(other->source()).representation() >
+ split_rep_) {
+ other = Split(other, split_rep_, moves);
+ if (!source.InterferesWith(other->source())) continue;
+ }
+ other->set_source(destination);
+ } else if (destination.InterferesWith(other->source())) {
+ if (LocationOperand::cast(other->source()).representation() >
+ split_rep_) {
+ other = Split(other, split_rep_, moves);
+ if (!destination.InterferesWith(other->source())) continue;
+ }
+ other->set_source(source);
+ }
+ }
+ } else {
+ for (auto other : *moves) {
+ if (other->IsEliminated()) continue;
+ if (source.EqualsCanonicalized(other->source())) {
+ other->set_source(destination);
+ } else if (destination.EqualsCanonicalized(other->source())) {
+ other->set_source(source);
+ }
}
}
}
diff --git a/deps/v8/src/compiler/gap-resolver.h b/deps/v8/src/compiler/gap-resolver.h
index 19806f568a..d4c402587f 100644
--- a/deps/v8/src/compiler/gap-resolver.h
+++ b/deps/v8/src/compiler/gap-resolver.h
@@ -26,18 +26,24 @@ class GapResolver final {
InstructionOperand* destination) = 0;
};
- explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+ explicit GapResolver(Assembler* assembler)
+ : assembler_(assembler), split_rep_(MachineRepresentation::kSimd128) {}
// Resolve a set of parallel moves, emitting assembler instructions.
- void Resolve(ParallelMove* parallel_move) const;
+ void Resolve(ParallelMove* parallel_move);
private:
- // Perform the given move, possibly requiring other moves to satisfy
- // dependencies.
- void PerformMove(ParallelMove* moves, MoveOperands* move) const;
+ // Performs the given move, possibly performing other moves to unblock the
+ // destination operand.
+ void PerformMove(ParallelMove* moves, MoveOperands* move);
// Assembler used to emit moves and save registers.
Assembler* const assembler_;
+
+ // While resolving moves, the largest FP representation that can be moved.
+ // Any larger moves must be split into an equivalent series of moves of this
+ // representation.
+ MachineRepresentation split_rep_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index a089c12fbf..b95cf9df2d 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_GRAPH_REDUCER_H_
#define V8_COMPILER_GRAPH_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/node-marker.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -40,7 +42,7 @@ class Reduction final {
// language-specific reductions (e.g. reduction based on types or constant
// folding of low-level operators) can be integrated into the graph reduction
// phase.
-class Reducer {
+class V8_EXPORT_PRIVATE Reducer {
public:
virtual ~Reducer() {}
@@ -119,7 +121,8 @@ class AdvancedReducer : public Reducer {
// Performs an iterative reduction of a node graph.
-class GraphReducer : public AdvancedReducer::Editor {
+class V8_EXPORT_PRIVATE GraphReducer
+ : public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
~GraphReducer();
diff --git a/deps/v8/src/compiler/graph-replay.cc b/deps/v8/src/compiler/graph-replay.cc
index 352b171ffa..df0160d46c 100644
--- a/deps/v8/src/compiler/graph-replay.cc
+++ b/deps/v8/src/compiler/graph-replay.cc
@@ -20,7 +20,7 @@ namespace compiler {
void GraphReplayPrinter::PrintReplay(Graph* graph) {
GraphReplayPrinter replay;
PrintF(" Node* nil = graph()->NewNode(common()->Dead());\n");
- Zone zone(graph->zone()->allocator());
+ Zone zone(graph->zone()->allocator(), ZONE_NAME);
AllNodes nodes(&zone, graph);
// Allocate the nodes first.
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index 98d335a44d..e57dc18b5e 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_GRAPH_TRIMMER_H_
#include "src/compiler/node-marker.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -16,7 +17,7 @@ class Graph;
// Trims dead nodes from the node graph.
-class GraphTrimmer final {
+class V8_EXPORT_PRIVATE GraphTrimmer final {
public:
GraphTrimmer(Zone* zone, Graph* graph);
~GraphTrimmer();
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index d810c3785a..ab20f8f11f 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -11,6 +11,7 @@
#include "src/code-stubs.h"
#include "src/compilation-info.h"
#include "src/compiler/all-nodes.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
@@ -81,33 +82,28 @@ static const char* SafeMnemonic(Node* node) {
return node == nullptr ? "null" : node->op()->mnemonic();
}
-#define DEAD_COLOR "#999999"
-
-class Escaped {
+class JSONEscaped {
public:
- explicit Escaped(const std::ostringstream& os,
- const char* escaped_chars = "<>|{}\\")
- : str_(os.str()), escaped_chars_(escaped_chars) {}
-
- friend std::ostream& operator<<(std::ostream& os, const Escaped& e) {
- for (std::string::const_iterator i = e.str_.begin(); i != e.str_.end();
- ++i) {
- if (e.needs_escape(*i)) os << "\\";
- os << *i;
- }
+ explicit JSONEscaped(const std::ostringstream& os) : str_(os.str()) {}
+
+ friend std::ostream& operator<<(std::ostream& os, const JSONEscaped& e) {
+ for (char c : e.str_) PipeCharacter(os, c);
return os;
}
private:
- bool needs_escape(char ch) const {
- for (size_t i = 0; i < strlen(escaped_chars_); ++i) {
- if (ch == escaped_chars_[i]) return true;
- }
- return false;
+ static std::ostream& PipeCharacter(std::ostream& os, char c) {
+ if (c == '"') return os << "\\\"";
+ if (c == '\\') return os << "\\\\";
+ if (c == '\b') return os << "\\b";
+ if (c == '\f') return os << "\\f";
+ if (c == '\n') return os << "\\n";
+ if (c == '\r') return os << "\\r";
+ if (c == '\t') return os << "\\t";
+ return os << c;
}
const std::string str_;
- const char* const escaped_chars_;
};
class JSONGraphNodeWriter {
@@ -135,11 +131,11 @@ class JSONGraphNodeWriter {
node->op()->PrintTo(label, Operator::PrintVerbosity::kSilent);
node->op()->PrintTo(title, Operator::PrintVerbosity::kVerbose);
node->op()->PrintPropsTo(properties);
- os_ << "{\"id\":" << SafeId(node) << ",\"label\":\""
- << Escaped(label, "\"\\") << "\""
- << ",\"title\":\"" << Escaped(title, "\"\\") << "\""
+ os_ << "{\"id\":" << SafeId(node) << ",\"label\":\"" << JSONEscaped(label)
+ << "\""
+ << ",\"title\":\"" << JSONEscaped(title) << "\""
<< ",\"live\": " << (live_.IsLive(node) ? "true" : "false")
- << ",\"properties\":\"" << Escaped(properties, "\"\\") << "\"";
+ << ",\"properties\":\"" << JSONEscaped(properties) << "\"";
IrOpcode::Value opcode = node->opcode();
if (IrOpcode::IsPhiOpcode(opcode)) {
os_ << ",\"rankInputs\":[0," << NodeProperties::FirstControlIndex(node)
@@ -156,7 +152,7 @@ class JSONGraphNodeWriter {
}
SourcePosition position = positions_->GetSourcePosition(node);
if (position.IsKnown()) {
- os_ << ",\"pos\":" << position.raw();
+ os_ << ",\"pos\":" << position.ScriptOffset();
}
os_ << ",\"opcode\":\"" << IrOpcode::Mnemonic(node->opcode()) << "\"";
os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
@@ -171,7 +167,7 @@ class JSONGraphNodeWriter {
Type* type = NodeProperties::GetType(node);
std::ostringstream type_out;
type->PrintTo(type_out);
- os_ << ",\"type\":\"" << Escaped(type_out, "\"\\") << "\"";
+ os_ << ",\"type\":\"" << JSONEscaped(type_out) << "\"";
}
os_ << "}";
}
@@ -240,7 +236,7 @@ class JSONGraphEdgeWriter {
std::ostream& operator<<(std::ostream& os, const AsJSON& ad) {
AccountingAllocator allocator;
- Zone tmp_zone(&allocator);
+ Zone tmp_zone(&allocator, ZONE_NAME);
os << "{\n\"nodes\":[";
JSONGraphNodeWriter(os, &tmp_zone, &ad.graph, ad.positions).Print();
os << "],\n\"edges\":[";
@@ -501,7 +497,7 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (positions != nullptr) {
SourcePosition position = positions->GetSourcePosition(node);
if (position.IsKnown()) {
- os_ << " pos:" << position.raw();
+ os_ << " pos:" << position.ScriptOffset();
}
}
os_ << " <|@\n";
@@ -630,7 +626,7 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
AccountingAllocator allocator;
- Zone tmp_zone(&allocator);
+ Zone tmp_zone(&allocator, ZONE_NAME);
GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
return os;
}
@@ -638,7 +634,7 @@ std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
AccountingAllocator allocator;
- Zone tmp_zone(&allocator);
+ Zone tmp_zone(&allocator, ZONE_NAME);
GraphC1Visualizer(os, &tmp_zone)
.PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
return os;
@@ -648,7 +644,7 @@ std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
std::ostream& operator<<(std::ostream& os,
const AsC1VRegisterAllocationData& ac) {
AccountingAllocator allocator;
- Zone tmp_zone(&allocator);
+ Zone tmp_zone(&allocator, ZONE_NAME);
GraphC1Visualizer(os, &tmp_zone).PrintLiveRanges(ac.phase_, ac.data_);
return os;
}
@@ -659,7 +655,7 @@ const int kVisited = 2;
std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
AccountingAllocator allocator;
- Zone local_zone(&allocator);
+ Zone local_zone(&allocator, ZONE_NAME);
// Do a post-order depth-first search on the RPO graph. For every node,
// print:
diff --git a/deps/v8/src/compiler/graph-visualizer.h b/deps/v8/src/compiler/graph-visualizer.h
index 700d7a75e0..356dd5e017 100644
--- a/deps/v8/src/compiler/graph-visualizer.h
+++ b/deps/v8/src/compiler/graph-visualizer.h
@@ -9,6 +9,8 @@
#include <iosfwd>
#include <memory>
+#include "src/globals.h"
+
namespace v8 {
namespace internal {
@@ -32,15 +34,14 @@ struct AsJSON {
const SourcePositionTable* positions;
};
-std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
struct AsRPO {
explicit AsRPO(const Graph& g) : graph(g) {}
const Graph& graph;
};
-std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
struct AsC1VCompilation {
explicit AsC1VCompilation(const CompilationInfo* info) : info_(info) {}
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index ff1a17ef3e..373d6d7ee1 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -7,8 +7,9 @@
#include <algorithm>
#include "src/base/bits.h"
-#include "src/compiler/node.h"
+#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
namespace v8 {
@@ -72,6 +73,11 @@ NodeId Graph::NextNodeId() {
return id;
}
+void Graph::Print() const {
+ OFStream os(stdout);
+ os << AsRPO(*this);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 1d9e85e91d..1e861c7b15 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -28,7 +30,7 @@ typedef uint32_t Mark;
// out-of-line data associated with each node.
typedef uint32_t NodeId;
-class Graph final : public ZoneObject {
+class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Graph(Zone* zone);
@@ -119,6 +121,9 @@ class Graph final : public ZoneObject {
void AddDecorator(GraphDecorator* decorator);
void RemoveDecorator(GraphDecorator* decorator);
+ // Very simple print API usable in a debugger.
+ void Print() const;
+
private:
friend class NodeMarkerBase;
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 428570a8c8..20afdc104d 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -580,18 +580,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- no_reg, no_reg, no_reg);
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
+ no_reg, no_reg);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -652,7 +649,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), esp);
@@ -1461,7 +1458,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shl(i.OutputRegister(), 1);
} else {
- __ lea(i.OutputRegister(), i.MemoryOperand());
+ __ add(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
@@ -1472,6 +1469,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
+ } else if (mode == kMode_MR1 &&
+ i.InputRegister(1).is(i.OutputRegister())) {
+ __ add(i.OutputRegister(), i.InputRegister(0));
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
@@ -1790,7 +1790,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -1945,12 +1945,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
+ if (descriptor->PushArgumentCount()) {
+ __ push(kJavaScriptCallArgCountRegister);
+ }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1981,8 +1985,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
const RegList saves = descriptor->CalleeSavedRegisters();
@@ -1994,22 +1997,41 @@ void CodeGenerator::AssembleReturn() {
}
}
+ // Might need ecx for scratch if pop_size is too big or if there is a variable
+ // pop count.
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ IA32OperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now if they always have the same
+ // number of return args.
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
- // Might need ecx for scratch if pop_size is too big.
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- __ Ret(static_cast<int>(pop_size), ecx);
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+ __ Ret(static_cast<int>(pop_size), ecx);
+ } else {
+ Register pop_reg = g.ToRegister(pop);
+ Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
+ __ pop(scratch_reg);
+ __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
+ __ jmp(scratch_reg);
+ }
}
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index ad7535c7c7..3216b1de0b 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -146,8 +146,72 @@ int InstructionScheduler::GetTargetInstructionFlags(
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
- // TODO(all): Add instruction cost modeling.
- return 1;
+ // Basic latency modeling for ia32 instructions. They have been determined
+ // in an empirical way.
+ switch (instr->arch_opcode()) {
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ case kSSEFloat64Mul:
+ return 5;
+ case kIA32Imul:
+ case kIA32ImulHigh:
+ return 5;
+ case kSSEFloat32Cmp:
+ case kSSEFloat64Cmp:
+ return 9;
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ return 5;
+ case kSSEFloat32Mul:
+ return 4;
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64ToFloat32:
+ return 6;
+ case kSSEFloat32Round:
+ case kSSEFloat64Round:
+ case kSSEFloat32ToInt32:
+ case kSSEFloat64ToInt32:
+ return 8;
+ case kSSEFloat32ToUint32:
+ return 21;
+ case kSSEFloat64ToUint32:
+ return 15;
+ case kIA32Idiv:
+ return 33;
+ case kIA32Udiv:
+ return 26;
+ case kSSEFloat32Div:
+ return 35;
+ case kSSEFloat64Div:
+ return 63;
+ case kSSEFloat32Sqrt:
+ case kSSEFloat64Sqrt:
+ return 25;
+ case kSSEFloat64Mod:
+ return 50;
+ case kArchTruncateDoubleToI:
+ return 9;
+ default:
+ return 1;
+ }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 7e98023f5d..c827c68a5f 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -646,55 +646,78 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
void InstructionSelector::VisitInt32PairAdd(Node* node) {
IA32OperandGenerator g(this);
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineSameAsFirst(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+ g.DefineAsRegister(projection1)};
- InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand temps[] = {g.TempRegister()};
- Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+ Emit(kIA32AddPair, 2, outputs, 4, inputs, 1, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kIA32Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.Use(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
IA32OperandGenerator g(this);
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineSameAsFirst(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+ g.DefineAsRegister(projection1)};
- InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand temps[] = {g.TempRegister()};
- Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+ Emit(kIA32SubPair, 2, outputs, 4, inputs, 1, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kIA32Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.Use(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairMul(Node* node) {
IA32OperandGenerator g(this);
- // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
- // register and one mov instruction.
- InstructionOperand inputs[] = {
- g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+ // register and one mov instruction.
+ InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
+ g.UseUnique(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseFixed(node->InputAt(3), ecx)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(node, eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(node, eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
- InstructionOperand temps[] = {g.TempRegister(edx)};
+ InstructionOperand temps[] = {g.TempRegister(edx)};
- Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+ Emit(kIA32MulPair, 2, outputs, 4, inputs, 1, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.Use(node->InputAt(2)));
+ }
}
void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
@@ -712,11 +735,19 @@ void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
g.UseFixed(node->InputAt(1), edx),
shift_operand};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(node, eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionOperand outputs[2];
+ InstructionOperand temps[1];
+ int32_t output_count = 0;
+ int32_t temp_count = 0;
+ outputs[output_count++] = g.DefineAsFixed(node, eax);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ outputs[output_count++] = g.DefineAsFixed(projection1, edx);
+ } else {
+ temps[temp_count++] = g.TempRegister(edx);
+ }
- selector->Emit(opcode, 2, outputs, 3, inputs);
+ selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
void InstructionSelector::VisitWord32PairShl(Node* node) {
@@ -1362,22 +1393,22 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Shared routine for word comparison with zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- // Try to combine the branch with a comparison.
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Try to combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWordCompare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, cont);
@@ -1443,7 +1474,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Continuation could not be combined with a compare, emit compare against 0.
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 22279fea7a..6242e9804e 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -29,6 +29,7 @@
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
+#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
@@ -47,7 +48,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchTailCallCodeObject) \
V(ArchCallJSFunction) \
V(ArchTailCallJSFunctionFromJSFunction) \
- V(ArchTailCallJSFunction) \
V(ArchTailCallAddress) \
V(ArchPrepareCallCFunction) \
V(ArchCallCFunction) \
@@ -124,7 +124,8 @@ enum ArchOpcode {
#undef COUNT_ARCH_OPCODE
};
-std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const ArchOpcode& ao);
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
@@ -143,7 +144,8 @@ enum AddressingMode {
#undef COUNT_ADDRESSING_MODE
};
-std::ostream& operator<<(std::ostream& os, const AddressingMode& am);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const AddressingMode& am);
// The mode of the flags continuation (see below).
enum FlagsMode {
@@ -153,7 +155,8 @@ enum FlagsMode {
kFlags_set = 3
};
-std::ostream& operator<<(std::ostream& os, const FlagsMode& fm);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const FlagsMode& fm);
// The condition of flags continuation (see below).
enum FlagsCondition {
@@ -189,7 +192,8 @@ inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
-std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const FlagsCondition& fc);
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index c7fd1ccd66..8ba287b1bc 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -284,7 +284,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject:
case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction:
case kArchTailCallAddress:
return kHasSideEffect | kIsBlockTerminator;
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 673d1b0dcb..6cb87ea0c0 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -387,6 +387,7 @@ class FlagsContinuation final {
void Overwrite(FlagsCondition condition) { condition_ = condition; }
void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+ DCHECK(condition_ == kEqual || condition_ == kNotEqual);
bool negate = condition_ == kEqual;
condition_ = condition;
if (negate) Negate();
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index b150725b2b..8f899f3c8c 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -7,6 +7,7 @@
#include <limits>
#include "src/base/adapters.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
@@ -392,9 +393,13 @@ void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
}
bool InstructionSelector::CanAddressRelativeToRootsRegister() const {
- return (enable_serialization_ == kDisableSerialization &&
- (linkage()->GetIncomingDescriptor()->flags() &
- CallDescriptor::kCanUseRoots));
+ return enable_serialization_ == kDisableSerialization &&
+ CanUseRootsRegister();
+}
+
+bool InstructionSelector::CanUseRootsRegister() const {
+ return linkage()->GetIncomingDescriptor()->flags() &
+ CallDescriptor::kCanUseRoots;
}
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
@@ -430,6 +435,7 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
case IrOpcode::kObjectState:
+ case IrOpcode::kTypedObjectState:
UNREACHABLE();
break;
default:
@@ -481,6 +487,10 @@ size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
FrameStateInputKind kind, Zone* zone) {
switch (input->opcode()) {
case IrOpcode::kObjectState: {
+ UNREACHABLE();
+ return 0;
+ }
+ case IrOpcode::kTypedObjectState: {
size_t id = deduplicator->GetObjectId(input);
if (id == StateObjectDeduplicator::kNotDuplicated) {
size_t entries = 0;
@@ -488,10 +498,12 @@ size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
descriptor->fields().push_back(
StateValueDescriptor::Recursive(zone, id));
StateValueDescriptor* new_desc = &descriptor->fields().back();
- for (Edge edge : input->input_edges()) {
+ int const input_count = input->op()->ValueInputCount();
+ ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
+ for (int i = 0; i < input_count; ++i) {
entries += AddOperandToStateValueDescriptor(
- new_desc, inputs, g, deduplicator, edge.to(),
- MachineType::AnyTagged(), kind, zone);
+ new_desc, inputs, g, deduplicator, input->InputAt(i),
+ types->at(i), kind, zone);
}
return entries;
} else {
@@ -502,7 +514,6 @@ size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
StateValueDescriptor::Duplicate(zone, id));
return 0;
}
- break;
}
default: {
inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
@@ -929,6 +940,16 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
}
+void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
+ Node* projection0 = NodeProperties::FindProjection(node, 0);
+ if (projection0) {
+ MarkAsWord32(projection0);
+ }
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ MarkAsWord32(projection1);
+ }
+}
void InstructionSelector::VisitNode(Node* node) {
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
@@ -1336,28 +1357,28 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kCheckedStore:
return VisitCheckedStore(node);
case IrOpcode::kInt32PairAdd:
- MarkAsWord32(NodeProperties::FindProjection(node, 0));
- MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
return VisitInt32PairAdd(node);
case IrOpcode::kInt32PairSub:
- MarkAsWord32(NodeProperties::FindProjection(node, 0));
- MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
return VisitInt32PairSub(node);
case IrOpcode::kInt32PairMul:
- MarkAsWord32(NodeProperties::FindProjection(node, 0));
- MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
return VisitInt32PairMul(node);
case IrOpcode::kWord32PairShl:
- MarkAsWord32(NodeProperties::FindProjection(node, 0));
- MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
return VisitWord32PairShl(node);
case IrOpcode::kWord32PairShr:
- MarkAsWord32(NodeProperties::FindProjection(node, 0));
- MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
return VisitWord32PairShr(node);
case IrOpcode::kWord32PairSar:
- MarkAsWord32(NodeProperties::FindProjection(node, 0));
- MarkAsWord32(NodeProperties::FindProjection(node, 1));
+ MarkAsWord32(node);
+ MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
case IrOpcode::kAtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
@@ -1741,7 +1762,7 @@ void InstructionSelector::VisitIfException(Node* node) {
void InstructionSelector::VisitOsrValue(Node* node) {
OperandGenerator g(this);
- int index = OpParameter<int>(node);
+ int index = OsrValueIndexOf(node->op());
Emit(kArchNop,
g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index)));
}
@@ -1875,109 +1896,63 @@ void InstructionSelector::VisitTailCall(Node* node) {
DCHECK_NE(0, descriptor->flags() & CallDescriptor::kSupportsTailCalls);
CallDescriptor* caller = linkage()->GetIncomingDescriptor();
- if (caller->CanTailCall(node)) {
- const CallDescriptor* callee = CallDescriptorOf(node->op());
- int stack_param_delta = callee->GetStackParameterDelta(caller);
- CallBuffer buffer(zone(), descriptor, nullptr);
-
- // Compute InstructionOperands for inputs and outputs.
- CallBufferFlags flags(kCallCodeImmediate | kCallTail);
- if (IsTailCallAddressImmediate()) {
- flags |= kCallAddressImmediate;
- }
- InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
-
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
- InstructionOperandVector temps(zone());
- if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObjectFromJSFunction;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunctionFromJSFunction;
- break;
- default:
- UNREACHABLE();
- return;
- }
- int temps_count = GetTempsCountForTailCallFromJSFunction();
- for (int i = 0; i < temps_count; i++) {
- temps.push_back(g.TempRegister());
- }
- } else {
- switch (descriptor->kind()) {
- case CallDescriptor::kCallCodeObject:
- opcode = kArchTailCallCodeObject;
- break;
- case CallDescriptor::kCallJSFunction:
- opcode = kArchTailCallJSFunction;
- break;
- case CallDescriptor::kCallAddress:
- opcode = kArchTailCallAddress;
- break;
- default:
- UNREACHABLE();
- return;
- }
- }
- opcode |= MiscField::encode(descriptor->flags());
-
- Emit(kArchPrepareTailCall, g.NoOutput());
+ DCHECK(caller->CanTailCall(node));
+ const CallDescriptor* callee = CallDescriptorOf(node->op());
+ int stack_param_delta = callee->GetStackParameterDelta(caller);
+ CallBuffer buffer(zone(), descriptor, nullptr);
- int first_unused_stack_slot =
- (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
- stack_param_delta;
- buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
-
- // Emit the tailcall instruction.
- Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
- &buffer.instruction_args.front(), temps.size(),
- temps.empty() ? nullptr : &temps.front());
- } else {
- FrameStateDescriptor* frame_state_descriptor =
- descriptor->NeedsFrameState()
- ? GetFrameStateDescriptor(
- node->InputAt(static_cast<int>(descriptor->InputCount())))
- : nullptr;
-
- CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
-
- // Compute InstructionOperands for inputs and outputs.
- CallBufferFlags flags = kCallCodeImmediate;
- if (IsTailCallAddressImmediate()) {
- flags |= kCallAddressImmediate;
- }
- InitializeCallBuffer(node, &buffer, flags);
-
- EmitPrepareArguments(&(buffer.pushed_nodes), descriptor, node);
+ // Compute InstructionOperands for inputs and outputs.
+ CallBufferFlags flags(kCallCodeImmediate | kCallTail);
+ if (IsTailCallAddressImmediate()) {
+ flags |= kCallAddressImmediate;
+ }
+ InitializeCallBuffer(node, &buffer, flags, stack_param_delta);
- // Select the appropriate opcode based on the call type.
- InstructionCode opcode;
+ // Select the appropriate opcode based on the call type.
+ InstructionCode opcode;
+ InstructionOperandVector temps(zone());
+ if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
switch (descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject;
+ opcode = kArchTailCallCodeObjectFromJSFunction;
break;
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction;
+ opcode = kArchTailCallJSFunctionFromJSFunction;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ int temps_count = GetTempsCountForTailCallFromJSFunction();
+ for (int i = 0; i < temps_count; i++) {
+ temps.push_back(g.TempRegister());
+ }
+ } else {
+ switch (descriptor->kind()) {
+ case CallDescriptor::kCallCodeObject:
+ opcode = kArchTailCallCodeObject;
+ break;
+ case CallDescriptor::kCallAddress:
+ opcode = kArchTailCallAddress;
break;
default:
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
-
- // Emit the call instruction.
- size_t output_count = buffer.outputs.size();
- auto* outputs = &buffer.outputs.front();
- Instruction* call_instr =
- Emit(opcode, output_count, outputs, buffer.instruction_args.size(),
- &buffer.instruction_args.front());
- if (instruction_selection_failed()) return;
- call_instr->MarkAsCall();
- Emit(kArchRet, 0, nullptr, output_count, outputs);
}
+ opcode |= MiscField::encode(descriptor->flags());
+
+ Emit(kArchPrepareTailCall, g.NoOutput());
+
+ int first_unused_stack_slot =
+ (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
+ stack_param_delta;
+ buffer.instruction_args.push_back(g.TempImmediate(first_unused_stack_slot));
+
+ // Emit the tailcall instruction.
+ Emit(opcode, 0, nullptr, buffer.instruction_args.size(),
+ &buffer.instruction_args.front(), temps.size(),
+ temps.empty() ? nullptr : &temps.front());
}
@@ -1987,20 +1962,34 @@ void InstructionSelector::VisitGoto(BasicBlock* target) {
Emit(kArchJmp, g.NoOutput(), g.Label(target));
}
-
void InstructionSelector::VisitReturn(Node* ret) {
OperandGenerator g(this);
- if (linkage()->GetIncomingDescriptor()->ReturnCount() == 0) {
- Emit(kArchRet, g.NoOutput());
- } else {
- const int ret_count = ret->op()->ValueInputCount();
- auto value_locations = zone()->NewArray<InstructionOperand>(ret_count);
- for (int i = 0; i < ret_count; ++i) {
- value_locations[i] =
- g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i));
- }
- Emit(kArchRet, 0, nullptr, ret_count, value_locations);
+ const int input_count = linkage()->GetIncomingDescriptor()->ReturnCount() == 0
+ ? 1
+ : ret->op()->ValueInputCount();
+ DCHECK_GE(input_count, 1);
+ auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
+ Node* pop_count = ret->InputAt(0);
+ value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
+ ? g.UseImmediate(pop_count)
+ : g.UseRegister(pop_count);
+ for (int i = 1; i < input_count; ++i) {
+ value_locations[i] =
+ g.UseLocation(ret->InputAt(i), linkage()->GetReturnLocation(i - 1));
}
+ Emit(kArchRet, 0, nullptr, input_count, value_locations);
+}
+
+Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
+ InstructionOperand output,
+ InstructionOperand a,
+ DeoptimizeReason reason,
+ Node* frame_state) {
+ size_t output_count = output.IsInvalid() ? 0 : 1;
+ InstructionOperand inputs[] = {a};
+ size_t input_count = arraysize(inputs);
+ return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
+ reason, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 2981f90a1c..65ba8f7c71 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -12,6 +12,7 @@
#include "src/compiler/instruction.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -42,7 +43,7 @@ class PushParameter {
};
// Instruction selection generates an InstructionSequence for a given Schedule.
-class InstructionSelector final {
+class V8_EXPORT_PRIVATE InstructionSelector final {
public:
// Forward declarations.
class Features;
@@ -110,6 +111,9 @@ class InstructionSelector final {
// ===========================================================================
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
+ InstructionOperand a, DeoptimizeReason reason,
+ Node* frame_state);
+ Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
DeoptimizeReason reason, Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
@@ -204,6 +208,8 @@ class InstructionSelector final {
// to the roots register, i.e. if both a root register is available for this
// compilation unit and the serializer is disabled.
bool CanAddressRelativeToRootsRegister() const;
+ // Check if we can use the roots register to access GC roots.
+ bool CanUseRootsRegister() const;
Isolate* isolate() const { return sequence()->isolate(); }
@@ -344,6 +350,8 @@ class InstructionSelector final {
}
bool instruction_selection_failed() { return instruction_selection_failed_; }
+ void MarkPairProjectionsAsWord32(Node* node);
+
// ===========================================================================
Zone* const zone_;
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 0df7ca0316..3b2311a23f 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -12,7 +12,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-const auto GetRegConfig = RegisterConfiguration::Turbofan;
+const RegisterConfiguration* (*GetRegConfig)() =
+ RegisterConfiguration::Turbofan;
FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
switch (condition) {
@@ -64,8 +65,35 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
return condition;
}
-bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
- return EqualsCanonicalized(that);
+bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
+ if (kSimpleFPAliasing || !this->IsFPLocationOperand() ||
+ !other.IsFPLocationOperand())
+ return EqualsCanonicalized(other);
+ // Aliasing is complex and both operands are fp locations.
+ const LocationOperand& loc = *LocationOperand::cast(this);
+ const LocationOperand& other_loc = LocationOperand::cast(other);
+ LocationOperand::LocationKind kind = loc.location_kind();
+ LocationOperand::LocationKind other_kind = other_loc.location_kind();
+ if (kind != other_kind) return false;
+ MachineRepresentation rep = loc.representation();
+ MachineRepresentation other_rep = other_loc.representation();
+ if (rep == other_rep) return EqualsCanonicalized(other);
+ if (kind == LocationOperand::REGISTER) {
+ // FP register-register interference.
+ return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
+ other_loc.register_code());
+ } else {
+ // FP slot-slot interference. Slots of different FP reps can alias because
+ // the gap resolver may break a move into 2 or 4 equivalent smaller moves.
+ DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
+ int index_hi = loc.index();
+ int index_lo = index_hi - (1 << ElementSizeLog2Of(rep)) / kPointerSize + 1;
+ int other_index_hi = other_loc.index();
+ int other_index_lo =
+ other_index_hi - (1 << ElementSizeLog2Of(other_rep)) / kPointerSize + 1;
+ return other_index_hi >= index_lo && index_hi >= other_index_lo;
+ }
+ return false;
}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
@@ -232,28 +260,31 @@ bool ParallelMove::IsRedundant() const {
return true;
}
-
-MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
+void ParallelMove::PrepareInsertAfter(
+ MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
+ bool no_aliasing =
+ kSimpleFPAliasing || !move->destination().IsFPLocationOperand();
MoveOperands* replacement = nullptr;
- MoveOperands* to_eliminate = nullptr;
+ MoveOperands* eliminated = nullptr;
for (MoveOperands* curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination().EqualsCanonicalized(move->source())) {
+ // We must replace move's source with curr's destination in order to
+ // insert it into this ParallelMove.
DCHECK(!replacement);
replacement = curr;
- if (to_eliminate != nullptr) break;
- } else if (curr->destination().EqualsCanonicalized(move->destination())) {
- DCHECK(!to_eliminate);
- to_eliminate = curr;
- if (replacement != nullptr) break;
+ if (no_aliasing && eliminated != nullptr) break;
+ } else if (curr->destination().InterferesWith(move->destination())) {
+ // We can eliminate curr, since move overwrites at least a part of its
+ // destination, implying its value is no longer live.
+ eliminated = curr;
+ to_eliminate->push_back(curr);
+ if (no_aliasing && replacement != nullptr) break;
}
}
- DCHECK_IMPLIES(replacement == to_eliminate, replacement == nullptr);
if (replacement != nullptr) move->set_source(replacement->source());
- return to_eliminate;
}
-
ExplicitOperand::ExplicitOperand(LocationKind kind, MachineRepresentation rep,
int index)
: LocationOperand(EXPLICIT, kind, rep, index) {
@@ -589,9 +620,7 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
handler_(handler),
needs_frame_(false),
must_construct_frame_(false),
- must_deconstruct_frame_(false),
- last_deferred_(RpoNumber::Invalid()) {}
-
+ must_deconstruct_frame_(false) {}
size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
size_t j = 0;
@@ -785,6 +814,7 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
next_virtual_register_(0),
reference_maps_(zone()),
representations_(zone()),
+ representation_mask_(0),
deoptimization_entries_(zone()),
current_block_(nullptr) {}
@@ -890,6 +920,7 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
DCHECK_IMPLIES(representations_[virtual_register] != rep,
representations_[virtual_register] == DefaultRepresentation());
representations_[virtual_register] = rep;
+ representation_mask_ |= 1 << static_cast<int>(rep);
}
int InstructionSequence::AddDeoptimizationEntry(
@@ -953,6 +984,11 @@ void InstructionSequence::PrintBlock(int block_id) const {
PrintBlock(GetRegConfig(), block_id);
}
+const RegisterConfiguration*
+InstructionSequence::GetRegisterConfigurationForTesting() {
+ return GetRegConfig();
+}
+
FrameStateDescriptor::FrameStateDescriptor(
Zone* zone, FrameStateType type, BailoutId bailout_id,
OutputFrameStateCombine state_combine, size_t parameters_count,
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index b5c5914166..327c8c1192 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -10,24 +10,27 @@
#include <map>
#include <set>
+#include "src/base/compiler-specific.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/frame.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/source-position.h"
+#include "src/globals.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
#include "src/zone/zone-allocator.h"
namespace v8 {
namespace internal {
+
+class SourcePosition;
+
namespace compiler {
-// Forward declarations.
class Schedule;
+class SourcePositionTable;
-
-class InstructionOperand {
+class V8_EXPORT_PRIVATE InstructionOperand {
public:
static const int kInvalidVirtualRegister = -1;
@@ -117,7 +120,7 @@ class InstructionOperand {
return this->GetCanonicalizedValue() < that.GetCanonicalizedValue();
}
- bool InterferesWith(const InstructionOperand& that) const;
+ bool InterferesWith(const InstructionOperand& other) const;
// APIs to aid debugging. For general-stream APIs, use operator<<
void Print(const RegisterConfiguration* config) const;
@@ -516,8 +519,8 @@ class LocationOperand : public InstructionOperand {
class IndexField : public BitField64<int32_t, 35, 29> {};
};
-
-class ExplicitOperand : public LocationOperand {
+class V8_EXPORT_PRIVATE ExplicitOperand
+ : public NON_EXPORTED_BASE(LocationOperand) {
public:
ExplicitOperand(LocationKind kind, MachineRepresentation rep, int index);
@@ -639,8 +642,14 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAnyLocationOperand()) {
MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFPRegister()) {
- // We treat all FP register operands the same for simple aliasing.
- canonical = MachineRepresentation::kFloat64;
+ if (kSimpleFPAliasing) {
+ // We treat all FP register operands the same for simple aliasing.
+ canonical = MachineRepresentation::kFloat64;
+ } else {
+ // We need to distinguish FP register operands of different reps when
+ // aliasing is not simple (e.g. ARM).
+ canonical = LocationOperand::cast(this)->representation();
+ }
}
return InstructionOperand::KindField::update(
LocationOperand::RepresentationField::update(this->value_, canonical),
@@ -657,8 +666,8 @@ struct CompareOperandModuloType {
}
};
-
-class MoveOperands final : public ZoneObject {
+class V8_EXPORT_PRIVATE MoveOperands final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
MoveOperands(const InstructionOperand& source,
const InstructionOperand& destination)
@@ -683,11 +692,6 @@ class MoveOperands final : public ZoneObject {
}
void SetPending() { destination_ = InstructionOperand(); }
- // True if this move is a move into the given destination operand.
- bool Blocks(const InstructionOperand& destination) const {
- return !IsEliminated() && source().InterferesWith(destination);
- }
-
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
@@ -722,8 +726,9 @@ struct PrintableMoveOperands {
std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
-
-class ParallelMove final : public ZoneVector<MoveOperands*>, public ZoneObject {
+class V8_EXPORT_PRIVATE ParallelMove final
+ : public NON_EXPORTED_BASE(ZoneVector<MoveOperands *>),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {
reserve(4);
@@ -746,9 +751,10 @@ class ParallelMove final : public ZoneVector<MoveOperands*>, public ZoneObject {
bool IsRedundant() const;
// Prepare this ParallelMove to insert move as if it happened in a subsequent
- // ParallelMove. move->source() may be changed. The MoveOperand returned
- // must be Eliminated.
- MoveOperands* PrepareInsertAfter(MoveOperands* move) const;
+ // ParallelMove. move->source() may be changed. Any MoveOperands added to
+ // to_eliminate must be Eliminated.
+ void PrepareInsertAfter(MoveOperands* move,
+ ZoneVector<MoveOperands*>* to_eliminate) const;
private:
DISALLOW_COPY_AND_ASSIGN(ParallelMove);
@@ -792,7 +798,7 @@ std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
class InstructionBlock;
-class Instruction final {
+class V8_EXPORT_PRIVATE Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
@@ -899,7 +905,6 @@ class Instruction final {
bool IsTailCall() const {
return arch_opcode() == ArchOpcode::kArchTailCallCodeObject ||
arch_opcode() == ArchOpcode::kArchTailCallCodeObjectFromJSFunction ||
- arch_opcode() == ArchOpcode::kArchTailCallJSFunction ||
arch_opcode() == ArchOpcode::kArchTailCallJSFunctionFromJSFunction ||
arch_opcode() == ArchOpcode::kArchTailCallAddress;
}
@@ -1019,8 +1024,7 @@ class RpoNumber final {
std::ostream& operator<<(std::ostream&, const RpoNumber&);
-
-class Constant final {
+class V8_EXPORT_PRIVATE Constant final {
public:
enum Type {
kInt32,
@@ -1211,7 +1215,8 @@ class DeoptimizationEntry final {
typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
-class PhiInstruction final : public ZoneObject {
+class V8_EXPORT_PRIVATE PhiInstruction final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
typedef ZoneVector<InstructionOperand> Inputs;
@@ -1236,7 +1241,8 @@ class PhiInstruction final : public ZoneObject {
// Analogue of BasicBlock for Instructions instead of Nodes.
-class InstructionBlock final : public ZoneObject {
+class V8_EXPORT_PRIVATE InstructionBlock final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
RpoNumber loop_end, bool deferred, bool handler);
@@ -1300,9 +1306,6 @@ class InstructionBlock final : public ZoneObject {
bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
- void set_last_deferred(RpoNumber last) { last_deferred_ = last; }
- RpoNumber last_deferred() const { return last_deferred_; }
-
private:
Successors successors_;
Predecessors predecessors_;
@@ -1318,7 +1321,6 @@ class InstructionBlock final : public ZoneObject {
bool needs_frame_;
bool must_construct_frame_;
bool must_deconstruct_frame_;
- RpoNumber last_deferred_;
};
class InstructionSequence;
@@ -1347,7 +1349,8 @@ struct PrintableInstructionSequence;
// Represents architecture-specific generated code before, during, and after
// register allocation.
-class InstructionSequence final : public ZoneObject {
+class V8_EXPORT_PRIVATE InstructionSequence final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
static InstructionBlocks* InstructionBlocksFor(Zone* zone,
const Schedule* schedule);
@@ -1388,6 +1391,7 @@ class InstructionSequence final : public ZoneObject {
}
MachineRepresentation GetRepresentation(int virtual_register) const;
void MarkAsRepresentation(MachineRepresentation rep, int virtual_register);
+ int representation_mask() const { return representation_mask_; }
bool IsReference(int virtual_register) const {
return CanBeTaggedPointer(GetRepresentation(virtual_register));
@@ -1395,14 +1399,6 @@ class InstructionSequence final : public ZoneObject {
bool IsFP(int virtual_register) const {
return IsFloatingPoint(GetRepresentation(virtual_register));
}
- bool IsFloat(int virtual_register) const {
- return GetRepresentation(virtual_register) ==
- MachineRepresentation::kFloat32;
- }
- bool IsDouble(int virtual_register) const {
- return GetRepresentation(virtual_register) ==
- MachineRepresentation::kFloat64;
- }
Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1504,9 +1500,11 @@ class InstructionSequence final : public ZoneObject {
void ValidateDeferredBlockEntryPaths() const;
void ValidateSSA() const;
+ const RegisterConfiguration* GetRegisterConfigurationForTesting();
+
private:
- friend std::ostream& operator<<(std::ostream& os,
- const PrintableInstructionSequence& code);
+ friend V8_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream& os, const PrintableInstructionSequence& code);
typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
@@ -1520,6 +1518,7 @@ class InstructionSequence final : public ZoneObject {
int next_virtual_register_;
ReferenceMapDeque reference_maps_;
ZoneVector<MachineRepresentation> representations_;
+ int representation_mask_;
DeoptimizationVector deoptimization_entries_;
// Used at construction time
@@ -1534,9 +1533,8 @@ struct PrintableInstructionSequence {
const InstructionSequence* sequence_;
};
-
-std::ostream& operator<<(std::ostream& os,
- const PrintableInstructionSequence& code);
+V8_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream& os, const PrintableInstructionSequence& code);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 539a372504..62523ca45c 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -61,6 +61,8 @@ void Int64Lowering::LowerGraph() {
// that they are processed after all other nodes.
PreparePhiReplacement(input);
stack_.push_front({input, 0});
+ } else if (input->opcode() == IrOpcode::kEffectPhi) {
+ stack_.push_front({input, 0});
} else {
stack_.push_back({input, 0});
}
@@ -240,7 +242,7 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering(signature());
// Only exchange the node if the parameter count actually changed.
- if (parameter_count != signature()->parameter_count()) {
+ if (parameter_count != static_cast<int>(signature()->parameter_count())) {
int delta =
parameter_count - static_cast<int>(signature()->parameter_count());
int new_output_count = node->op()->ValueOutputCount() + delta;
@@ -255,7 +257,7 @@ void Int64Lowering::LowerNode(Node* node) {
// the only input of a parameter node, only changes if the parameter count
// changes.
if (GetParameterCountAfterLowering(signature()) !=
- signature()->parameter_count()) {
+ static_cast<int>(signature()->parameter_count())) {
int old_index = ParameterIndexOf(node->op());
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
@@ -273,7 +275,7 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kReturn: {
DefaultLowering(node);
int new_return_count = GetReturnCountAfterLowering(signature());
- if (signature()->return_count() != new_return_count) {
+ if (static_cast<int>(signature()->return_count()) != new_return_count) {
NodeProperties::ChangeOp(node, common()->Return(new_return_count));
}
break;
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index 084c07a87c..66a54e9c3f 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -9,13 +9,14 @@
#include "src/compiler/graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-marker.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-class Int64Lowering {
+class V8_EXPORT_PRIVATE Int64Lowering {
public:
Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
CommonOperatorBuilder* common, Zone* zone,
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 41d4a00166..2962e24502 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -96,15 +96,30 @@ class JSCallReduction {
JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
Flags flags,
- CompilationDependencies* dependencies)
+ CompilationDependencies* dependencies,
+ Handle<Context> native_context)
: AdvancedReducer(editor),
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
+ native_context_(native_context),
type_cache_(TypeCache::Get()) {}
namespace {
+// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
+// alias analyzer?
+bool IsSame(Node* a, Node* b) {
+ if (a == b) {
+ return true;
+ } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
+ return IsSame(a->InputAt(0), b);
+ } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
+ return IsSame(a, b->InputAt(0));
+ }
+ return false;
+}
+
MaybeHandle<Map> GetMapWitness(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -112,7 +127,7 @@ MaybeHandle<Map> GetMapWitness(Node* node) {
// for the {receiver}, and if so use that map for the lowering below.
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
- dominator->InputAt(0) == receiver) {
+ IsSame(dominator->InputAt(0), receiver)) {
if (dominator->op()->ValueInputCount() == 2) {
HeapObjectMatcher m(dominator->InputAt(1));
if (m.HasValue()) return Handle<Map>::cast(m.Value());
@@ -160,8 +175,554 @@ bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
!IsReadOnlyLengthDescriptor(receiver_map);
}
+bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ // Ensure that the [[Prototype]] is actually an exotic Array
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+
+ // Don't inline JSArrays with slow elements of any kind
+ if (!IsFastElementsKind(receiver_map->elements_kind())) return false;
+
+ // If the receiver map has packed elements, no need to check the prototype.
+ // This requires a MapCheck where this is used.
+ if (!IsFastHoleyElementsKind(receiver_map->elements_kind())) return true;
+
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ // Ensure all prototypes of the {receiver} are stable.
+ for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
+ !it.IsAtEnd(); it.Advance()) {
+ Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
+ if (!current->map()->is_stable()) return false;
+ }
+
+ // For holey Arrays, ensure that the array_protector cell is valid (must be
+ // a CompilationDependency), and the JSArray prototype has not been altered.
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ (!receiver_map->is_dictionary_map() || receiver_map->is_stable()) &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype);
+}
+
} // namespace
+Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
+ IterationKind kind) {
+ Handle<Map> receiver_map;
+ if (GetMapWitness(node).ToHandle(&receiver_map)) {
+ return ReduceArrayIterator(receiver_map, node, kind,
+ ArrayIteratorKind::kArray);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceTypedArrayIterator(Node* node,
+ IterationKind kind) {
+ Handle<Map> receiver_map;
+ if (GetMapWitness(node).ToHandle(&receiver_map) &&
+ receiver_map->instance_type() == JS_TYPED_ARRAY_TYPE) {
+ return ReduceArrayIterator(receiver_map, node, kind,
+ ArrayIteratorKind::kTypedArray);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
+ Node* node, IterationKind kind,
+ ArrayIteratorKind iter_kind) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ if (iter_kind == ArrayIteratorKind::kTypedArray) {
+ // For JSTypedArray iterator methods, deopt if the buffer is neutered. This
+ // is potentially a deopt loop, but should be extremely unlikely.
+ DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check = graph()->NewNode(simplified()->BooleanNot(), check);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
+
+ int map_index = -1;
+ Node* object_map = jsgraph()->UndefinedConstant();
+ switch (receiver_map->instance_type()) {
+ case JS_ARRAY_TYPE:
+ if (kind == IterationKind::kKeys) {
+ map_index = Context::FAST_ARRAY_KEY_ITERATOR_MAP_INDEX;
+ } else {
+ map_index = kind == IterationKind::kValues
+ ? Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX
+ : Context::FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+
+ if (CanInlineJSArrayIteration(receiver_map)) {
+ // Use `generic` elements for holey arrays if there may be elements
+ // on the prototype chain.
+ map_index += static_cast<int>(receiver_map->elements_kind());
+ object_map = jsgraph()->Constant(receiver_map);
+ if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ Handle<JSObject> initial_array_prototype(
+ native_context()->initial_array_prototype(), isolate());
+ dependencies()->AssumePrototypeMapsStable(receiver_map,
+ initial_array_prototype);
+ }
+ } else {
+ map_index += (Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX -
+ Context::FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX);
+ }
+ }
+ break;
+ case JS_TYPED_ARRAY_TYPE:
+ if (kind == IterationKind::kKeys) {
+ map_index = Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX;
+ } else {
+ DCHECK_GE(receiver_map->elements_kind(), UINT8_ELEMENTS);
+ DCHECK_LE(receiver_map->elements_kind(), UINT8_CLAMPED_ELEMENTS);
+ map_index = (kind == IterationKind::kValues
+ ? Context::UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX
+ : Context::UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX) +
+ (receiver_map->elements_kind() - UINT8_ELEMENTS);
+ }
+ break;
+ default:
+ if (kind == IterationKind::kKeys) {
+ map_index = Context::GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX;
+ } else if (kind == IterationKind::kValues) {
+ map_index = Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX;
+ } else {
+ map_index = Context::GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX;
+ }
+ break;
+ }
+
+ DCHECK_GE(map_index, Context::TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX);
+ DCHECK_LE(map_index, Context::GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX);
+
+ Handle<Map> map(Map::cast(native_context()->get(map_index)), isolate());
+
+ // allocate new iterator
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, jsgraph()->Constant(map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+
+ // attach the iterator to this object
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
+ value, receiver, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex()), value,
+ jsgraph()->ZeroConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObjectMap()),
+ value, object_map, effect, control);
+
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+ // replace it
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
+ Handle<Map> iterator_map, Node* node, IterationKind kind) {
+ Node* iterator = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+
+ if (kind != IterationKind::kKeys &&
+ !isolate()->IsFastArrayIterationIntact()) {
+ // Avoid deopt loops for non-key iteration if the
+ // fast_array_iteration_protector cell has been invalidated.
+ return NoChange();
+ }
+
+ ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
+ iterator_map->instance_type());
+
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+ return NoChange();
+ } else {
+ Handle<JSObject> initial_array_prototype(
+ native_context()->initial_array_prototype(), isolate());
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+ }
+ }
+
+ Node* array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
+ iterator, effect, control);
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), array,
+ jsgraph()->UndefinedConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* vdone_false0;
+ Node* vfalse0;
+ Node* efalse0 = effect;
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ {
+ // iterator.[[IteratedObject]] !== undefined, continue iterating.
+ Node* index = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayIteratorIndex(
+ JS_ARRAY_TYPE, elements_kind)),
+ iterator, efalse0, if_false0);
+
+ Node* length = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(elements_kind)),
+ array, efalse0, if_false0);
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
+ Node* vdone_true1;
+ Node* vtrue1;
+ Node* etrue1 = efalse0;
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ {
+ // iterator.[[NextIndex]] < array.length, continue iterating
+ vdone_true1 = jsgraph()->FalseConstant();
+ if (kind == IterationKind::kKeys) {
+ vtrue1 = index;
+ } else {
+ // For value/entry iteration, first step is a mapcheck to ensure
+ // inlining is still valid.
+ Node* orig_map = etrue1 =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForJSArrayIteratorObjectMap()),
+ iterator, etrue1, if_true1);
+ etrue1 = graph()->NewNode(simplified()->CheckMaps(1), array, orig_map,
+ etrue1, if_true1);
+ }
+
+ if (kind != IterationKind::kKeys) {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ array, etrue1, if_true1);
+ Node* value = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(elements_kind)),
+ elements, index, etrue1, if_true1);
+
+ // Convert hole to undefined if needed.
+ if (elements_kind == FAST_HOLEY_ELEMENTS ||
+ elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
+ value);
+ } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ // TODO(bmeurer): avoid deopt if not all uses of value are truncated.
+ CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
+ value = etrue1 = graph()->NewNode(
+ simplified()->CheckFloat64Hole(mode), value, etrue1, if_true1);
+ }
+
+ if (kind == IterationKind::kEntries) {
+ // Allocate elements for key/value pair
+ vtrue1 = etrue1 =
+ graph()->NewNode(javascript()->CreateKeyValueArray(), index,
+ value, context, etrue1);
+ } else {
+ DCHECK_EQ(kind, IterationKind::kValues);
+ vtrue1 = value;
+ }
+ }
+
+ Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+ next_index = graph()->NewNode(simplified()->NumberToUint32(), next_index);
+
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex(
+ JS_ARRAY_TYPE, elements_kind)),
+ iterator, next_index, etrue1, if_true1);
+ }
+
+ Node* vdone_false1;
+ Node* vfalse1;
+ Node* efalse1 = efalse0;
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ {
+ // iterator.[[NextIndex]] >= array.length, stop iterating.
+ vdone_false1 = jsgraph()->TrueConstant();
+ vfalse1 = jsgraph()->UndefinedConstant();
+ efalse1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
+ iterator, vfalse1, efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ vdone_false0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vdone_true1, vdone_false1, if_false0);
+ }
+
+ Node* vdone_true0;
+ Node* vtrue0;
+ Node* etrue0 = effect;
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ {
+ // iterator.[[IteratedObject]] === undefined, the iterator is done.
+ vdone_true0 = jsgraph()->TrueConstant();
+ vtrue0 = jsgraph()->UndefinedConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_false0, if_true0);
+ effect = graph()->NewNode(common()->EffectPhi(2), efalse0, etrue0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vfalse0, vtrue0, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vdone_false0, vdone_true0, control);
+
+ // Create IteratorResult object.
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
+ Handle<Map> iterator_map, Node* node, IterationKind kind) {
+ Node* iterator = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+
+ ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
+ iterator_map->instance_type());
+
+ Node* array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
+ iterator, effect, control);
+ Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(), array,
+ jsgraph()->UndefinedConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* vdone_false0;
+ Node* vfalse0;
+ Node* efalse0 = effect;
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ {
+ // iterator.[[IteratedObject]] !== undefined, continue iterating.
+ Node* index = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayIteratorIndex(
+ JS_TYPED_ARRAY_TYPE, elements_kind)),
+ iterator, efalse0, if_false0);
+
+ // typedarray.[[ViewedArrayBuffer]]
+ Node* buffer = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ array, efalse0, if_false0);
+
+ Node* check1 = efalse0 = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
+ check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
+ efalse0 =
+ graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+
+ Node* length = efalse0 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
+ efalse0, if_false0);
+
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check2, if_false0);
+
+ Node* vdone_true2;
+ Node* vtrue2;
+ Node* etrue2 = efalse0;
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ {
+ // iterator.[[NextIndex]] < array.length, continue iterating
+ vdone_true2 = jsgraph()->FalseConstant();
+ if (kind == IterationKind::kKeys) {
+ vtrue2 = index;
+ }
+
+ Node* next_index = graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant());
+ next_index = graph()->NewNode(simplified()->NumberToUint32(), next_index);
+
+ etrue2 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorIndex(
+ JS_TYPED_ARRAY_TYPE, elements_kind)),
+ iterator, next_index, etrue2, if_true2);
+
+ if (kind != IterationKind::kKeys) {
+ Node* elements = etrue2 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ array, etrue2, if_true2);
+ Node* base_ptr = etrue2 = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+ elements, etrue2, if_true2);
+ Node* external_ptr = etrue2 = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+ elements, etrue2, if_true2);
+
+ ExternalArrayType array_type = kExternalInt8Array;
+ switch (elements_kind) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ array_type = kExternal##Type##Array; \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
+ }
+
+ Node* value = etrue2 =
+ graph()->NewNode(simplified()->LoadTypedElement(array_type), buffer,
+ base_ptr, external_ptr, index, etrue2, if_true2);
+
+ if (kind == IterationKind::kEntries) {
+ // Allocate elements for key/value pair
+ vtrue2 = etrue2 =
+ graph()->NewNode(javascript()->CreateKeyValueArray(), index,
+ value, context, etrue2);
+ } else {
+ DCHECK(kind == IterationKind::kValues);
+ vtrue2 = value;
+ }
+ }
+ }
+
+ Node* vdone_false2;
+ Node* vfalse2;
+ Node* efalse2 = efalse0;
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ {
+ // iterator.[[NextIndex]] >= array.length, stop iterating.
+ vdone_false2 = jsgraph()->TrueConstant();
+ vfalse2 = jsgraph()->UndefinedConstant();
+ efalse2 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayIteratorObject()),
+ iterator, vfalse2, efalse2, if_false2);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue2, vfalse2, if_false0);
+ vdone_false0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vdone_true2, vdone_false2, if_false0);
+ }
+
+ Node* vdone_true0;
+ Node* vtrue0;
+ Node* etrue0 = effect;
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ {
+ // iterator.[[IteratedObject]] === undefined, the iterator is done.
+ vdone_true0 = jsgraph()->TrueConstant();
+ vtrue0 = jsgraph()->UndefinedConstant();
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_false0, if_true0);
+ effect = graph()->NewNode(common()->EffectPhi(2), efalse0, etrue0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vfalse0, vtrue0, control);
+ Node* done =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vdone_false0, vdone_true0, control);
+
+ // Create IteratorResult object.
+ value = effect = graph()->NewNode(javascript()->CreateIterResultObject(),
+ value, done, context, effect);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
+ Handle<Map> receiver_map;
+ if (GetMapWitness(node).ToHandle(&receiver_map)) {
+ switch (receiver_map->instance_type()) {
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(receiver_map, node,
+ IterationKind::kKeys);
+
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(receiver_map, node,
+ IterationKind::kKeys);
+
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(receiver_map, node,
+ IterationKind::kEntries);
+
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(receiver_map, node,
+ IterationKind::kEntries);
+
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(receiver_map, node,
+ IterationKind::kValues);
+
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(receiver_map, node,
+ IterationKind::kValues);
+
+ default:
+ // Slow array iterators are not reduced
+ return NoChange();
+ }
+ }
+ return NoChange();
+}
+
// ES6 section 22.1.3.17 Array.prototype.pop ( )
Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
Handle<Map> receiver_map;
@@ -329,14 +890,14 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
InstanceType instance_type) {
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
- dominator->InputAt(0) == receiver) {
+ IsSame(dominator->InputAt(0), receiver)) {
// Check if all maps have the given {instance_type}.
for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
Node* const map = NodeProperties::GetValueInput(dominator, i);
Type* const map_type = NodeProperties::GetType(map);
- if (!map_type->IsConstant()) return false;
+ if (!map_type->IsHeapConstant()) return false;
Handle<Map> const map_value =
- Handle<Map>::cast(map_type->AsConstant()->Value());
+ Handle<Map>::cast(map_type->AsHeapConstant()->Value());
if (map_value->instance_type() != instance_type) return false;
}
return true;
@@ -915,11 +1476,10 @@ Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
r.InputsMatchTwo(type_cache_.kSafeInteger,
type_cache_.kZeroOrUndefined) ||
r.InputsMatchTwo(type_cache_.kSafeInteger, type_cache_.kTenOrUndefined)) {
- // Number.parseInt(a:safe-integer) -> NumberToInt32(a)
- // Number.parseInt(a:safe-integer,b:#0\/undefined) -> NumberToInt32(a)
- // Number.parseInt(a:safe-integer,b:#10\/undefined) -> NumberToInt32(a)
- Node* input = r.GetJSCallInput(0);
- Node* value = graph()->NewNode(simplified()->NumberToInt32(), input);
+ // Number.parseInt(a:safe-integer) -> a
+ // Number.parseInt(a:safe-integer,b:#0\/undefined) -> a
+ // Number.parseInt(a:safe-integer,b:#10\/undefined) -> a
+ Node* value = r.GetJSCallInput(0);
return Replace(value);
}
return NoChange();
@@ -949,7 +1509,7 @@ Node* GetStringWitness(Node* node) {
// the lowering below.
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckString &&
- dominator->InputAt(0) == receiver) {
+ IsSame(dominator->InputAt(0), receiver)) {
return dominator;
}
if (dominator->op()->EffectInputCount() != 1) {
@@ -1058,6 +1618,46 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
return NoChange();
}
+Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* map = jsgraph()->HeapConstant(
+ handle(native_context()->string_iterator_map(), isolate()));
+
+ // allocate new iterator
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect = graph()->NewNode(
+ simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(JSStringIterator::kSize), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, map, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+
+ // attach the iterator to this string
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSStringIteratorString()),
+ value, receiver, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
+ value, jsgraph()->SmiConstant(0), effect, control);
+
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+ // replace it
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -1090,11 +1690,11 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
index, if_true0);
// branch1: if ((lead & 0xFC00) === 0xD800)
- Node* check1 = graph()->NewNode(
- simplified()->NumberEqual(),
- graph()->NewNode(simplified()->NumberBitwiseAnd(), lead,
- jsgraph()->Int32Constant(0xFC00)),
- jsgraph()->Int32Constant(0xD800));
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberEqual(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(),
+ lead, jsgraph()->Constant(0xFC00)),
+ jsgraph()->Constant(0xD800));
Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
check1, if_true0);
Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
@@ -1116,8 +1716,8 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
Node* check3 = graph()->NewNode(
simplified()->NumberEqual(),
graph()->NewNode(simplified()->NumberBitwiseAnd(), trail,
- jsgraph()->Int32Constant(0xFC00)),
- jsgraph()->Int32Constant(0xDC00));
+ jsgraph()->Constant(0xFC00)),
+ jsgraph()->Constant(0xDC00));
Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
check3, if_true2);
Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
@@ -1128,11 +1728,11 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
graph()->NewNode(simplified()->NumberShiftLeft(), lead,
- jsgraph()->Int32Constant(16)),
+ jsgraph()->Constant(16)),
trail);
#else
graph()->NewNode(simplified()->NumberShiftLeft(), trail,
- jsgraph()->Int32Constant(16)),
+ jsgraph()->Constant(16)),
lead);
#endif
}
@@ -1234,6 +1834,14 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
+ case kArrayEntries:
+ return ReduceArrayIterator(node, IterationKind::kEntries);
+ case kArrayKeys:
+ return ReduceArrayIterator(node, IterationKind::kKeys);
+ case kArrayValues:
+ return ReduceArrayIterator(node, IterationKind::kValues);
+ case kArrayIteratorNext:
+ return ReduceArrayIteratorNext(node);
case kArrayPop:
return ReduceArrayPop(node);
case kArrayPush:
@@ -1370,6 +1978,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringCharAt(node);
case kStringCharCodeAt:
return ReduceStringCharCodeAt(node);
+ case kStringIterator:
+ return ReduceStringIterator(node);
case kStringIteratorNext:
return ReduceStringIteratorNext(node);
case kDataViewByteLength:
@@ -1391,6 +2001,12 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kTypedArrayLength:
return ReduceArrayBufferViewAccessor(
node, JS_TYPED_ARRAY_TYPE, AccessBuilder::ForJSTypedArrayLength());
+ case kTypedArrayEntries:
+ return ReduceTypedArrayIterator(node, IterationKind::kEntries);
+ case kTypedArrayKeys:
+ return ReduceTypedArrayIterator(node, IterationKind::kKeys);
+ case kTypedArrayValues:
+ return ReduceTypedArrayIterator(node, IterationKind::kValues);
default:
break;
}
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 524d006174..4af3084ea3 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -25,7 +27,8 @@ class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
class TypeCache;
-class JSBuiltinReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSBuiltinReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
// Flags that control the mode of operation.
enum Flag {
@@ -35,12 +38,23 @@ class JSBuiltinReducer final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSBuiltinReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
- CompilationDependencies* dependencies);
+ CompilationDependencies* dependencies,
+ Handle<Context> native_context);
~JSBuiltinReducer() final {}
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceArrayIterator(Node* node, IterationKind kind);
+ Reduction ReduceTypedArrayIterator(Node* node, IterationKind kind);
+ Reduction ReduceArrayIterator(Handle<Map> receiver_map, Node* node,
+ IterationKind kind,
+ ArrayIteratorKind iter_kind);
+ Reduction ReduceArrayIteratorNext(Node* node);
+ Reduction ReduceFastArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ IterationKind kind);
+ Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ IterationKind kind);
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
Reduction ReduceDateGetTime(Node* node);
@@ -88,6 +102,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
+ Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
@@ -101,6 +116,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
Factory* factory() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
+ Handle<Context> native_context() const { return native_context_; }
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
JSOperatorBuilder* javascript() const;
@@ -109,6 +125,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
CompilationDependencies* const dependencies_;
Flags const flags_;
JSGraph* const jsgraph_;
+ Handle<Context> const native_context_;
TypeCache const& type_cache_;
};
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index dd8f0643dd..e48fce91c4 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -189,12 +189,72 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+namespace {
+
+// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
+// alias analyzer?
+bool IsSame(Node* a, Node* b) {
+ if (a == b) {
+ return true;
+ } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
+ return IsSame(a->InputAt(0), b);
+ } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
+ return IsSame(a, b->InputAt(0));
+ }
+ return false;
+}
+
+// TODO(turbofan): Share with similar functionality in JSInliningHeuristic
+// and JSNativeContextSpecialization, i.e. move to NodeProperties helper?!
+MaybeHandle<Map> InferReceiverMap(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Check if the {node} is dominated by a CheckMaps with a single map
+ // for the {receiver}, and if so use that map for the lowering below.
+ for (Node* dominator = effect;;) {
+ if (dominator->opcode() == IrOpcode::kCheckMaps &&
+ IsSame(dominator->InputAt(0), receiver)) {
+ if (dominator->op()->ValueInputCount() == 2) {
+ HeapObjectMatcher m(dominator->InputAt(1));
+ if (m.HasValue()) return Handle<Map>::cast(m.Value());
+ }
+ return MaybeHandle<Map>();
+ }
+ if (dominator->op()->EffectInputCount() != 1) {
+ // Didn't find any appropriate CheckMaps node.
+ return MaybeHandle<Map>();
+ }
+ dominator = NodeProperties::GetEffectInput(dominator);
+ }
+}
+
+} // namespace
+
+// ES6 section B.2.2.1.1 get Object.prototype.__proto__
+Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+
+ // Try to determine the {receiver} map.
+ Handle<Map> receiver_map;
+ if (InferReceiverMap(node).ToHandle(&receiver_map)) {
+ // Check if we can constant-fold the {receiver} map.
+ if (!receiver_map->IsJSProxyMap() &&
+ !receiver_map->has_hidden_prototype() &&
+ !receiver_map->is_access_check_needed()) {
+ Handle<Object> receiver_prototype(receiver_map->prototype(), isolate());
+ Node* value = jsgraph()->Constant(receiver_prototype);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+
+ return NoChange();
+}
Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -215,26 +275,23 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
}
// Check for known builtin functions.
- if (shared->HasBuiltinFunctionId()) {
- switch (shared->builtin_function_id()) {
- case kFunctionApply:
- return ReduceFunctionPrototypeApply(node);
- case kFunctionCall:
- return ReduceFunctionPrototypeCall(node);
- default:
- break;
- }
+ switch (shared->code()->builtin_index()) {
+ case Builtins::kFunctionPrototypeApply:
+ return ReduceFunctionPrototypeApply(node);
+ case Builtins::kFunctionPrototypeCall:
+ return ReduceFunctionPrototypeCall(node);
+ case Builtins::kNumberConstructor:
+ return ReduceNumberConstructor(node);
+ case Builtins::kObjectPrototypeGetProto:
+ return ReduceObjectPrototypeGetProto(node);
+ default:
+ break;
}
// Check for the Array constructor.
if (*function == function->native_context()->array_function()) {
return ReduceArrayConstructor(node);
}
-
- // Check for the Number constructor.
- if (*function == function->native_context()->number_function()) {
- return ReduceNumberConstructor(node);
- }
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -298,19 +355,8 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// Retrieve the Array function from the {node}.
- Node* array_function;
- Handle<Context> native_context;
- if (GetNativeContext(node).ToHandle(&native_context)) {
- array_function = jsgraph()->HeapConstant(
- handle(native_context->array_function(), isolate()));
- } else {
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- array_function = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
- native_context, native_context, effect);
- }
+ Node* array_function = jsgraph()->HeapConstant(
+ handle(native_context()->array_function(), isolate()));
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -353,7 +399,6 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
int const arity = static_cast<int>(p.arity() - 2);
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -413,19 +458,8 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
Handle<AllocationSite> site = Handle<AllocationSite>::cast(feedback);
// Retrieve the Array function from the {node}.
- Node* array_function;
- Handle<Context> native_context;
- if (GetNativeContext(node).ToHandle(&native_context)) {
- array_function = jsgraph()->HeapConstant(
- handle(native_context->array_function(), isolate()));
- } else {
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- array_function = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ARRAY_FUNCTION_INDEX, true),
- native_context, native_context, effect);
- }
+ Node* array_function = jsgraph()->HeapConstant(
+ handle(native_context()->array_function(), isolate()));
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
@@ -469,25 +503,14 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
return NoChange();
}
-
-MaybeHandle<Context> JSCallReducer::GetNativeContext(Node* node) {
- Node* const context = NodeProperties::GetContextInput(node);
- return NodeProperties::GetSpecializationNativeContext(context,
- native_context());
-}
-
-
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
-
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
-
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
-
JSOperatorBuilder* JSCallReducer::javascript() const {
return jsgraph()->javascript();
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 0c3835c35c..81153f98dc 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -31,7 +31,7 @@ class JSCallReducer final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
- MaybeHandle<Context> native_context)
+ Handle<Context> native_context)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
flags_(flags),
@@ -44,23 +44,22 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
+ Reduction ReduceObjectPrototypeGetProto(Node* node);
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
- MaybeHandle<Context> GetNativeContext(Node* node);
-
Graph* graph() const;
Flags flags() const { return flags_; }
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
- MaybeHandle<Context> native_context() const { return native_context_; }
+ Handle<Context> native_context() const { return native_context_; }
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
JSGraph* const jsgraph_;
Flags const flags_;
- MaybeHandle<Context> const native_context_;
+ Handle<Context> const native_context_;
};
DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index b68bb7085d..c54b76b6cb 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -36,12 +36,16 @@ class AllocationBuilder final {
control_(control) {}
// Primitive allocation of static size.
- void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+ void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
+ Type* type = Type::Any()) {
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
graph()->NewNode(simplified()->Allocate(pretenure),
jsgraph()->Constant(size), effect_, control_);
+ // TODO(turbofan): Maybe we should put the Type* onto the Allocate operator
+ // at some point, or maybe we should have a completely different story.
+ NodeProperties::SetType(allocation_, type);
effect_ = allocation_;
}
@@ -65,7 +69,7 @@ class AllocationBuilder final {
int size = (map->instance_type() == FIXED_ARRAY_TYPE)
? FixedArray::SizeFor(length)
: FixedDoubleArray::SizeFor(length);
- Allocate(size, pretenure);
+ Allocate(size, pretenure, Type::OtherInternal());
Store(AccessBuilder::ForMap(), map);
Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
}
@@ -206,6 +210,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateIterResultObject:
return ReduceJSCreateIterResultObject(node);
+ case IrOpcode::kJSCreateKeyValueArray:
+ return ReduceJSCreateKeyValueArray(node);
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
return ReduceJSCreateLiteral(node);
@@ -231,13 +237,12 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
Type* const new_target_type = NodeProperties::GetType(new_target);
Node* const effect = NodeProperties::GetEffectInput(node);
// Extract constructor and original constructor function.
- if (target_type->IsConstant() &&
- new_target_type->IsConstant() &&
- new_target_type->AsConstant()->Value()->IsJSFunction()) {
+ if (target_type->IsHeapConstant() && new_target_type->IsHeapConstant() &&
+ new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> constructor =
- Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
Handle<JSFunction> original_constructor =
- Handle<JSFunction>::cast(new_target_type->AsConstant()->Value());
+ Handle<JSFunction>::cast(new_target_type->AsHeapConstant()->Value());
DCHECK(constructor->IsConstructor());
DCHECK(original_constructor->IsConstructor());
@@ -354,22 +359,18 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const elements = AllocateAliasedArguments(
effect, control, args_state, context, shared, &has_aliased_arguments);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
- // Load the arguments object map from the current native context.
- Node* const load_native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* const load_arguments_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
- : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
- load_native_context, effect, control);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(handle(
+ has_aliased_arguments ? native_context()->fast_aliased_arguments_map()
+ : native_context()->sloppy_arguments_map(),
+ isolate()));
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
a.Allocate(JSSloppyArgumentsObject::kSize);
- a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
@@ -380,7 +381,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
} else if (type == CreateArgumentsType::kUnmappedArguments) {
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
@@ -390,21 +390,16 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
// Prepare element backing store to be used by arguments object.
Node* const elements = AllocateArguments(effect, control, args_state);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
- // Load the arguments object map from the current native context.
- Node* const load_native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* const load_arguments_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- Context::STRICT_ARGUMENTS_MAP_INDEX)),
- load_native_context, effect, control);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(
+ handle(native_context()->strict_arguments_map(), isolate()));
// Actually allocate and initialize the arguments object.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
int length = args_state_info.parameter_count() - 1; // Minus receiver.
STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
a.Allocate(JSStrictArgumentsObject::kSize);
- a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
@@ -417,7 +412,6 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
int start_index = shared->internal_formal_parameter_count();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
- Node* const context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
// Choose the correct frame state and frame state info depending on
// whether there conceptually is an arguments adaptor frame in the call
@@ -428,14 +422,9 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const elements =
AllocateRestArguments(effect, control, args_state, start_index);
effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
- // Load the JSArray object map from the current native context.
- Node* const load_native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* const load_jsarray_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
- load_native_context, effect, control);
+ // Load the JSArray object map.
+ Node* const jsarray_map = jsgraph()->HeapConstant(handle(
+ native_context()->js_array_fast_elements_map_index(), isolate()));
// Actually allocate and initialize the jsarray.
AllocationBuilder a(jsgraph(), effect, control);
Node* properties = jsgraph()->EmptyFixedArrayConstant();
@@ -445,7 +434,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
int length = std::max(0, argument_count - start_index);
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
a.Allocate(JSArray::kSize);
- a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+ a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
@@ -463,7 +452,6 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
int capacity,
Handle<AllocationSite> site) {
DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -479,13 +467,10 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
dependencies()->AssumeTenuringDecision(site);
dependencies()->AssumeTransitionStable(site);
- // Retrieve the initial map for the array from the appropriate native context.
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* js_array_map = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
- native_context, native_context, effect);
+ // Retrieve the initial map for the array.
+ int const array_map_index = Context::ArrayMapIndex(elements_kind);
+ Node* js_array_map = jsgraph()->HeapConstant(
+ handle(Map::cast(native_context()->get(array_map_index)), isolate()));
// Setup elements and properties.
Node* elements;
@@ -528,7 +513,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
CallDescriptor::kNeedsFrameState);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(0));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(0));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
@@ -546,7 +531,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
CallDescriptor::kNeedsFrameState);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(1));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
@@ -577,7 +562,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
node->InputAt(1),
jsgraph()->HeapConstant(site),
- jsgraph()->Int32Constant(1),
+ jsgraph()->Constant(1),
jsgraph()->UndefinedConstant(),
length,
context,
@@ -601,7 +586,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
node->InputAt(1),
jsgraph()->HeapConstant(site),
- jsgraph()->Int32Constant(1),
+ jsgraph()->Constant(1),
jsgraph()->UndefinedConstant(),
length,
context,
@@ -632,7 +617,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
CallDescriptor::kNeedsFrameState);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
@@ -685,18 +670,14 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- int function_map_index =
+ int const function_map_index =
Context::FunctionMapIndex(shared->language_mode(), shared->kind());
- Node* function_map = effect =
- graph()->NewNode(javascript()->LoadContext(0, function_map_index, true),
- native_context, native_context, effect);
+ Node* function_map = jsgraph()->HeapConstant(
+ handle(Map::cast(native_context()->get(function_map_index)), isolate()));
// Note that it is only safe to embed the raw entry point of the compile
// lazy stub into the code, because that stub is immortal and immovable.
- Node* compile_entry = jsgraph()->IntPtrConstant(reinterpret_cast<intptr_t>(
- jsgraph()->isolate()->builtins()->CompileLazy()->entry()));
+ Node* compile_entry = jsgraph()->PointerConstant(
+ jsgraph()->isolate()->builtins()->CompileLazy()->entry());
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
Node* the_hole = jsgraph()->TheHoleConstant();
@@ -724,23 +705,8 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
Node* done = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- Node* iterator_result_map;
- Handle<Context> native_context;
- if (GetSpecializationNativeContext(node).ToHandle(&native_context)) {
- // Specialize to the constant JSIteratorResult map to enable map check
- // elimination to eliminate subsequent checks in case of inlining.
- iterator_result_map = jsgraph()->HeapConstant(
- handle(native_context->iterator_result_map(), isolate()));
- } else {
- // Load the JSIteratorResult map for the {context}.
- Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- iterator_result_map = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
- native_context, native_context, effect);
- }
+ Node* iterator_result_map = jsgraph()->HeapConstant(
+ handle(native_context()->iterator_result_map(), isolate()));
// Emit code to allocate the JSIteratorResult instance.
AllocationBuilder a(jsgraph(), effect, graph()->start());
@@ -757,6 +723,36 @@ Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
return Changed(node);
}
+Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateKeyValueArray, node->opcode());
+ Node* key = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ Node* array_map = jsgraph()->HeapConstant(
+ handle(native_context()->js_array_fast_elements_map_index()));
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ Node* length = jsgraph()->Constant(2);
+
+ AllocationBuilder aa(jsgraph(), effect, graph()->start());
+ aa.AllocateArray(2, factory()->fixed_array_map());
+ aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+ jsgraph()->Constant(0), key);
+ aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+ jsgraph()->Constant(1), value);
+ Node* elements = aa.Finish();
+
+ AllocationBuilder a(jsgraph(), elements, graph()->start());
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
node->opcode() == IrOpcode::kJSCreateLiteralObject);
@@ -799,9 +795,6 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
Node* extension = jsgraph()->TheHoleConstant();
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
@@ -810,7 +803,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
+ jsgraph()->HeapConstant(native_context()));
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
}
@@ -830,9 +823,6 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
AllocationBuilder aa(jsgraph(), effect, control);
aa.Allocate(ContextExtension::kSize);
@@ -848,7 +838,7 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
+ jsgraph()->HeapConstant(native_context()));
RelaxControls(node);
a.FinishAndChange(node);
return Changed(node);
@@ -863,9 +853,6 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
AllocationBuilder aa(jsgraph(), effect, control);
aa.Allocate(ContextExtension::kSize);
@@ -884,7 +871,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
+ jsgraph()->HeapConstant(native_context()));
a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
exception);
RelaxControls(node);
@@ -905,9 +892,7 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
Node* extension = jsgraph()->Constant(scope_info);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
+
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
a.AllocateArray(context_length, factory()->block_context_map());
@@ -915,7 +900,7 @@ Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
+ jsgraph()->HeapConstant(native_context()));
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
}
@@ -1160,7 +1145,8 @@ Node* JSCreateLowering::AllocateFastLiteral(
// Actually allocate and initialize the object.
AllocationBuilder builder(jsgraph(), effect, control);
- builder.Allocate(boilerplate_map->instance_size(), pretenure);
+ builder.Allocate(boilerplate_map->instance_size(), pretenure,
+ Type::OtherObject());
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
builder.Store(AccessBuilder::ForJSObjectElements(), elements);
@@ -1225,7 +1211,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(
Handle<FixedArray> elements =
Handle<FixedArray>::cast(boilerplate_elements);
for (int i = 0; i < elements_length; ++i) {
- if (elements->is_the_hole(i)) {
+ if (elements->is_the_hole(isolate(), i)) {
elements_values[i] = jsgraph()->TheHoleConstant();
} else {
Handle<Object> element_value(elements->get(i), isolate());
@@ -1280,13 +1266,6 @@ MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
return MaybeHandle<LiteralsArray>();
}
-MaybeHandle<Context> JSCreateLowering::GetSpecializationNativeContext(
- Node* node) {
- Node* const context = NodeProperties::GetContextInput(node);
- return NodeProperties::GetSpecializationNativeContext(context,
- native_context_);
-}
-
Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index 6248ca2982..b5390f136c 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_JS_CREATE_LOWERING_H_
#define V8_COMPILER_JS_CREATE_LOWERING_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -27,11 +29,12 @@ class SimplifiedOperatorBuilder;
// Lowers JSCreate-level operators to fast (inline) allocations.
-class JSCreateLowering final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSCreateLowering final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
- MaybeHandle<Context> native_context, Zone* zone)
+ Handle<Context> native_context, Zone* zone)
: AdvancedReducer(editor),
dependencies_(dependencies),
jsgraph_(jsgraph),
@@ -48,6 +51,7 @@ class JSCreateLowering final : public AdvancedReducer {
Reduction ReduceJSCreateArray(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateIterResultObject(Node* node);
+ Reduction ReduceJSCreateKeyValueArray(Node* node);
Reduction ReduceJSCreateLiteral(Node* node);
Reduction ReduceJSCreateFunctionContext(Node* node);
Reduction ReduceJSCreateWithContext(Node* node);
@@ -77,13 +81,12 @@ class JSCreateLowering final : public AdvancedReducer {
// Infers the LiteralsArray to use for a given {node}.
MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
- // Infers the native context to use for a given {node}.
- MaybeHandle<Context> GetSpecializationNativeContext(Node* node);
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
+ Handle<Context> native_context() const { return native_context_; }
JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
@@ -94,7 +97,7 @@ class JSCreateLowering final : public AdvancedReducer {
CompilationDependencies* const dependencies_;
JSGraph* const jsgraph_;
MaybeHandle<LiteralsArray> const literals_array_;
- MaybeHandle<Context> const native_context_;
+ Handle<Context> const native_context_;
Zone* const zone_;
};
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 769d615e4a..55ec1bf41d 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -16,6 +16,8 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kOsrValue:
return ReduceOsrValue(node);
+ case IrOpcode::kOsrGuard:
+ return ReduceOsrGuard(node);
case IrOpcode::kParameter:
return ReduceParameter(node);
default:
@@ -24,11 +26,10 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
return NoChange();
}
-
Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
Handle<Object> value;
- int const index = OpParameter<int>(node);
+ int index = OsrValueIndexOf(node->op());
int const parameters_count = frame()->ComputeParametersCount() + 1;
if (index == Linkage::kOsrContextSpillSlotIndex) {
value = handle(frame()->context(), isolate());
@@ -43,6 +44,12 @@ Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
return Replace(jsgraph()->Constant(value));
}
+Reduction JSFrameSpecialization::ReduceOsrGuard(Node* node) {
+ DCHECK_EQ(IrOpcode::kOsrGuard, node->opcode());
+ ReplaceWithValue(node, node->InputAt(0),
+ NodeProperties::GetEffectInput(node));
+ return Changed(node);
+}
Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
index 90b3ca5e39..daf699265c 100644
--- a/deps/v8/src/compiler/js-frame-specialization.h
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -18,17 +18,18 @@ namespace compiler {
// Forward declarations.
class JSGraph;
-
-class JSFrameSpecialization final : public Reducer {
+class JSFrameSpecialization final : public AdvancedReducer {
public:
- JSFrameSpecialization(JavaScriptFrame const* frame, JSGraph* jsgraph)
- : frame_(frame), jsgraph_(jsgraph) {}
+ JSFrameSpecialization(Editor* editor, JavaScriptFrame const* frame,
+ JSGraph* jsgraph)
+ : AdvancedReducer(editor), frame_(frame), jsgraph_(jsgraph) {}
~JSFrameSpecialization() final {}
Reduction Reduce(Node* node) final;
private:
Reduction ReduceOsrValue(Node* node);
+ Reduction ReduceOsrGuard(Node* node);
Reduction ReduceParameter(Node* node);
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 22d6c86aa0..250a9c26f6 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -116,6 +116,8 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
}
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
+ // The === operator doesn't need the current context.
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
Callable callable = CodeFactory::StrictEqual(isolate());
node->RemoveInput(4); // control
ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
@@ -123,6 +125,8 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) {
}
void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
+ // The !== operator doesn't need the current context.
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
Callable callable = CodeFactory::StrictNotEqual(isolate());
node->RemoveInput(4); // control
ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
@@ -130,6 +134,8 @@ void JSGenericLowering::LowerJSStrictNotEqual(Node* node) {
}
void JSGenericLowering::LowerJSToBoolean(Node* node) {
+ // The ToBoolean conversion doesn't need the current context.
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
Callable callable = CodeFactory::ToBoolean(isolate());
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
@@ -137,6 +143,8 @@ void JSGenericLowering::LowerJSToBoolean(Node* node) {
}
void JSGenericLowering::LowerJSTypeOf(Node* node) {
+ // The typeof operator doesn't need the current context.
+ NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
Callable callable = CodeFactory::Typeof(isolate());
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
@@ -460,6 +468,9 @@ void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
}
+void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kCreateKeyValueArray);
+}
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
@@ -622,6 +633,14 @@ void JSGenericLowering::LowerJSStoreMessage(Node* node) {
NodeProperties::ChangeOp(node, machine()->Store(representation));
}
+void JSGenericLowering::LowerJSLoadModule(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
+void JSGenericLowering::LowerJSStoreModule(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSGeneratorStore(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index 10130f4039..e9ff060dd8 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -25,17 +25,15 @@ struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
int index;
};
-
JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
- Editor* editor, JSGraph* jsgraph,
- MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
+ Editor* editor, JSGraph* jsgraph, Handle<JSGlobalObject> global_object,
+ CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- native_context_(native_context),
+ global_object_(global_object),
dependencies_(dependencies),
type_cache_(TypeCache::Get()) {}
-
Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSLoadGlobal:
@@ -71,14 +69,10 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Retrieve the global object from the given {node}.
- Handle<JSGlobalObject> global_object;
- if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
-
// Try to lookup the name on the script context table first (lexical scoping).
ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(global_object, name, &result)) {
- if (result.context->is_the_hole(result.index)) return NoChange();
+ if (LookupInScriptContextTable(name, &result)) {
+ if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
Node* context = jsgraph()->HeapConstant(result.context);
Node* value = effect = graph()->NewNode(
javascript()->LoadContext(0, result.index, result.immutable), context,
@@ -89,7 +83,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
// Lookup on the global object instead. We only deal with own data
// properties of the global object here (represented as PropertyCell).
- LookupIterator it(global_object, name, LookupIterator::OWN);
+ LookupIterator it(global_object(), name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
@@ -126,12 +120,10 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
if (property_details.cell_type() == PropertyCellType::kConstantType) {
// Compute proper type based on the current value in the cell.
if (property_cell_value->IsSmi()) {
- property_cell_value_type = type_cache_.kSmi;
+ property_cell_value_type = Type::SignedSmall();
representation = MachineRepresentation::kTaggedSigned;
} else if (property_cell_value->IsNumber()) {
- // TODO(mvstanton): Remove kHeapNumber from type cache, it's just
- // Type::Number().
- property_cell_value_type = type_cache_.kHeapNumber;
+ property_cell_value_type = Type::Number();
representation = MachineRepresentation::kTaggedPointer;
} else {
// TODO(turbofan): Track the property_cell_value_map on the FieldAccess
@@ -158,14 +150,10 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Retrieve the global object from the given {node}.
- Handle<JSGlobalObject> global_object;
- if (!GetGlobalObject(node).ToHandle(&global_object)) return NoChange();
-
// Try to lookup the name on the script context table first (lexical scoping).
ScriptContextTableLookupResult result;
- if (LookupInScriptContextTable(global_object, name, &result)) {
- if (result.context->is_the_hole(result.index)) return NoChange();
+ if (LookupInScriptContextTable(name, &result)) {
+ if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
if (result.immutable) return NoChange();
Node* context = jsgraph()->HeapConstant(result.context);
effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
@@ -176,7 +164,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
// Lookup on the global object instead. We only deal with own data
// properties of the global object here (represented as PropertyCell).
- LookupIterator it(global_object, name, LookupIterator::OWN);
+ LookupIterator it(global_object(), name, LookupIterator::OWN);
if (it.state() != LookupIterator::DATA) return NoChange();
if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
Handle<PropertyCell> property_cell = it.GetPropertyCell();
@@ -256,21 +244,11 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
return Replace(value);
}
-
-MaybeHandle<JSGlobalObject> JSGlobalObjectSpecialization::GetGlobalObject(
- Node* node) {
- Node* const context = NodeProperties::GetContextInput(node);
- return NodeProperties::GetSpecializationGlobalObject(context,
- native_context());
-}
-
-
bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
- Handle<JSGlobalObject> global_object, Handle<Name> name,
- ScriptContextTableLookupResult* result) {
+ Handle<Name> name, ScriptContextTableLookupResult* result) {
if (!name->IsString()) return false;
Handle<ScriptContextTable> script_context_table(
- global_object->native_context()->script_context_table(), isolate());
+ global_object()->native_context()->script_context_table(), isolate());
ScriptContextTable::LookupResult lookup_result;
if (!ScriptContextTable::Lookup(script_context_table,
Handle<String>::cast(name), &lookup_result)) {
@@ -284,27 +262,22 @@ bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
return true;
}
-
Graph* JSGlobalObjectSpecialization::graph() const {
return jsgraph()->graph();
}
-
Isolate* JSGlobalObjectSpecialization::isolate() const {
return jsgraph()->isolate();
}
-
CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
return jsgraph()->common();
}
-
JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
return jsgraph()->javascript();
}
-
SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
return jsgraph()->simplified();
}
diff --git a/deps/v8/src/compiler/js-global-object-specialization.h b/deps/v8/src/compiler/js-global-object-specialization.h
index a6c511e9e5..50bdd80a88 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.h
+++ b/deps/v8/src/compiler/js-global-object-specialization.h
@@ -28,7 +28,7 @@ class TypeCache;
class JSGlobalObjectSpecialization final : public AdvancedReducer {
public:
JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
- MaybeHandle<Context> native_context,
+ Handle<JSGlobalObject> global_object,
CompilationDependencies* dependencies);
Reduction Reduce(Node* node) final;
@@ -37,12 +37,8 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
Reduction ReduceJSLoadGlobal(Node* node);
Reduction ReduceJSStoreGlobal(Node* node);
- // Retrieve the global object from the given {node} if known.
- MaybeHandle<JSGlobalObject> GetGlobalObject(Node* node);
-
struct ScriptContextTableLookupResult;
- bool LookupInScriptContextTable(Handle<JSGlobalObject> global_object,
- Handle<Name> name,
+ bool LookupInScriptContextTable(Handle<Name> name,
ScriptContextTableLookupResult* result);
Graph* graph() const;
@@ -51,11 +47,11 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
- MaybeHandle<Context> native_context() const { return native_context_; }
+ Handle<JSGlobalObject> global_object() const { return global_object_; }
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
- MaybeHandle<Context> native_context_;
+ Handle<JSGlobalObject> const global_object_;
CompilationDependencies* const dependencies_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index cafd047e74..8626cd1821 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -242,6 +242,13 @@ Node* JSGraph::Float64Constant(double value) {
return *loc;
}
+Node* JSGraph::PointerConstant(intptr_t value) {
+ Node** loc = cache_.FindPointerConstant(value);
+ if (*loc == nullptr) {
+ *loc = graph()->NewNode(common()->PointerConstant(value));
+ }
+ return *loc;
+}
Node* JSGraph::ExternalConstant(ExternalReference reference) {
Node** loc = cache_.FindExternalConstant(reference);
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 9d6f27dbe6..c2c0c77f42 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -5,12 +5,14 @@
#ifndef V8_COMPILER_JS_GRAPH_H_
#define V8_COMPILER_JS_GRAPH_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/common-node-cache.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/globals.h"
#include "src/isolate.h"
namespace v8 {
@@ -23,7 +25,7 @@ class Typer;
// Implements a facade on a Graph, enhancing the graph with JS-specific
// notions, including various builders for operators, canonicalized global
// constants, and various helper methods.
-class JSGraph : public ZoneObject {
+class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
public:
JSGraph(Isolate* isolate, Graph* graph, CommonOperatorBuilder* common,
JSOperatorBuilder* javascript, SimplifiedOperatorBuilder* simplified,
@@ -106,10 +108,6 @@ class JSGraph : public ZoneObject {
return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
: Int64Constant(static_cast<int64_t>(value));
}
- template <typename T>
- Node* PointerConstant(T* value) {
- return IntPtrConstant(bit_cast<intptr_t>(value));
- }
Node* RelocatableInt32Constant(int32_t value, RelocInfo::Mode rmode);
Node* RelocatableInt64Constant(int64_t value, RelocInfo::Mode rmode);
@@ -121,6 +119,13 @@ class JSGraph : public ZoneObject {
// Creates a Float64Constant node, usually canonicalized.
Node* Float64Constant(double value);
+ // Creates a PointerConstant node (asm.js only).
+ Node* PointerConstant(intptr_t value);
+ template <typename T>
+ Node* PointerConstant(T* value) {
+ return PointerConstant(bit_cast<intptr_t>(value));
+ }
+
// Creates an ExternalConstant node, usually canonicalized.
Node* ExternalConstant(ExternalReference ref);
Node* ExternalConstant(Runtime::FunctionId function_id);
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 5c626d15c6..d6229c2d64 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -23,7 +23,7 @@ namespace {
int CollectFunctions(Node* node, Handle<JSFunction>* functions,
int functions_size) {
- DCHECK_NE(0u, functions_size);
+ DCHECK_NE(0, functions_size);
HeapObjectMatcher m(node);
if (m.HasValue() && m.Value()->IsJSFunction()) {
functions[0] = Handle<JSFunction>::cast(m.Value());
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 367e35ad62..aca801103a 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -15,10 +15,11 @@ class JSInliningHeuristic final : public AdvancedReducer {
public:
enum Mode { kGeneralInlining, kRestrictedInlining, kStressInlining };
JSInliningHeuristic(Editor* editor, Mode mode, Zone* local_zone,
- CompilationInfo* info, JSGraph* jsgraph)
+ CompilationInfo* info, JSGraph* jsgraph,
+ SourcePositionTable* source_positions)
: AdvancedReducer(editor),
mode_(mode),
- inliner_(editor, local_zone, info, jsgraph),
+ inliner_(editor, local_zone, info, jsgraph, source_positions),
candidates_(local_zone),
seen_(local_zone),
jsgraph_(jsgraph) {}
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 58e5a276cc..0e122a6c14 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -116,7 +116,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
Replace(use, new_target);
} else if (index == inlinee_arity_index) {
// The projection is requesting the number of arguments.
- Replace(use, jsgraph()->Int32Constant(inliner_inputs - 2));
+ Replace(use, jsgraph()->Constant(inliner_inputs - 2));
} else if (index == inlinee_context_index) {
// The projection is requesting the inlinee function context.
Replace(use, context);
@@ -184,7 +184,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
for (Node* const input : end->inputs()) {
switch (input->opcode()) {
case IrOpcode::kReturn:
- values.push_back(NodeProperties::GetValueInput(input, 0));
+ values.push_back(NodeProperties::GetValueInput(input, 1));
effects.push_back(NodeProperties::GetEffectInput(input));
controls.push_back(NodeProperties::GetControlInput(input));
break;
@@ -282,6 +282,19 @@ Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
namespace {
+// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
+// alias analyzer?
+bool IsSame(Node* a, Node* b) {
+ if (a == b) {
+ return true;
+ } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
+ return IsSame(a->InputAt(0), b);
+ } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
+ return IsSame(a, b->InputAt(0));
+ }
+ return false;
+}
+
// TODO(bmeurer): Unify this with the witness helper functions in the
// js-builtin-reducer.cc once we have a better understanding of the
// map tracking we want to do, and eventually changed the CheckMaps
@@ -296,7 +309,7 @@ namespace {
bool NeedsConvertReceiver(Node* receiver, Node* effect) {
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
- dominator->InputAt(0) == receiver) {
+ IsSame(dominator->InputAt(0), receiver)) {
// Check if all maps have the given {instance_type}.
for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
@@ -471,8 +484,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
}
}
- Zone zone(info_->isolate()->allocator());
- ParseInfo parse_info(&zone, function);
+ Zone zone(info_->isolate()->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, shared_info);
CompilationInfo info(&parse_info, function);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
@@ -510,7 +523,8 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Remember that we inlined this function. This needs to be called right
// after we ensure deoptimization support so that the code flusher
// does not remove the code with the deoptimization support.
- info_->AddInlinedFunction(shared_info);
+ int inlining_id = info_->AddInlinedFunction(
+ shared_info, source_positions_->GetSourcePosition(node));
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
@@ -530,8 +544,9 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Run the BytecodeGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
- call.frequency());
- graph_builder.CreateGraph();
+ call.frequency(), source_positions_,
+ inlining_id);
+ graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
start = graph()->start();
@@ -549,8 +564,9 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Run the AstGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
- AstGraphBuilder graph_builder(&zone, &info, jsgraph(), call.frequency(),
- loop_assignment, type_hint_analysis);
+ AstGraphBuilderWithPositions graph_builder(
+ &zone, &info, jsgraph(), call.frequency(), loop_assignment,
+ type_hint_analysis, source_positions_, inlining_id);
graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
@@ -590,7 +606,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// constructor dispatch (allocate implicit receiver and check return value).
// This models the behavior usually accomplished by our {JSConstructStub}.
// Note that the context has to be the callers context (input to call node).
- Node* receiver = jsgraph()->UndefinedConstant(); // Implicit receiver.
+ Node* receiver = jsgraph()->TheHoleConstant(); // Implicit receiver.
if (NeedsImplicitReceiver(shared_info)) {
Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
Node* effect = NodeProperties::GetEffectInput(node);
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 323c3ae0bf..9bb8ec4643 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_JS_INLINING_H_
#define V8_COMPILER_JS_INLINING_H_
-#include "src/compiler/js-graph.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
namespace v8 {
namespace internal {
@@ -16,17 +16,20 @@ class CompilationInfo;
namespace compiler {
+class SourcePositionTable;
+
// The JSInliner provides the core graph inlining machinery. Note that this
// class only deals with the mechanics of how to inline one graph into another,
// heuristics that decide what and how much to inline are beyond its scope.
class JSInliner final : public AdvancedReducer {
public:
JSInliner(Editor* editor, Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph)
+ JSGraph* jsgraph, SourcePositionTable* source_positions)
: AdvancedReducer(editor),
local_zone_(local_zone),
info_(info),
- jsgraph_(jsgraph) {}
+ jsgraph_(jsgraph),
+ source_positions_(source_positions) {}
// Reducer interface, eagerly inlines everything.
Reduction Reduce(Node* node) final;
@@ -45,6 +48,7 @@ class JSInliner final : public AdvancedReducer {
Zone* const local_zone_;
CompilationInfo* info_;
JSGraph* const jsgraph_;
+ SourcePositionTable* const source_positions_;
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 7fc50e5f5f..52903232d7 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -54,14 +54,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceFixedArrayGet(node);
case Runtime::kInlineFixedArraySet:
return ReduceFixedArraySet(node);
- case Runtime::kInlineRegExpConstructResult:
- return ReduceRegExpConstructResult(node);
case Runtime::kInlineRegExpExec:
return ReduceRegExpExec(node);
- case Runtime::kInlineRegExpFlags:
- return ReduceRegExpFlags(node);
- case Runtime::kInlineRegExpSource:
- return ReduceRegExpSource(node);
case Runtime::kInlineSubString:
return ReduceSubString(node);
case Runtime::kInlineToInteger:
@@ -234,37 +228,11 @@ Reduction JSIntrinsicLowering::ReduceFixedArraySet(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceRegExpConstructResult(Node* node) {
- // TODO(bmeurer): Introduce JSCreateRegExpResult?
- return Change(node, CodeFactory::RegExpConstructResult(isolate()), 0);
-}
-
-
Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
return Change(node, CodeFactory::RegExpExec(isolate()), 4);
}
-Reduction JSIntrinsicLowering::ReduceRegExpFlags(Node* node) {
- Node* const receiver = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op =
- simplified()->LoadField(AccessBuilder::ForJSRegExpFlags());
- return Change(node, op, receiver, effect, control);
-}
-
-
-Reduction JSIntrinsicLowering::ReduceRegExpSource(Node* node) {
- Node* const receiver = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op =
- simplified()->LoadField(AccessBuilder::ForJSRegExpSource());
- return Change(node, op, receiver, effect, control);
-}
-
-
Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
return Change(node, CodeFactory::SubString(isolate()), 3);
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 6835a52c7e..6e984ff496 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_JS_INTRINSIC_LOWERING_H_
#define V8_COMPILER_JS_INTRINSIC_LOWERING_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -25,7 +27,8 @@ class SimplifiedOperatorBuilder;
// Lowers certain JS-level runtime calls.
-class JSIntrinsicLowering final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSIntrinsicLowering final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
enum DeoptimizationMode { kDeoptimizationEnabled, kDeoptimizationDisabled };
@@ -46,10 +49,7 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceIsSmi(Node* node);
Reduction ReduceFixedArrayGet(Node* node);
Reduction ReduceFixedArraySet(Node* node);
- Reduction ReduceRegExpConstructResult(Node* node);
Reduction ReduceRegExpExec(Node* node);
- Reduction ReduceRegExpFlags(Node* node);
- Reduction ReduceRegExpSource(Node* node);
Reduction ReduceSubString(Node* node);
Reduction ReduceToInteger(Node* node);
Reduction ReduceToLength(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index ab20d93ebe..a849fec5aa 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -57,7 +57,7 @@ bool HasOnlyStringMaps(T const& maps) {
JSNativeContextSpecialization::JSNativeContextSpecialization(
Editor* editor, JSGraph* jsgraph, Flags flags,
- MaybeHandle<Context> native_context, CompilationDependencies* dependencies,
+ Handle<Context> native_context, CompilationDependencies* dependencies,
Zone* zone)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
@@ -67,7 +67,6 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
zone_(zone),
type_cache_(TypeCache::Get()) {}
-
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSInstanceOf:
@@ -96,10 +95,6 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // Retrieve the native context from the given {node}.
- Handle<Context> native_context;
- if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
-
// If deoptimization is disabled, we cannot optimize.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
@@ -111,7 +106,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Compute property access info for @@hasInstance on {receiver}.
PropertyAccessInfo access_info;
- AccessInfoFactory access_info_factory(dependencies(), native_context,
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
graph()->zone());
if (!access_info_factory.ComputePropertyAccessInfo(
receiver_map, factory()->has_instance_symbol(), AccessMode::kLoad,
@@ -126,8 +121,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), native_context,
- holder);
+ AssumePrototypesStable(access_info.receiver_maps(), holder);
}
// Monomorphic property access.
@@ -147,8 +141,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), native_context,
- holder);
+ AssumePrototypesStable(access_info.receiver_maps(), holder);
}
// Monomorphic property access.
@@ -184,13 +177,11 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
- Handle<Context> native_context;
// Specialize JSLoadContext(NATIVE_CONTEXT_INDEX) to the known native
// context (if any), so we can constant-fold those fields, which is
// safe, since the NATIVE_CONTEXT_INDEX slot is always immutable.
- if (access.index() == Context::NATIVE_CONTEXT_INDEX &&
- GetNativeContext(node).ToHandle(&native_context)) {
- Node* value = jsgraph()->HeapConstant(native_context);
+ if (access.index() == Context::NATIVE_CONTEXT_INDEX) {
+ Node* value = jsgraph()->HeapConstant(native_context());
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -200,7 +191,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
- Node* index) {
+ Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot, Node* index) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -215,12 +206,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Not much we can do if deoptimization support is disabled.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
- // Retrieve the native context from the given {node}.
- Handle<Context> native_context;
- if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
-
// Compute property access infos for the receiver maps.
- AccessInfoFactory access_info_factory(dependencies(), native_context,
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
graph()->zone());
ZoneVector<PropertyAccessInfo> access_infos(zone());
if (!access_info_factory.ComputePropertyAccessInfos(
@@ -229,10 +216,20 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// TODO(turbofan): Add support for inlining into try blocks.
- if (NodeProperties::IsExceptionalCall(node) ||
- !(flags() & kAccessorInliningEnabled)) {
- for (auto access_info : access_infos) {
- if (access_info.IsAccessorConstant()) return NoChange();
+ bool is_exceptional = NodeProperties::IsExceptionalCall(node);
+ for (auto access_info : access_infos) {
+ if (access_info.IsAccessorConstant()) {
+ // Accessor in try-blocks are not supported yet.
+ if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
+ return NoChange();
+ }
+ } else if (access_info.IsGeneric()) {
+ // We do not handle generic calls in try blocks.
+ if (is_exceptional) return NoChange();
+ // We only handle the generic store IC case.
+ if (vector->GetKind(slot) != FeedbackVectorSlotKind::STORE_IC) {
+ return NoChange();
+ }
}
}
@@ -263,7 +260,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiver, effect, control);
} else {
// Monomorphic property access.
- effect = BuildCheckHeapObject(receiver, effect, control);
+ receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, effect, control);
effect = BuildCheckMaps(receiver, effect, control,
access_info.receiver_maps());
}
@@ -271,7 +269,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state_lazy, effect, control, name,
- native_context, access_info, access_mode);
+ access_info, access_mode, language_mode, vector, slot);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -301,7 +299,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- effect = BuildCheckHeapObject(receiver, effect, control);
+ receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect
@@ -317,7 +316,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* this_value = value;
Node* this_receiver = receiver;
Node* this_effect = effect;
- Node* this_control;
+ Node* this_control = fallthrough_control;
// Perform map check on {receiver}.
MapList const& receiver_maps = access_info.receiver_maps();
@@ -325,19 +324,19 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Emit a (sequence of) map checks for other {receiver}s.
ZoneVector<Node*> this_controls(zone());
ZoneVector<Node*> this_effects(zone());
- size_t num_classes = receiver_maps.size();
- for (auto map : receiver_maps) {
- DCHECK_LT(0u, num_classes);
- Node* check =
- graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
- jsgraph()->Constant(map));
- if (--num_classes == 0 && j == access_infos.size() - 1) {
- check = graph()->NewNode(simplified()->CheckIf(), check,
- this_effect, fallthrough_control);
- this_controls.push_back(fallthrough_control);
- this_effects.push_back(check);
- fallthrough_control = nullptr;
- } else {
+ if (j == access_infos.size() - 1) {
+ // Last map check on the fallthrough control path, do a
+ // conditional eager deoptimization exit here.
+ this_effect = BuildCheckMaps(receiver, this_effect, this_control,
+ receiver_maps);
+ this_effects.push_back(this_effect);
+ this_controls.push_back(fallthrough_control);
+ fallthrough_control = nullptr;
+ } else {
+ for (auto map : receiver_maps) {
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
+ jsgraph()->Constant(map));
Node* branch = graph()->NewNode(common()->Branch(), check,
fallthrough_control);
fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
@@ -382,7 +381,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
this_receiver, this_value, context, frame_state_lazy, this_effect,
- this_control, name, native_context, access_info, access_mode);
+ this_control, name, access_info, access_mode, language_mode, vector,
+ slot);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -449,7 +449,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
// Try to lower the named access based on the {receiver_maps}.
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
- language_mode);
+ language_mode, nexus.vector_handle(), nexus.slot());
}
@@ -462,9 +462,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
- // Optimize "prototype" property of functions.
if (m.Value()->IsJSFunction() &&
p.name().is_identical_to(factory()->prototype_string())) {
+ // Optimize "prototype" property of functions.
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
if (function->has_initial_map()) {
// We need to add a code dependency on the initial map of the
@@ -480,6 +480,13 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
return Replace(value);
}
}
+ } else if (m.Value()->IsString() &&
+ p.name().is_identical_to(factory()->length_string())) {
+ // Constant-fold "length" property on constant strings.
+ Handle<String> string = Handle<String>::cast(m.Value());
+ Node* value = jsgraph()->Constant(string->length());
+ ReplaceWithValue(node, value);
+ return Replace(value);
}
}
@@ -548,11 +555,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
value = graph()->NewNode(simplified()->StringFromCharCode(), value);
} else {
// Retrieve the native context from the given {node}.
- Handle<Context> native_context;
- if (!GetNativeContext(node).ToHandle(&native_context)) return NoChange();
-
// Compute element access infos for the receiver maps.
- AccessInfoFactory access_info_factory(dependencies(), native_context,
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
graph()->zone());
ZoneVector<ElementAccessInfo> access_infos(zone());
if (!access_info_factory.ComputeElementAccessInfos(
@@ -605,7 +609,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Ensure that {receiver} is a heap object.
- effect = BuildCheckHeapObject(receiver, effect, control);
+ receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, effect, control);
// Check for the monomorphic case.
if (access_infos.size() == 1) {
@@ -638,9 +643,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
access_info.receiver_maps());
// Access the actual element.
- ValueEffectControl continuation = BuildElementAccess(
- receiver, index, value, effect, control, native_context, access_info,
- access_mode, store_mode);
+ ValueEffectControl continuation =
+ BuildElementAccess(receiver, index, value, effect, control,
+ access_info, access_mode, store_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -684,35 +689,25 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Perform map check(s) on {receiver}.
MapList const& receiver_maps = access_info.receiver_maps();
- {
+ if (j == access_infos.size() - 1) {
+ // Last map check on the fallthrough control path, do a
+ // conditional eager deoptimization exit here.
+ this_effect = BuildCheckMaps(receiver, this_effect, this_control,
+ receiver_maps);
+ fallthrough_control = nullptr;
+ } else {
ZoneVector<Node*> this_controls(zone());
ZoneVector<Node*> this_effects(zone());
- size_t num_classes = receiver_maps.size();
for (Handle<Map> map : receiver_maps) {
- DCHECK_LT(0u, num_classes);
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), receiver_map,
jsgraph()->Constant(map));
- if (--num_classes == 0 && j == access_infos.size() - 1) {
- // Last map check on the fallthrough control path, do a
- // conditional eager deoptimization exit here.
- // TODO(turbofan): This is ugly as hell! We should probably
- // introduce macro-ish operators for property access that
- // encapsulate this whole mess.
- check = graph()->NewNode(simplified()->CheckIf(), check,
- this_effect, this_control);
- this_controls.push_back(this_control);
- this_effects.push_back(check);
- fallthrough_control = nullptr;
- } else {
- Node* branch = graph()->NewNode(common()->Branch(), check,
- fallthrough_control);
- this_controls.push_back(
- graph()->NewNode(common()->IfTrue(), branch));
- this_effects.push_back(this_effect);
- fallthrough_control =
- graph()->NewNode(common()->IfFalse(), branch);
- }
+ Node* branch = graph()->NewNode(common()->Branch(), check,
+ fallthrough_control);
+ this_controls.push_back(
+ graph()->NewNode(common()->IfTrue(), branch));
+ this_effects.push_back(this_effect);
+ fallthrough_control = graph()->NewNode(common()->IfFalse(), branch);
}
// Create single chokepoint for the control.
@@ -739,7 +734,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Access the actual element.
ValueEffectControl continuation = BuildElementAccess(
this_receiver, this_index, this_value, this_effect, this_control,
- native_context, access_info, access_mode, store_mode);
+ access_info, access_mode, store_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -780,8 +775,48 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
- Node* const receiver = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Optimize access for constant {receiver}.
+ HeapObjectMatcher mreceiver(receiver);
+ if (mreceiver.HasValue() && mreceiver.Value()->IsString()) {
+ Handle<String> string = Handle<String>::cast(mreceiver.Value());
+
+ // We can only assume that the {index} is a valid array index if the IC
+ // is in element access mode and not MEGAMORPHIC, otherwise there's no
+ // guard for the bounds check below.
+ if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
+ // Strings are immutable in JavaScript.
+ if (access_mode == AccessMode::kStore) return NoChange();
+
+ // Properly deal with constant {index}.
+ NumberMatcher mindex(index);
+ if (mindex.IsInteger() && mindex.IsInRange(0.0, string->length() - 1)) {
+ // Constant-fold the {index} access to {string}.
+ Node* value = jsgraph()->HeapConstant(
+ factory()->LookupSingleCharacterStringFromCode(
+ string->Get(static_cast<int>(mindex.Value()))));
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ } else if (flags() & kDeoptimizationEnabled) {
+ // Ensure that {index} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(string->length());
+ index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
+ length, effect, control);
+
+ // Load the character from the {receiver}.
+ value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ index, control);
+
+ // Return it as a single character string.
+ value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
// Check if the {nexus} reports type feedback for the IC.
if (nexus.IsUninitialized()) {
@@ -824,21 +859,28 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
} else {
name = factory()->InternalizeName(name);
return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
- language_mode);
+ language_mode, nexus.vector_handle(),
+ nexus.slot());
}
}
}
// Check if we have feedback for a named access.
if (Name* name = nexus.FindFirstName()) {
- return ReduceNamedAccess(node, value, receiver_maps,
- handle(name, isolate()), access_mode,
- language_mode, index);
+ return ReduceNamedAccess(
+ node, value, receiver_maps, handle(name, isolate()), access_mode,
+ language_mode, nexus.vector_handle(), nexus.slot(), index);
} else if (nexus.GetKeyType() != ELEMENT) {
// The KeyedLoad/StoreIC has seen non-element accesses, so we cannot assume
// that the {index} is a valid array index, thus we just let the IC continue
// to deal with this load/store.
return NoChange();
+ } else if (nexus.ic_state() == MEGAMORPHIC) {
+ // The KeyedLoad/StoreIC uses the MEGAMORPHIC state to guard the assumption
+ // that a numeric {index} is within the valid bounds for {receiver}, i.e.
+ // it transitions to MEGAMORPHIC once it sees an out-of-bounds access. Thus
+ // we cannot continue here if the IC state is MEGAMORPHIC.
+ return NoChange();
}
// Try to lower the element access based on the {receiver_maps}.
@@ -900,12 +942,13 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildPropertyAccess(
Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
- Node* control, Handle<Name> name, Handle<Context> native_context,
- PropertyAccessInfo const& access_info, AccessMode access_mode) {
+ Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
+ AccessMode access_mode, LanguageMode language_mode,
+ Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), native_context, holder);
+ AssumePrototypesStable(access_info.receiver_maps(), holder);
}
// Generate the actual property access.
@@ -943,12 +986,26 @@ JSNativeContextSpecialization::BuildPropertyAccess(
context, target, frame_state);
// Introduce the call to the getter function.
- value = effect = graph()->NewNode(
- javascript()->CallFunction(
- 2, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, context, frame_state0, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), value);
+ if (access_info.constant()->IsJSFunction()) {
+ value = effect = graph()->NewNode(
+ javascript()->CallFunction(
+ 2, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, context, frame_state0, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), value);
+ } else {
+ DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+ Handle<FunctionTemplateInfo> function_template_info(
+ Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+ DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+ ZoneVector<Node*> stack_parameters(graph()->zone());
+ ValueEffectControl value_effect_control = InlineApiCall(
+ receiver, context, target, frame_state0, &stack_parameters,
+ effect, control, shared_info, function_template_info);
+ value = value_effect_control.value();
+ effect = value_effect_control.effect();
+ control = value_effect_control.control();
+ }
break;
}
case AccessMode::kStore: {
@@ -966,17 +1023,31 @@ JSNativeContextSpecialization::BuildPropertyAccess(
context, target, frame_state);
// Introduce the call to the setter function.
- effect = graph()->NewNode(javascript()->CallFunction(
- 3, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, value, context,
- frame_state0, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), effect);
+ if (access_info.constant()->IsJSFunction()) {
+ effect = graph()->NewNode(
+ javascript()->CallFunction(
+ 3, 0.0f, VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, value, context, frame_state0, effect, control);
+ control = graph()->NewNode(common()->IfSuccess(), effect);
+ } else {
+ DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+ Handle<FunctionTemplateInfo> function_template_info(
+ Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+ DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+ ZoneVector<Node*> stack_parameters(graph()->zone());
+ stack_parameters.push_back(value);
+ ValueEffectControl value_effect_control = InlineApiCall(
+ receiver, context, target, frame_state0, &stack_parameters,
+ effect, control, shared_info, function_template_info);
+ value = value_effect_control.value();
+ effect = value_effect_control.effect();
+ control = value_effect_control.control();
+ }
break;
}
}
- } else {
- DCHECK(access_info.IsDataField());
+ } else if (access_info.IsDataField()) {
FieldIndex const field_index = access_info.field_index();
Type* const field_type = access_info.field_type();
MachineRepresentation const field_representation =
@@ -1128,6 +1199,28 @@ JSNativeContextSpecialization::BuildPropertyAccess(
jsgraph()->UndefinedConstant(), effect);
}
}
+ } else {
+ DCHECK(access_info.IsGeneric());
+ DCHECK_EQ(AccessMode::kStore, access_mode);
+ DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+ Callable callable =
+ CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), descriptor,
+ descriptor.GetStackParameterCount(), CallDescriptor::kNeedsFrameState,
+ Operator::kNoProperties);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* name_node = jsgraph()->HeapConstant(name);
+ Node* slot_node = jsgraph()->Constant(vector->GetIndex(slot));
+ Node* vector_node = jsgraph()->HeapConstant(vector);
+
+ Node* inputs[] = {stub_code, receiver, name_node, value, slot_node,
+ vector_node, context, frame_state, effect, control};
+
+ value = effect = control =
+ graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
+ control = graph()->NewNode(common()->IfSuccess(), control);
}
return ValueEffectControl(value, effect, control);
@@ -1154,8 +1247,8 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- Handle<Context> native_context, ElementAccessInfo const& access_info,
- AccessMode access_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessInfo const& access_info, AccessMode access_mode,
+ KeyedAccessStoreMode store_mode) {
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
@@ -1232,6 +1325,14 @@ JSNativeContextSpecialization::BuildElementAccess(
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
effect, control);
+ // Introduce the appropriate truncation for {value}. Currently we
+ // only need to do this for ClamedUint8Array {receiver}s, as the
+ // other truncations are implicit in the StoreTypedElement, but we
+ // might want to change that at some point.
+ if (external_array_type == kExternalUint8ClampedArray) {
+ value = graph()->NewNode(simplified()->NumberToUint8Clamped(), value);
+ }
+
// Check if we can skip the out-of-bounds store.
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
Node* check =
@@ -1304,7 +1405,7 @@ JSNativeContextSpecialization::BuildElementAccess(
element_type = Type::Number();
element_machine_type = MachineType::Float64();
} else if (IsFastSmiElementsKind(elements_kind)) {
- element_type = type_cache_.kSmi;
+ element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
@@ -1330,7 +1431,7 @@ JSNativeContextSpecialization::BuildElementAccess(
if (elements_kind == FAST_HOLEY_ELEMENTS ||
elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
// Check if we are allowed to turn the hole into undefined.
- if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
// Turn the hole into undefined.
value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
value);
@@ -1343,7 +1444,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// Perform the hole check on the result.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
// Check if we are allowed to return the hole directly.
- if (CanTreatHoleAsUndefined(receiver_maps, native_context)) {
+ if (CanTreatHoleAsUndefined(receiver_maps)) {
// Return the signaling NaN hole directly if all uses are truncating.
mode = CheckFloat64HoleMode::kAllowReturnHole;
}
@@ -1397,6 +1498,65 @@ JSNativeContextSpecialization::BuildElementAccess(
return ValueEffectControl(value, effect, control);
}
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::InlineApiCall(
+ Node* receiver, Node* context, Node* target, Node* frame_state,
+ ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+ Handle<SharedFunctionInfo> shared_info,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ Handle<CallHandlerInfo> call_handler_info = handle(
+ CallHandlerInfo::cast(function_template_info->call_code()), isolate());
+ Handle<Object> call_data_object(call_handler_info->data(), isolate());
+
+ // The stub always expects the receiver as the first param on the stack.
+ CallApiCallbackStub stub(
+ isolate(), static_cast<int>(stack_parameters->size()),
+ call_data_object->IsUndefined(isolate()),
+ true /* TODO(epertoso): similar to CallOptimization */);
+ CallInterfaceDescriptor call_interface_descriptor =
+ stub.GetCallInterfaceDescriptor();
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), call_interface_descriptor,
+ call_interface_descriptor.GetStackParameterCount() +
+ static_cast<int>(stack_parameters->size()) + 1,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1);
+
+ Node* data = jsgraph()->Constant(call_data_object);
+ ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
+ Node* function_reference =
+ graph()->NewNode(common()->ExternalConstant(ExternalReference(
+ &function, ExternalReference::DIRECT_API_CALL, isolate())));
+ Node* code = jsgraph()->HeapConstant(stub.GetCode());
+
+ ZoneVector<Node*> inputs(zone());
+ inputs.push_back(code);
+
+ // CallApiCallbackStub's register arguments.
+ inputs.push_back(target);
+ inputs.push_back(data);
+ inputs.push_back(receiver);
+ inputs.push_back(function_reference);
+
+ // Stack parameters: CallApiCallbackStub expects the first one to be the
+ // receiver.
+ inputs.push_back(receiver);
+ for (Node* node : *stack_parameters) {
+ inputs.push_back(node);
+ }
+ inputs.push_back(context);
+ inputs.push_back(frame_state);
+ inputs.push_back(effect);
+ inputs.push_back(control);
+
+ Node* effect0;
+ Node* value0 = effect0 =
+ graph()->NewNode(common()->Call(call_descriptor),
+ static_cast<int>(inputs.size()), inputs.data());
+ Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
+ return ValueEffectControl(value0, effect0, control0);
+}
+
Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* receiver, Node* effect, Node* control,
std::vector<Handle<Map>> const& maps) {
@@ -1425,42 +1585,14 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
inputs);
}
-Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
- Node* effect,
- Node* control) {
- switch (receiver->opcode()) {
- case IrOpcode::kHeapConstant:
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSConvertReceiver:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSToString:
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSTypeOf: {
- return effect;
- }
- default: {
- return graph()->NewNode(simplified()->CheckHeapObject(), receiver, effect,
- control);
- }
- }
-}
-
void JSNativeContextSpecialization::AssumePrototypesStable(
- std::vector<Handle<Map>> const& receiver_maps,
- Handle<Context> native_context, Handle<JSObject> holder) {
+ std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
// Determine actual holder and perform prototype chain checks.
for (auto map : receiver_maps) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context)
+ if (Map::GetConstructorFunction(map, native_context())
.ToHandle(&constructor)) {
map = handle(constructor->initial_map(), isolate());
}
@@ -1469,16 +1601,15 @@ void JSNativeContextSpecialization::AssumePrototypesStable(
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
- std::vector<Handle<Map>> const& receiver_maps,
- Handle<Context> native_context) {
+ std::vector<Handle<Map>> const& receiver_maps) {
// Check if the array prototype chain is intact.
if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
// Make sure both the initial Array and Object prototypes are stable.
Handle<JSObject> initial_array_prototype(
- native_context->initial_array_prototype(), isolate());
+ native_context()->initial_array_prototype(), isolate());
Handle<JSObject> initial_object_prototype(
- native_context->initial_object_prototype(), isolate());
+ native_context()->initial_object_prototype(), isolate());
if (!initial_array_prototype->map()->is_stable() ||
!initial_object_prototype->map()->is_stable()) {
return false;
@@ -1587,44 +1718,30 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
return MaybeHandle<Map>();
}
-MaybeHandle<Context> JSNativeContextSpecialization::GetNativeContext(
- Node* node) {
- Node* const context = NodeProperties::GetContextInput(node);
- return NodeProperties::GetSpecializationNativeContext(context,
- native_context());
-}
-
-
Graph* JSNativeContextSpecialization::graph() const {
return jsgraph()->graph();
}
-
Isolate* JSNativeContextSpecialization::isolate() const {
return jsgraph()->isolate();
}
-
Factory* JSNativeContextSpecialization::factory() const {
return isolate()->factory();
}
-
MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
return jsgraph()->machine();
}
-
CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
return jsgraph()->common();
}
-
JSOperatorBuilder* JSNativeContextSpecialization::javascript() const {
return jsgraph()->javascript();
}
-
SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
return jsgraph()->simplified();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index c015de08e7..2d07061d11 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -8,6 +8,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/deoptimize-reason.h"
+#include "src/type-feedback-vector.h"
namespace v8 {
namespace internal {
@@ -15,7 +16,6 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
-class FeedbackNexus;
namespace compiler {
@@ -46,7 +46,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
typedef base::Flags<Flag> Flags;
JSNativeContextSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
- MaybeHandle<Context> native_context,
+ Handle<Context> native_context,
CompilationDependencies* dependencies,
Zone* zone);
@@ -79,7 +79,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
- Node* index = nullptr);
+ Handle<TypeFeedbackVector> vector,
+ FeedbackVectorSlot slot, Node* index = nullptr);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
@@ -100,38 +101,34 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
- Node* context, Node* frame_state,
- Node* effect, Node* control,
- Handle<Name> name,
- Handle<Context> native_context,
- PropertyAccessInfo const& access_info,
- AccessMode access_mode);
+ ValueEffectControl BuildPropertyAccess(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node* effect, Node* control, Handle<Name> name,
+ PropertyAccessInfo const& access_info, AccessMode access_mode,
+ LanguageMode language_mode, Handle<TypeFeedbackVector> vector,
+ FeedbackVectorSlot slot);
// Construct the appropriate subgraph for element access.
- ValueEffectControl BuildElementAccess(
- Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- Handle<Context> native_context, ElementAccessInfo const& access_info,
- AccessMode access_mode, KeyedAccessStoreMode store_mode);
+ ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
+ Node* value, Node* effect,
+ Node* control,
+ ElementAccessInfo const& access_info,
+ AccessMode access_mode,
+ KeyedAccessStoreMode store_mode);
// Construct an appropriate map check.
Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
std::vector<Handle<Map>> const& maps);
- // Construct an appropriate heap object check.
- Node* BuildCheckHeapObject(Node* receiver, Node* effect, Node* control);
-
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
- Handle<Context> native_context,
Handle<JSObject> holder);
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
- bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps,
- Handle<Context> native_context);
+ bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps);
// Extract receiver maps from {nexus} and filter based on {receiver} if
// possible.
@@ -147,8 +144,11 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// program location.
MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
- // Retrieve the native context from the given {node} if known.
- MaybeHandle<Context> GetNativeContext(Node* node);
+ ValueEffectControl InlineApiCall(
+ Node* receiver, Node* context, Node* target, Node* frame_state,
+ ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+ Handle<SharedFunctionInfo> shared_info,
+ Handle<FunctionTemplateInfo> function_template_info);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -159,13 +159,13 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
Flags flags() const { return flags_; }
- MaybeHandle<Context> native_context() const { return native_context_; }
+ Handle<Context> native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
JSGraph* const jsgraph_;
Flags const flags_;
- MaybeHandle<Context> native_context_;
+ Handle<Context> native_context_;
CompilationDependencies* const dependencies_;
Zone* const zone_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 21e905aee6..f64630c589 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -445,6 +445,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ToString, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kEliminatable, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
@@ -766,6 +767,23 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
access); // parameter
}
+const Operator* JSOperatorBuilder::LoadModule(int32_t cell_index) {
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kJSLoadModule, // opcode
+ Operator::kNoWrite | Operator::kNoThrow, // flags
+ "JSLoadModule", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ cell_index); // parameter
+}
+
+const Operator* JSOperatorBuilder::StoreModule(int32_t cell_index) {
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kJSStoreModule, // opcode
+ Operator::kNoRead | Operator::kNoThrow, // flags
+ "JSStoreModule", // name
+ 2, 1, 1, 0, 1, 0, // counts
+ cell_index); // parameter
+}
const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
return new (zone()) Operator1<CreateArgumentsType>( // --
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 2374ae63ae..9cdd30594a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_JS_OPERATOR_H_
#define V8_COMPILER_JS_OPERATOR_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
@@ -19,7 +21,7 @@ struct JSOperatorGlobalCache;
// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorSlot}, which
// is used to access the type feedback for a certain {Node}.
-class VectorSlotPair {
+class V8_EXPORT_PRIVATE VectorSlotPair {
public:
VectorSlotPair();
VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
@@ -182,7 +184,7 @@ bool operator!=(ContextAccess const&, ContextAccess const&);
size_t hash_value(ContextAccess const&);
-std::ostream& operator<<(std::ostream&, ContextAccess const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ContextAccess const&);
ContextAccess const& ContextAccessOf(Operator const*);
@@ -416,7 +418,8 @@ CompareOperationHint CompareOperationHintOf(const Operator* op);
// Interface for building JavaScript-level operators, e.g. directly from the
// AST. Most operators have no parameters, thus can be globally shared for all
// graphs.
-class JSOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE JSOperatorBuilder final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit JSOperatorBuilder(Zone* zone);
@@ -455,6 +458,7 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
PretenureFlag pretenure);
const Operator* CreateIterResultObject();
+ const Operator* CreateKeyValueArray();
const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
int literal_flags, int literal_index,
int number_of_elements);
@@ -499,6 +503,9 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
+ const Operator* LoadModule(int32_t cell_index);
+ const Operator* StoreModule(int32_t cell_index);
+
const Operator* TypeOf();
const Operator* InstanceOf();
const Operator* OrdinaryHasInstance();
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 82df4edf24..dbbeca6e96 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-typed-lowering.h"
+#include "src/ast/modules.h"
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/compilation-dependencies.h"
@@ -82,16 +83,13 @@ class JSBinopReduction final {
if (BothInputsAre(Type::String()) ||
((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
- if (right_type()->IsConstant() &&
- right_type()->AsConstant()->Value()->IsString()) {
- Handle<String> right_string =
- Handle<String>::cast(right_type()->AsConstant()->Value());
+ HeapObjectBinopMatcher m(node_);
+ if (m.right().HasValue() && m.right().Value()->IsString()) {
+ Handle<String> right_string = Handle<String>::cast(m.right().Value());
if (right_string->length() >= ConsString::kMinLength) return true;
}
- if (left_type()->IsConstant() &&
- left_type()->AsConstant()->Value()->IsString()) {
- Handle<String> left_string =
- Handle<String>::cast(left_type()->AsConstant()->Value());
+ if (m.left().HasValue() && m.left().Value()->IsString()) {
+ Handle<String> left_string = Handle<String>::cast(m.left().Value());
if (left_string->length() >= ConsString::kMinLength) {
// The invariant for ConsString requires the left hand side to be
// a sequential or external string if the right hand side is the
@@ -454,7 +452,6 @@ class JSBinopReduction final {
// - immediately put in type bounds for all new nodes
// - relax effects from generic but not-side-effecting operations
-
JSTypedLowering::JSTypedLowering(Editor* editor,
CompilationDependencies* dependencies,
Flags flags, JSGraph* jsgraph, Zone* zone)
@@ -463,7 +460,7 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
flags_(flags),
jsgraph_(jsgraph),
the_hole_type_(
- Type::Constant(factory()->the_hole_value(), graph()->zone())),
+ Type::HeapConstant(factory()->the_hole_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
@@ -529,7 +526,7 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
NumberOperationHint hint;
if (r.GetBinaryNumberOperationHint(&hint)) {
if (hint == NumberOperationHint::kNumberOrOddball &&
- r.BothInputsAre(Type::PlainPrimitive())) {
+ r.BothInputsAre(Type::NumberOrOddball())) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
}
@@ -604,21 +601,20 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
}
// Determine the {first} length.
+ HeapObjectBinopMatcher m(node);
Node* first_length =
- first_type->IsConstant()
+ (m.left().HasValue() && m.left().Value()->IsString())
? jsgraph()->Constant(
- Handle<String>::cast(first_type->AsConstant()->Value())
- ->length())
+ Handle<String>::cast(m.left().Value())->length())
: effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForStringLength()),
first, effect, control);
// Determine the {second} length.
Node* second_length =
- second_type->IsConstant()
+ (m.right().HasValue() && m.right().Value()->IsString())
? jsgraph()->Constant(
- Handle<String>::cast(second_type->AsConstant()->Value())
- ->length())
+ Handle<String>::cast(m.right().Value())->length())
: effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForStringLength()),
second, effect, control);
@@ -630,33 +626,44 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// Check if we would overflow the allowed maximum string length.
Node* check = graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
jsgraph()->Constant(String::kMaxLength));
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- {
- // Throw a RangeError in case of overflow.
- Node* vfalse = efalse = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), context,
- frame_state, efalse, if_false);
- if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
- if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), if_false);
- Revisit(graph()->end());
-
- // Update potential {IfException} uses of {node} to point to the
- // %ThrowInvalidStringLength runtime call node instead.
- for (Edge edge : node->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kIfException) {
- DCHECK(NodeProperties::IsControlEdge(edge) ||
- NodeProperties::IsEffectEdge(edge));
- edge.UpdateTo(vfalse);
- Revisit(edge.from());
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ // Add a code dependency on the string length overflow protector.
+ dependencies()->AssumePropertyCell(factory()->string_length_protector());
+
+ // We can just deoptimize if the {check} fails. Besides generating a
+ // shorter code sequence than the version below, this has the additional
+ // benefit of not holding on to the lazy {frame_state} and thus potentially
+ // reduces the number of live ranges and allows for more truncations.
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ } else {
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ {
+ // Throw a RangeError in case of overflow.
+ Node* vfalse = efalse = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
+ context, frame_state, efalse, if_false);
+ if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+ if_false = graph()->NewNode(common()->Throw(), vfalse, efalse, if_false);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), if_false);
+ Revisit(graph()->end());
+
+ // Update potential {IfException} uses of {node} to point to the
+ // %ThrowInvalidStringLength runtime call node instead.
+ for (Edge edge : node->use_edges()) {
+ if (edge.from()->opcode() == IrOpcode::kIfException) {
+ DCHECK(NodeProperties::IsControlEdge(edge) ||
+ NodeProperties::IsEffectEdge(edge));
+ edge.UpdateTo(vfalse);
+ Revisit(edge.from());
+ }
}
}
+ control = graph()->NewNode(common()->IfTrue(), branch);
}
- control = graph()->NewNode(common()->IfTrue(), branch);
// Figure out the map for the resulting ConsString.
// TODO(turbofan): We currently just use the cons_string_map here for
@@ -676,7 +683,7 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
value, value_map, effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForNameHashField()), value,
- jsgraph()->Uint32Constant(Name::kEmptyHashField), effect, control);
+ jsgraph()->Constant(Name::kEmptyHashField), effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForStringLength()), value, length,
effect, control);
@@ -768,6 +775,35 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
}
}
+Reduction JSTypedLowering::ReduceJSTypeOf(Node* node) {
+ Node* const input = node->InputAt(0);
+ Type* type = NodeProperties::GetType(input);
+ Factory* const f = factory();
+ if (type->Is(Type::Boolean())) {
+ return Replace(jsgraph()->Constant(f->boolean_string()));
+ } else if (type->Is(Type::Number())) {
+ return Replace(jsgraph()->Constant(f->number_string()));
+ } else if (type->Is(Type::String())) {
+ return Replace(jsgraph()->Constant(f->string_string()));
+ } else if (type->Is(Type::Symbol())) {
+ return Replace(jsgraph()->Constant(f->symbol_string()));
+ } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
+ graph()->zone()))) {
+ return Replace(jsgraph()->Constant(f->undefined_string()));
+ } else if (type->Is(Type::Null())) {
+ return Replace(jsgraph()->Constant(f->object_string()));
+ } else if (type->Is(Type::Function())) {
+ return Replace(jsgraph()->Constant(f->function_string()));
+ } else if (type->IsHeapConstant()) {
+ return Replace(jsgraph()->Constant(
+ Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
+ } else if (type->IsOtherNumberConstant()) {
+ return Replace(jsgraph()->Constant(f->number_string()));
+ }
+
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
HeapObjectBinopMatcher m(node);
if (m.left().IsJSTypeOf() && m.right().HasValue() &&
@@ -949,6 +985,17 @@ Reduction JSTypedLowering::ReduceJSToInteger(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSToName(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Name())) {
+ // JSToName(x:name) => x
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
Node* input = NodeProperties::GetValueInput(node, 0);
Type* input_type = NodeProperties::GetType(input);
@@ -976,12 +1023,17 @@ Reduction JSTypedLowering::ReduceJSToLength(Node* node) {
Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
// Try constant-folding of JSToNumber with constant inputs.
Type* input_type = NodeProperties::GetType(input);
- if (input_type->IsConstant()) {
- Handle<Object> input_value = input_type->AsConstant()->Value();
- if (input_value->IsString()) {
+ if (input_type->Is(Type::String())) {
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsString()) {
+ Handle<Object> input_value = m.Value();
return Replace(jsgraph()->Constant(
String::ToNumber(Handle<String>::cast(input_value))));
- } else if (input_value->IsOddball()) {
+ }
+ }
+ if (input_type->IsHeapConstant()) {
+ Handle<Object> input_value = input_type->AsHeapConstant()->Value();
+ if (input_value->IsOddball()) {
return Replace(jsgraph()->Constant(
Oddball::ToNumber(Handle<Oddball>::cast(input_value))));
}
@@ -1270,12 +1322,12 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Check if the {constructor} is a (known) JSFunction.
- if (!constructor_type->IsConstant() ||
- !constructor_type->AsConstant()->Value()->IsJSFunction()) {
+ if (!constructor_type->IsHeapConstant() ||
+ !constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
return NoChange();
}
Handle<JSFunction> function =
- Handle<JSFunction>::cast(constructor_type->AsConstant()->Value());
+ Handle<JSFunction>::cast(constructor_type->AsHeapConstant()->Value());
// Check if the {function} already has an initial map (i.e. the
// {function} has been used as a constructor at least once).
@@ -1457,6 +1509,81 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSLoadModule(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadModule, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ int32_t cell_index = OpParameter<int32_t>(node);
+ Node* module = NodeProperties::GetValueInput(node, 0);
+
+ Node* array;
+ int index;
+ if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
+ ModuleDescriptor::kExport) {
+ array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
+ module, effect, control);
+ index = cell_index - 1;
+ } else {
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+ ModuleDescriptor::kImport);
+ array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
+ module, effect, control);
+ index = -cell_index - 1;
+ }
+
+ Node* cell = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
+ effect, control);
+
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
+ cell, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
+}
+
+Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreModule, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ int32_t cell_index = OpParameter<int32_t>(node);
+ Node* module = NodeProperties::GetValueInput(node, 0);
+ Node* value = NodeProperties::GetValueInput(node, 1);
+
+ Node* array;
+ int index;
+ if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
+ ModuleDescriptor::kExport) {
+ array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
+ module, effect, control);
+ index = cell_index - 1;
+ } else {
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+ ModuleDescriptor::kImport);
+ array = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
+ module, effect, control);
+ index = -cell_index - 1;
+ }
+
+ Node* cell = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
+ effect, control);
+
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForCellValue()),
+ cell, value, effect, control);
+
+ ReplaceWithValue(node, effect, effect, control);
+ return Changed(value);
+}
+
Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
DCHECK_EQ(IrOpcode::kJSConvertReceiver, node->opcode());
ConvertReceiverMode mode = ConvertReceiverModeOf(node->op());
@@ -1478,9 +1605,9 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
// with the global proxy unconditionally.
if (receiver_type->Is(Type::NullOrUndefined()) ||
mode == ConvertReceiverMode::kNullOrUndefined) {
- if (context_type->IsConstant()) {
+ if (context_type->IsHeapConstant()) {
Handle<JSObject> global_proxy(
- Handle<Context>::cast(context_type->AsConstant()->Value())
+ Handle<Context>::cast(context_type->AsHeapConstant()->Value())
->global_proxy(),
isolate());
receiver = jsgraph()->Constant(global_proxy);
@@ -1583,9 +1710,9 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
Node* eglobal = effect;
Node* rglobal;
{
- if (context_type->IsConstant()) {
+ if (context_type->IsHeapConstant()) {
Handle<JSObject> global_proxy(
- Handle<Context>::cast(context_type->AsConstant()->Value())
+ Handle<Context>::cast(context_type->AsHeapConstant()->Value())
->global_proxy(),
isolate());
rglobal = jsgraph()->Constant(global_proxy);
@@ -1640,6 +1767,7 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
DCHECK(Builtins::HasCppImplementation(builtin_index));
+ DCHECK_EQ(0, flags & CallDescriptor::kSupportsTailCalls);
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = is_construct
@@ -1664,7 +1792,7 @@ void ReduceBuiltin(Isolate* isolate, JSGraph* jsgraph, Node* node,
}
const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
- Node* argc_node = jsgraph->Int32Constant(argc);
+ Node* argc_node = jsgraph->Constant(argc);
static const int kStubAndReceiver = 2;
int cursor = arity + kStubAndReceiver;
@@ -1708,10 +1836,10 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Check if {target} is a known JSFunction.
- if (target_type->IsConstant() &&
- target_type->AsConstant()->Value()->IsJSFunction()) {
+ if (target_type->IsHeapConstant() &&
+ target_type->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
- Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
const int builtin_index = shared->construct_stub()->builtin_index();
const bool is_builtin = (builtin_index != -1);
@@ -1740,7 +1868,7 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1759,7 +1887,7 @@ Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1793,10 +1921,10 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
}
// Check if {target} is a known JSFunction.
- if (target_type->IsConstant() &&
- target_type->AsConstant()->Value()->IsJSFunction()) {
+ if (target_type->IsHeapConstant() &&
+ target_type->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
- Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<JSFunction>::cast(target_type->AsHeapConstant()->Value());
Handle<SharedFunctionInfo> shared(function->shared(), isolate());
const int builtin_index = shared->code()->builtin_index();
const bool is_builtin = (builtin_index != -1);
@@ -1830,7 +1958,7 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
}
Node* new_target = jsgraph()->UndefinedConstant();
- Node* argument_count = jsgraph()->Int32Constant(arity);
+ Node* argument_count = jsgraph()->Constant(arity);
if (NeedsArgumentAdaptorFrame(shared, arity)) {
// Patch {node} to an indirect call via the ArgumentsAdaptorTrampoline.
Callable callable = CodeFactory::ArgumentAdaptor(isolate());
@@ -1840,12 +1968,13 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
node->InsertInput(graph()->zone(), 3, argument_count);
node->InsertInput(
graph()->zone(), 4,
- jsgraph()->Int32Constant(shared->internal_formal_parameter_count()));
+ jsgraph()->Constant(shared->internal_formal_parameter_count()));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(),
1 + arity, flags)));
- } else if (is_builtin && Builtins::HasCppImplementation(builtin_index)) {
+ } else if (is_builtin && Builtins::HasCppImplementation(builtin_index) &&
+ ((flags & CallDescriptor::kSupportsTailCalls) == 0)) {
// Patch {node} to a direct CEntryStub call.
ReduceBuiltin(isolate(), jsgraph(), node, builtin_index, arity, flags);
} else {
@@ -1871,7 +2000,7 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 1 + arity,
@@ -2074,12 +2203,16 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSToInteger(node);
case IrOpcode::kJSToLength:
return ReduceJSToLength(node);
+ case IrOpcode::kJSToName:
+ return ReduceJSToName(node);
case IrOpcode::kJSToNumber:
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
case IrOpcode::kJSToObject:
return ReduceJSToObject(node);
+ case IrOpcode::kJSTypeOf:
+ return ReduceJSTypeOf(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSLoadProperty:
@@ -2090,6 +2223,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSLoadContext(node);
case IrOpcode::kJSStoreContext:
return ReduceJSStoreContext(node);
+ case IrOpcode::kJSLoadModule:
+ return ReduceJSLoadModule(node);
+ case IrOpcode::kJSStoreModule:
+ return ReduceJSStoreModule(node);
case IrOpcode::kJSConvertReceiver:
return ReduceJSConvertReceiver(node);
case IrOpcode::kJSCallConstruct:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index b0cf1f4f3d..3e710226b4 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -5,9 +5,11 @@
#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
#define V8_COMPILER_JS_TYPED_LOWERING_H_
+#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/opcodes.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -26,7 +28,8 @@ class SimplifiedOperatorBuilder;
class TypeCache;
// Lowers JS-level operators to simplified operators based on types.
-class JSTypedLowering final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE JSTypedLowering final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
// Flags that control the mode of operation.
enum Flag {
@@ -52,12 +55,15 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
+ Reduction ReduceJSLoadModule(Node* node);
+ Reduction ReduceJSStoreModule(Node* node);
Reduction ReduceJSEqualTypeOf(Node* node, bool invert);
Reduction ReduceJSEqual(Node* node, bool invert);
Reduction ReduceJSStrictEqual(Node* node, bool invert);
Reduction ReduceJSToBoolean(Node* node);
Reduction ReduceJSToInteger(Node* node);
Reduction ReduceJSToLength(Node* node);
+ Reduction ReduceJSToName(Node* node);
Reduction ReduceJSToNumberInput(Node* input);
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
@@ -70,6 +76,7 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+ Reduction ReduceJSTypeOf(Node* node);
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index 55542825e7..d7d4f91c94 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -143,7 +143,7 @@ void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
InstructionSequence* code) {
if (!FLAG_turbo_jt) return;
- Zone local_zone(code->isolate()->allocator());
+ Zone local_zone(code->isolate()->allocator(), ZONE_NAME);
ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
// Skip empty blocks when the previous block doesn't fall through.
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 523ce47b0f..971ea7212d 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -107,6 +107,23 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
+int CallDescriptor::CalculateFixedFrameSize() const {
+ switch (kind_) {
+ case kCallJSFunction:
+ return PushArgumentCount()
+ ? OptimizedBuiltinFrameConstants::kFixedSlotCount
+ : StandardFrameConstants::kFixedSlotCount;
+ break;
+ case kCallAddress:
+ return CommonFrameConstants::kFixedSlotCountAboveFp +
+ CommonFrameConstants::kCPSlotCount;
+ break;
+ case kCallCodeObject:
+ return TypedFrameConstants::kFixedSlotCount;
+ }
+ UNREACHABLE();
+ return 0;
+}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
DCHECK(!info->IsStub());
@@ -168,8 +185,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineIsRegExp:
case Runtime::kInlineIsSmi:
case Runtime::kInlineIsTypedArray:
- case Runtime::kInlineRegExpFlags:
- case Runtime::kInlineRegExpSource:
return false;
default:
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 6f302bc534..b515aca2da 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -5,10 +5,12 @@
#ifndef V8_COMPILER_LINKAGE_H_
#define V8_COMPILER_LINKAGE_H_
+#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
#include "src/compiler/frame.h"
#include "src/compiler/operator.h"
#include "src/frames.h"
+#include "src/globals.h"
#include "src/machine-type.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone.h"
@@ -161,7 +163,8 @@ typedef Signature<LinkageLocation> LocationSignature;
// Describes a call to various parts of the compiler. Every call has the notion
// of a "target", which is the first input to the call.
-class CallDescriptor final : public ZoneObject {
+class V8_EXPORT_PRIVATE CallDescriptor final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
// Describes the kind of this call, which determines the target.
enum Kind {
@@ -184,7 +187,9 @@ class CallDescriptor final : public ZoneObject {
// Causes the code generator to initialize the root register.
kInitializeRootRegister = 1u << 7,
// Does not ever try to allocate space on our heap.
- kNoAllocate = 1u << 8
+ kNoAllocate = 1u << 8,
+ // Push argument count as part of function prologue.
+ kPushArgumentCount = 1u << 9
};
typedef base::Flags<Flag> Flags;
@@ -246,6 +251,7 @@ class CallDescriptor final : public ZoneObject {
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
bool UseNativeStack() const { return flags() & kUseNativeStack; }
+ bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
bool InitializeRootRegister() const {
return flags() & kInitializeRootRegister;
}
@@ -293,6 +299,8 @@ class CallDescriptor final : public ZoneObject {
bool CanTailCall(const Node* call) const;
+ int CalculateFixedFrameSize() const;
+
private:
friend class Linkage;
@@ -313,7 +321,8 @@ class CallDescriptor final : public ZoneObject {
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
std::ostream& operator<<(std::ostream& os, const CallDescriptor& d);
-std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const CallDescriptor::Kind& k);
// Defines the linkage for a compilation, including the calling conventions
// for incoming parameters and return value(s) as well as the outgoing calling
@@ -329,7 +338,7 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k);
// Call[JSFunction] function, rcvr, arg 1, [...], new, #arg, context
// Call[Runtime] CEntryStub, arg 1, arg 2, [...], fun, #arg, context
// Call[BytecodeDispatch] address, arg 1, arg 2, [...]
-class Linkage : public ZoneObject {
+class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
diff --git a/deps/v8/src/compiler/liveness-analyzer.cc b/deps/v8/src/compiler/liveness-analyzer.cc
index fe458b8f9f..0cf13332f4 100644
--- a/deps/v8/src/compiler/liveness-analyzer.cc
+++ b/deps/v8/src/compiler/liveness-analyzer.cc
@@ -13,10 +13,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-
-LivenessAnalyzer::LivenessAnalyzer(size_t local_count, Zone* zone)
- : zone_(zone), blocks_(zone), local_count_(local_count), queue_(zone) {}
-
+LivenessAnalyzer::LivenessAnalyzer(size_t local_count, bool has_accumulator,
+ Zone* zone)
+ : zone_(zone),
+ blocks_(zone),
+ local_count_(local_count),
+ has_accumulator_(has_accumulator),
+ queue_(zone) {}
void LivenessAnalyzer::Print(std::ostream& os) {
for (auto block : blocks_) {
@@ -28,8 +31,8 @@ void LivenessAnalyzer::Print(std::ostream& os) {
LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock() {
LivenessAnalyzerBlock* result =
- new (zone()->New(sizeof(LivenessAnalyzerBlock)))
- LivenessAnalyzerBlock(blocks_.size(), local_count_, zone());
+ new (zone()->New(sizeof(LivenessAnalyzerBlock))) LivenessAnalyzerBlock(
+ blocks_.size(), local_count_, has_accumulator_, zone());
blocks_.push_back(result);
return result;
}
@@ -52,8 +55,8 @@ void LivenessAnalyzer::Queue(LivenessAnalyzerBlock* block) {
void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
- if (local_count_ == 0) {
- // No local variables => nothing to do.
+ if (local_count_ == 0 && !has_accumulator_) {
+ // No variables => nothing to do.
return;
}
@@ -64,7 +67,8 @@ void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
}
// Compute the fix-point.
- BitVector working_area(static_cast<int>(local_count_), zone_);
+ BitVector working_area(
+ static_cast<int>(local_count_) + (has_accumulator_ ? 1 : 0), zone_);
while (!queue_.empty()) {
LivenessAnalyzerBlock* block = queue_.front();
queue_.pop();
@@ -84,11 +88,12 @@ void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
}
LivenessAnalyzerBlock::LivenessAnalyzerBlock(size_t id, size_t local_count,
- Zone* zone)
+ bool has_accumulator, Zone* zone)
: entries_(zone),
predecessors_(zone),
- live_(local_count == 0 ? 1 : static_cast<int>(local_count), zone),
+ live_(static_cast<int>(local_count) + (has_accumulator ? 1 : 0), zone),
queued_(false),
+ has_accumulator_(has_accumulator),
id_(id) {}
void LivenessAnalyzerBlock::Process(BitVector* result,
@@ -123,32 +128,52 @@ bool LivenessAnalyzerBlock::UpdateLive(BitVector* working_area) {
void NonLiveFrameStateSlotReplacer::ClearNonLiveFrameStateSlots(
Node* frame_state, BitVector* liveness) {
+ DCHECK_EQ(liveness->length(), permanently_live_.length());
+
DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
Node* locals_state = frame_state->InputAt(1);
DCHECK_EQ(locals_state->opcode(), IrOpcode::kStateValues);
- int count = static_cast<int>(StateValuesAccess(locals_state).size());
- DCHECK_EQ(count == 0 ? 1 : count, liveness->length());
+ int count = liveness->length() - (has_accumulator_ ? 1 : 0);
+ DCHECK_EQ(count, static_cast<int>(StateValuesAccess(locals_state).size()));
for (int i = 0; i < count; i++) {
- bool live = liveness->Contains(i) || permanently_live_.Contains(i);
- if (!live || locals_state->InputAt(i) != replacement_node_) {
+ if (!liveness->Contains(i) && !permanently_live_.Contains(i)) {
Node* new_values = ClearNonLiveStateValues(locals_state, liveness);
frame_state->ReplaceInput(1, new_values);
break;
}
}
+
+ if (has_accumulator_) {
+ DCHECK_EQ(frame_state->InputAt(2)->opcode(), IrOpcode::kStateValues);
+ DCHECK_EQ(
+ static_cast<int>(StateValuesAccess(frame_state->InputAt(2)).size()), 1);
+ int index = liveness->length() - 1;
+ if (!liveness->Contains(index) && !permanently_live_.Contains(index)) {
+ Node* new_value =
+ state_values_cache()->GetNodeForValues(&replacement_node_, 1);
+ frame_state->ReplaceInput(2, new_value);
+ }
+ }
}
Node* NonLiveFrameStateSlotReplacer::ClearNonLiveStateValues(
Node* values, BitVector* liveness) {
DCHECK(inputs_buffer_.empty());
- for (StateValuesAccess::TypedNode node : StateValuesAccess(values)) {
+
+ int var = 0;
+ for (Node* value_node : values->inputs()) {
+ // Make sure this isn't a state value tree
+ DCHECK(value_node->opcode() != IrOpcode::kStateValues);
+
// Index of the next variable is its furure index in the inputs buffer,
// i.e., the buffer's size.
- int var = static_cast<int>(inputs_buffer_.size());
bool live = liveness->Contains(var) || permanently_live_.Contains(var);
- inputs_buffer_.push_back(live ? node.node : replacement_node_);
+ inputs_buffer_.push_back(live ? value_node : replacement_node_);
+
+ var++;
}
+
Node* result = state_values_cache()->GetNodeForValues(
inputs_buffer_.empty() ? nullptr : &(inputs_buffer_.front()),
inputs_buffer_.size());
@@ -175,10 +200,18 @@ void LivenessAnalyzerBlock::Print(std::ostream& os) {
os << " ";
switch (entry.kind()) {
case Entry::kLookup:
- os << "- Lookup " << entry.var() << std::endl;
+ if (has_accumulator_ && entry.var() == live_.length() - 1) {
+ os << "- Lookup accumulator" << std::endl;
+ } else {
+ os << "- Lookup " << entry.var() << std::endl;
+ }
break;
case Entry::kBind:
- os << "- Bind " << entry.var() << std::endl;
+ if (has_accumulator_ && entry.var() == live_.length() - 1) {
+ os << "- Bind accumulator" << std::endl;
+ } else {
+ os << "- Bind " << entry.var() << std::endl;
+ }
break;
case Entry::kCheckpoint:
os << "- Checkpoint " << entry.node()->id() << std::endl;
diff --git a/deps/v8/src/compiler/liveness-analyzer.h b/deps/v8/src/compiler/liveness-analyzer.h
index 8a3d715096..63fc52c125 100644
--- a/deps/v8/src/compiler/liveness-analyzer.h
+++ b/deps/v8/src/compiler/liveness-analyzer.h
@@ -7,6 +7,7 @@
#include "src/bit-vector.h"
#include "src/compiler/node.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -17,20 +18,22 @@ class LivenessAnalyzerBlock;
class Node;
class StateValuesCache;
-
class NonLiveFrameStateSlotReplacer {
public:
void ClearNonLiveFrameStateSlots(Node* frame_state, BitVector* liveness);
NonLiveFrameStateSlotReplacer(StateValuesCache* state_values_cache,
Node* replacement, size_t local_count,
- Zone* local_zone)
+ bool has_accumulator, Zone* local_zone)
: replacement_node_(replacement),
state_values_cache_(state_values_cache),
local_zone_(local_zone),
- permanently_live_(local_count == 0 ? 1 : static_cast<int>(local_count),
- local_zone),
- inputs_buffer_(local_zone) {}
+ permanently_live_(
+ static_cast<int>(local_count) + (has_accumulator ? 1 : 0),
+ local_zone),
+ inputs_buffer_(local_zone),
+ has_accumulator_(has_accumulator) {}
+ // TODO(leszeks): Not used by bytecode, remove once AST graph builder is gone.
void MarkPermanentlyLive(int var) { permanently_live_.Add(var); }
private:
@@ -48,12 +51,13 @@ class NonLiveFrameStateSlotReplacer {
Zone* local_zone_;
BitVector permanently_live_;
NodeVector inputs_buffer_;
-};
+ bool has_accumulator_;
+};
-class LivenessAnalyzer {
+class V8_EXPORT_PRIVATE LivenessAnalyzer {
public:
- LivenessAnalyzer(size_t local_count, Zone* zone);
+ LivenessAnalyzer(size_t local_count, bool has_accumulator, Zone* zone);
LivenessAnalyzerBlock* NewBlock();
LivenessAnalyzerBlock* NewBlock(LivenessAnalyzerBlock* predecessor);
@@ -73,6 +77,10 @@ class LivenessAnalyzer {
ZoneDeque<LivenessAnalyzerBlock*> blocks_;
size_t local_count_;
+ // TODO(leszeks): Always true for bytecode, remove once AST graph builder is
+ // gone.
+ bool has_accumulator_;
+
ZoneQueue<LivenessAnalyzerBlock*> queue_;
};
@@ -83,6 +91,17 @@ class LivenessAnalyzerBlock {
void Lookup(int var) { entries_.push_back(Entry(Entry::kLookup, var)); }
void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
+ void LookupAccumulator() {
+ DCHECK(has_accumulator_);
+ // The last entry is the accumulator entry.
+ entries_.push_back(Entry(Entry::kLookup, live_.length() - 1));
+ }
+ void BindAccumulator() {
+ DCHECK(has_accumulator_);
+ // The last entry is the accumulator entry.
+ entries_.push_back(Entry(Entry::kBind, live_.length() - 1));
+ }
+
void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
LivenessAnalyzerBlock* GetPredecessor() {
@@ -116,7 +135,8 @@ class LivenessAnalyzerBlock {
Node* node_;
};
- LivenessAnalyzerBlock(size_t id, size_t local_count, Zone* zone);
+ LivenessAnalyzerBlock(size_t id, size_t local_count, bool has_accumulator,
+ Zone* zone);
void Process(BitVector* result, NonLiveFrameStateSlotReplacer* relaxer);
bool UpdateLive(BitVector* working_area);
@@ -138,6 +158,7 @@ class LivenessAnalyzerBlock {
BitVector live_;
bool queued_;
+ bool has_accumulator_;
size_t id_;
};
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 93c24a08e5..e50ebe1919 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -448,6 +448,26 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillField(
return this;
}
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::KillFields(Node* object, Zone* zone) const {
+ for (size_t i = 0;; ++i) {
+ if (i == arraysize(fields_)) return this;
+ if (AbstractField const* this_field = this->fields_[i]) {
+ AbstractField const* that_field = this_field->Kill(object, zone);
+ if (that_field != this_field) {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->fields_[i] = this_field;
+ while (++i < arraysize(fields_)) {
+ if (this->fields_[i] != nullptr) {
+ that->fields_[i] = this->fields_[i]->Kill(object, zone);
+ }
+ }
+ return that;
+ }
+ }
+ }
+}
+
Node* LoadElimination::AbstractState::LookupField(Node* object,
size_t index) const {
if (AbstractField const* this_field = this->fields_[index]) {
@@ -662,7 +682,7 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
state = state->AddField(object, field_index, new_value, zone());
} else {
// Unsupported StoreField operator.
- state = empty_state();
+ state = state->KillFields(object, zone());
}
return UpdateState(node, state);
}
@@ -856,8 +876,11 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
FieldAccess const& access = FieldAccessOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
int field_index = FieldIndexOf(access);
- if (field_index < 0) return empty_state();
- state = state->KillField(object, field_index, zone());
+ if (field_index < 0) {
+ state = state->KillFields(object, zone());
+ } else {
+ state = state->KillField(object, field_index, zone());
+ }
break;
}
case IrOpcode::kStoreElement: {
@@ -897,6 +920,7 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
switch (rep) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
+ case MachineRepresentation::kSimd128:
UNREACHABLE();
break;
case MachineRepresentation::kWord32:
@@ -910,16 +934,20 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
case MachineRepresentation::kFloat32:
return -1; // Currently untracked.
case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128:
- return -1; // Currently untracked.
+ if (kDoubleSize != kPointerSize) {
+ return -1; // We currently only track pointer size fields.
+ }
+ // Fall through.
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
// TODO(bmeurer): Check that we never do overlapping load/stores of
- // individual parts of Float64/Simd128 values.
+ // individual parts of Float64 values.
break;
}
- DCHECK_EQ(kTaggedBase, access.base_is_tagged);
+ if (access.base_is_tagged != kTaggedBase) {
+ return -1; // We currently only track tagged objects.
+ }
return FieldIndexOf(access.offset);
}
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 985e690bc4..50979e4da8 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_LOAD_ELIMINATION_H_
#define V8_COMPILER_LOAD_ELIMINATION_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -17,7 +19,8 @@ struct FieldAccess;
class Graph;
class JSGraph;
-class LoadElimination final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE LoadElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
LoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
: AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
@@ -164,6 +167,7 @@ class LoadElimination final : public AdvancedReducer {
Zone* zone) const;
AbstractState const* KillField(Node* object, size_t index,
Zone* zone) const;
+ AbstractState const* KillFields(Node* object, Zone* zone) const;
Node* LookupField(Node* object, size_t index) const;
AbstractState const* AddElement(Node* object, Node* index, Node* value,
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 2d0f27b89f..fb3e1e753b 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -8,6 +8,7 @@
#include "src/base/iterator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -153,7 +154,7 @@ class LoopTree : public ZoneObject {
ZoneVector<Node*> loop_nodes_;
};
-class LoopFinder {
+class V8_EXPORT_PRIVATE LoopFinder {
public:
// Build a loop tree for the entire graph.
static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 8b38e2575c..301e4b8b6c 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_LOOP_PEELING_H_
#define V8_COMPILER_LOOP_PEELING_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/loop-analysis.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -14,7 +16,7 @@ namespace compiler {
// Represents the output of peeling a loop, which is basically the mapping
// from the body of the loop to the corresponding nodes in the peeled
// iteration.
-class PeeledIteration : public ZoneObject {
+class V8_EXPORT_PRIVATE PeeledIteration : public NON_EXPORTED_BASE(ZoneObject) {
public:
// Maps {node} to its corresponding copy in the peeled iteration, if
// the node was part of the body of the loop. Returns {node} otherwise.
@@ -27,7 +29,7 @@ class PeeledIteration : public ZoneObject {
class CommonOperatorBuilder;
// Implements loop peeling.
-class LoopPeeler {
+class V8_EXPORT_PRIVATE LoopPeeler {
public:
static bool CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop);
static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index d33ee4ec28..a8f7a25e1f 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -25,7 +25,8 @@ class MachineRepresentationInferrer {
Linkage* linkage, Zone* zone)
: schedule_(schedule),
linkage_(linkage),
- representation_vector_(graph->NodeCount(), zone) {
+ representation_vector_(graph->NodeCount(), MachineRepresentation::kNone,
+ zone) {
Run();
}
@@ -234,9 +235,10 @@ class MachineRepresentationInferrer {
class MachineRepresentationChecker {
public:
- MachineRepresentationChecker(Schedule const* const schedule,
- MachineRepresentationInferrer const* const typer)
- : schedule_(schedule), typer_(typer) {}
+ MachineRepresentationChecker(
+ Schedule const* const schedule,
+ MachineRepresentationInferrer const* const inferrer)
+ : schedule_(schedule), inferrer_(inferrer) {}
void Run() {
BasicBlockVector const* blocks = schedule_->all_blocks();
@@ -255,11 +257,11 @@ class MachineRepresentationChecker {
break;
case IrOpcode::kChangeBitToTagged:
CHECK_EQ(MachineRepresentation::kBit,
- typer_->GetRepresentation(node->InputAt(0)));
+ inferrer_->GetRepresentation(node->InputAt(0)));
break;
case IrOpcode::kChangeTaggedToBit:
CHECK_EQ(MachineRepresentation::kTagged,
- typer_->GetRepresentation(node->InputAt(0)));
+ inferrer_->GetRepresentation(node->InputAt(0)));
break;
case IrOpcode::kRoundInt64ToFloat64:
case IrOpcode::kRoundUint64ToFloat64:
@@ -290,7 +292,7 @@ class MachineRepresentationChecker {
case IrOpcode::kWord64Equal:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
- node, 1, typer_->GetRepresentation(node->InputAt(0)));
+ node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
break;
case IrOpcode::kInt64LessThan:
case IrOpcode::kInt64LessThanOrEqual:
@@ -400,7 +402,7 @@ class MachineRepresentationChecker {
}
break;
case IrOpcode::kPhi:
- switch (typer_->GetRepresentation(node)) {
+ switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -411,7 +413,7 @@ class MachineRepresentationChecker {
default:
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
CheckValueInputRepresentationIs(
- node, i, typer_->GetRepresentation(node));
+ node, i, inferrer_->GetRepresentation(node));
}
break;
}
@@ -444,19 +446,21 @@ class MachineRepresentationChecker {
void CheckValueInputRepresentationIs(Node const* node, int index,
MachineRepresentation representation) {
Node const* input = node->InputAt(index);
- if (typer_->GetRepresentation(input) != representation) {
+ MachineRepresentation input_representation =
+ inferrer_->GetRepresentation(input);
+ if (input_representation != representation) {
std::stringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op()
- << " uses node #" << input->id() << ":" << *input->op()
- << " which doesn't have a " << MachineReprToString(representation)
- << " representation.";
+ str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
+ << MachineReprToString(input_representation) << " uses node #"
+ << input->id() << ":" << *input->op() << " which doesn't have a "
+ << MachineReprToString(representation) << " representation.";
FATAL(str.str().c_str());
}
}
void CheckValueInputIsTagged(Node const* node, int index) {
Node const* input = node->InputAt(index);
- switch (typer_->GetRepresentation(input)) {
+ switch (inferrer_->GetRepresentation(input)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -473,7 +477,7 @@ class MachineRepresentationChecker {
void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
Node const* input = node->InputAt(index);
- switch (typer_->GetRepresentation(input)) {
+ switch (inferrer_->GetRepresentation(input)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -481,7 +485,7 @@ class MachineRepresentationChecker {
default:
break;
}
- if (typer_->GetRepresentation(input) !=
+ if (inferrer_->GetRepresentation(input) !=
MachineType::PointerRepresentation()) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
@@ -493,7 +497,7 @@ class MachineRepresentationChecker {
void CheckValueInputForInt32Op(Node const* node, int index) {
Node const* input = node->InputAt(index);
- switch (typer_->GetRepresentation(input)) {
+ switch (inferrer_->GetRepresentation(input)) {
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
@@ -518,7 +522,9 @@ class MachineRepresentationChecker {
void CheckValueInputForInt64Op(Node const* node, int index) {
Node const* input = node->InputAt(index);
- switch (typer_->GetRepresentation(input)) {
+ MachineRepresentation input_representation =
+ inferrer_->GetRepresentation(input);
+ switch (input_representation) {
case MachineRepresentation::kWord64:
return;
case MachineRepresentation::kNone: {
@@ -533,15 +539,16 @@ class MachineRepresentationChecker {
break;
}
std::ostringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op()
- << " uses node #" << input->id() << ":" << *input->op()
- << " which doesn't have a kWord64 representation.";
+ str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
+ << input_representation << " uses node #" << input->id() << ":"
+ << *input->op() << " which doesn't have a kWord64 representation.";
FATAL(str.str().c_str());
}
void CheckValueInputForFloat32Op(Node const* node, int index) {
Node const* input = node->InputAt(index);
- if (MachineRepresentation::kFloat32 == typer_->GetRepresentation(input)) {
+ if (MachineRepresentation::kFloat32 ==
+ inferrer_->GetRepresentation(input)) {
return;
}
std::ostringstream str;
@@ -553,7 +560,8 @@ class MachineRepresentationChecker {
void CheckValueInputForFloat64Op(Node const* node, int index) {
Node const* input = node->InputAt(index);
- if (MachineRepresentation::kFloat64 == typer_->GetRepresentation(input)) {
+ if (MachineRepresentation::kFloat64 ==
+ inferrer_->GetRepresentation(input)) {
return;
}
std::ostringstream str;
@@ -569,7 +577,8 @@ class MachineRepresentationChecker {
bool should_log_error = false;
for (size_t i = 0; i < desc->InputCount(); ++i) {
Node const* input = node->InputAt(static_cast<int>(i));
- MachineRepresentation const input_type = typer_->GetRepresentation(input);
+ MachineRepresentation const input_type =
+ inferrer_->GetRepresentation(input);
MachineRepresentation const expected_input_type =
desc->GetInputType(i).representation();
if (!IsCompatible(expected_input_type, input_type)) {
@@ -649,7 +658,7 @@ class MachineRepresentationChecker {
}
Schedule const* const schedule_;
- MachineRepresentationInferrer const* const typer_;
+ MachineRepresentationInferrer const* const inferrer_;
};
} // namespace
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 574f45c0b3..d0845d9fab 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/machine-operator.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -19,7 +21,8 @@ class JSGraph;
// Performs constant folding and strength reduction on nodes that have
// machine operators.
-class MachineOperatorReducer final : public Reducer {
+class V8_EXPORT_PRIVATE MachineOperatorReducer final
+ : public NON_EXPORTED_BASE(Reducer) {
public:
explicit MachineOperatorReducer(JSGraph* jsgraph);
~MachineOperatorReducer();
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 56cefc5923..1cbec994a8 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
#define V8_COMPILER_MACHINE_OPERATOR_H_
+#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
+#include "src/globals.h"
#include "src/machine-type.h"
namespace v8 {
@@ -62,12 +64,12 @@ class StoreRepresentation final {
WriteBarrierKind write_barrier_kind_;
};
-bool operator==(StoreRepresentation, StoreRepresentation);
+V8_EXPORT_PRIVATE bool operator==(StoreRepresentation, StoreRepresentation);
bool operator!=(StoreRepresentation, StoreRepresentation);
size_t hash_value(StoreRepresentation);
-std::ostream& operator<<(std::ostream&, StoreRepresentation);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
StoreRepresentation const& StoreRepresentationOf(Operator const*);
@@ -99,7 +101,8 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
-class MachineOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE MachineOperatorBuilder final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
// Flags that specify which operations are available. This is useful
// for operations that are unsupported by some back-ends.
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 12ab4af771..0a62b52d4f 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -54,6 +54,14 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK((InputInt32(index) == 0));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
DoubleRegister InputOrZeroDoubleRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
@@ -381,45 +389,48 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ bind(ool->exit()); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
do { \
Label done; \
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
- auto value = i.Input##width##Register(2); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
__ addu(kScratchReg, i.InputRegister(3), offset); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
- auto value = i.Input##width##Register(2); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
} \
__ bind(&done); \
} while (0)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
Label done; \
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
- auto value = i.InputRegister(2); \
+ auto value = i.InputOrZeroRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
__ addu(kScratchReg, i.InputRegister(3), offset); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputRegister(2); \
+ auto value = i.InputOrZeroRegister(2); \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
} \
__ bind(&done); \
} while (0)
-
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (IsMipsArchVariant(kMips32r6)) { \
__ cfc1(kScratchReg, FCSR); \
@@ -478,11 +489,11 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ sync(); \
- __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ sync(); \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -639,20 +650,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->SetFrameAccessToDefault();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
-
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
@@ -713,7 +720,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
@@ -976,32 +983,38 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMipsShlPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsRegister()) {
- __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), i.InputRegister(2));
} else {
uint32_t imm = i.InputOperand(2).immediate();
- __ ShlPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), imm);
}
} break;
case kMipsShrPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsRegister()) {
- __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), i.InputRegister(2));
} else {
uint32_t imm = i.InputOperand(2).immediate();
- __ ShrPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), imm);
}
} break;
case kMipsSarPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsRegister()) {
- __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), i.InputRegister(2));
} else {
uint32_t imm = i.InputOperand(2).immediate();
- __ SarPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0),
i.InputRegister(1), imm);
}
} break;
@@ -1388,10 +1401,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// ... more basic instructions ...
case kMipsSeb:
- __ seb(i.OutputRegister(), i.InputRegister(0));
+ __ Seb(i.OutputRegister(), i.InputRegister(0));
break;
case kMipsSeh:
- __ seh(i.OutputRegister(), i.InputRegister(0));
+ __ Seh(i.OutputRegister(), i.InputRegister(0));
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
@@ -1400,7 +1413,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lb(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsSb:
- __ sb(i.InputRegister(2), i.MemoryOperand());
+ __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
@@ -1415,10 +1428,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsSh:
- __ sh(i.InputRegister(2), i.MemoryOperand());
+ __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsUsh:
- __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+ __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
@@ -1427,10 +1440,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ulw(i.OutputRegister(), i.MemoryOperand());
break;
case kMipsSw:
- __ sw(i.InputRegister(2), i.MemoryOperand());
+ __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsUsw:
- __ Usw(i.InputRegister(2), i.MemoryOperand());
+ __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
@@ -1443,13 +1456,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsSwc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
- __ swc1(i.InputSingleRegister(index), operand);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ swc1(ft, operand);
break;
}
case kMipsUswc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
- __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ Uswc1(ft, operand, kScratchReg);
break;
}
case kMipsLdc1:
@@ -1458,12 +1479,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsUldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
- case kMipsSdc1:
- __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+ case kMipsSdc1: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ sdc1(ft, i.MemoryOperand());
break;
- case kMipsUsdc1:
- __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+ }
+ case kMipsUsdc1: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
+ }
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1714,8 +1745,15 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
- __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- __ Sltu(result, zero_reg, kScratchReg);
+ if (instr->InputAt(1)->IsImmediate() &&
+ base::bits::IsPowerOfTwo32(i.InputOperand(1).immediate())) {
+ uint16_t pos =
+ base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
+ __ Ext(result, i.InputRegister(0), pos, 1);
+ } else {
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ __ Sltu(result, zero_reg, kScratchReg);
+ }
if (cc == eq) {
// Sltu produces 0 for equality, invert the result.
__ xori(result, result, 1);
@@ -1884,7 +1922,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -1920,12 +1958,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
+ if (descriptor->PushArgumentCount()) {
+ __ Push(kJavaScriptCallArgCountRegister);
+ }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1958,8 +2000,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -1975,18 +2016,32 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopFPU(saves_fpu);
}
+ MipsOperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ Branch(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now unless they have an variable
+ // number of stack slot pops.
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_count += g.ToConstant(pop).ToInt32();
+ } else {
+ Register pop_reg = g.ToRegister(pop);
+ __ sll(pop_reg, pop_reg, kPointerSizeLog2);
+ __ Addu(sp, sp, Operand(pop_reg));
+ }
if (pop_count != 0) {
__ DropAndRet(pop_count);
} else {
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 0a98930b5c..1e4b996531 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -31,6 +31,39 @@ class MipsOperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kInt32Constant);
+ return OpParameter<int32_t>(node);
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node);
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node);
+ }
+
bool CanBeImmediate(Node* node, InstructionCode opcode) {
Int32Matcher m(node);
if (!m.HasValue()) return false;
@@ -40,14 +73,40 @@ class MipsOperandGenerator final : public OperandGenerator {
case kMipsSar:
case kMipsShr:
return is_uint5(value);
+ case kMipsAdd:
+ case kMipsAnd:
+ case kMipsOr:
+ case kMipsTst:
+ case kMipsSub:
case kMipsXor:
return is_uint16(value);
+ case kMipsLb:
+ case kMipsLbu:
+ case kMipsSb:
+ case kMipsLh:
+ case kMipsLhu:
+ case kMipsSh:
+ case kMipsLw:
+ case kMipsSw:
+ case kMipsLwc1:
+ case kMipsSwc1:
case kMipsLdc1:
case kMipsSdc1:
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedLoadFloat32:
case kCheckedLoadFloat64:
+ case kCheckedStoreFloat32:
case kCheckedStoreFloat64:
- return std::numeric_limits<int16_t>::min() <= (value + kIntSize) &&
- std::numeric_limits<int16_t>::max() >= (value + kIntSize);
+ // true even for 32b values, offsets > 16b
+ // are handled in assembler-mips.cc
+ return is_int32(value);
default:
return is_int16(value);
}
@@ -86,9 +145,23 @@ static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
g.UseOperand(node->InputAt(1), opcode));
}
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ MipsOperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
MipsOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand inputs[4];
@@ -96,8 +169,21 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionOperand outputs[2];
size_t output_count = 0;
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ }
+ if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
@@ -130,11 +216,21 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
}
}
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
- FlagsContinuation cont;
- VisitBinop(selector, node, opcode, &cont);
+ VisitBinop(selector, node, opcode, false, kArchNop);
}
@@ -259,14 +355,15 @@ void InstructionSelector::VisitStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
}
}
}
@@ -317,12 +414,12 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
- VisitBinop(this, node, kMipsAnd);
+ VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
}
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kMipsOr);
+ VisitBinop(this, node, kMipsOr, true, kMipsOr);
}
@@ -346,7 +443,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
g.TempImmediate(0));
return;
}
- VisitBinop(this, node, kMipsXor);
+ VisitBinop(this, node, kMipsXor, true, kMipsXor);
}
@@ -429,32 +526,43 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
static void VisitInt32PairBinop(InstructionSelector* selector,
- InstructionCode opcode, Node* node) {
+ InstructionCode pair_opcode,
+ InstructionCode single_opcode, Node* node) {
MipsOperandGenerator g(selector);
- // We use UseUniqueRegister here to avoid register sharing with the output
- // register.
- InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // register.
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- selector->Emit(opcode, 2, outputs, 4, inputs);
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ selector->Emit(pair_opcode, 2, outputs, 4, inputs);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ selector->Emit(single_opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairAdd(Node* node) {
- VisitInt32PairBinop(this, kMipsAddPair, node);
+ VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node);
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
- VisitInt32PairBinop(this, kMipsSubPair, node);
+ VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node);
}
void InstructionSelector::VisitInt32PairMul(Node* node) {
- VisitInt32PairBinop(this, kMipsMulPair, node);
+ VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node);
}
// Shared routine for multiple shift operations.
@@ -475,11 +583,21 @@ static void VisitWord32PairShift(InstructionSelector* selector,
g.UseUniqueRegister(node->InputAt(1)),
shift_operand};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+
+ InstructionOperand outputs[2];
+ InstructionOperand temps[1];
+ int32_t output_count = 0;
+ int32_t temp_count = 0;
- selector->Emit(opcode, 2, outputs, 3, inputs);
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (projection1) {
+ outputs[output_count++] = g.DefineAsRegister(projection1);
+ } else {
+ temps[temp_count++] = g.TempRegister();
+ }
+
+ selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
void InstructionSelector::VisitWord32PairShl(Node* node) {
@@ -554,7 +672,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
}
- VisitBinop(this, node, kMipsAdd);
+ VisitBinop(this, node, kMipsAdd, true, kMipsAdd);
}
@@ -1170,14 +1288,15 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
}
}
@@ -1269,7 +1388,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
: g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegister(value),
+ offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
g.UseRegister(buffer));
}
@@ -1334,51 +1453,61 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
- switch (cont->condition()) {
- case kEqual:
- case kNotEqual:
- if (cont->IsSet()) {
+ if (opcode == kMipsTst) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseImmediate(right), cont);
- } else {
+ break;
+ default:
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
- }
- break;
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- VisitCompare(selector, opcode, g.UseRegister(left),
- g.UseImmediate(right), cont);
- break;
- default:
- VisitCompare(selector, opcode, g.UseRegister(left),
- g.UseRegister(right), cont);
+ }
}
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- switch (cont->condition()) {
- case kEqual:
- case kNotEqual:
- if (cont->IsSet()) {
+ if (opcode == kMipsTst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
VisitCompare(selector, opcode, g.UseRegister(right),
g.UseImmediate(left), cont);
- } else {
+ break;
+ default:
VisitCompare(selector, opcode, g.UseRegister(right),
g.UseRegister(left), cont);
- }
- break;
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- VisitCompare(selector, opcode, g.UseRegister(right),
- g.UseImmediate(left), cont);
- break;
- default:
- VisitCompare(selector, opcode, g.UseRegister(right),
- g.UseRegister(left), cont);
+ }
}
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
@@ -1395,21 +1524,22 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWordCompare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, cont);
@@ -1473,7 +1603,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Continuation could not be combined with a compare, emit compare against 0.
@@ -1703,6 +1832,7 @@ void InstructionSelector::VisitAtomicLoad(Node* node) {
UNREACHABLE();
return;
}
+
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
@@ -1740,14 +1870,15 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
}
}
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 9ed72ae027..a3bf433d4a 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -53,6 +53,14 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK((InputInt32(index) == 0));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
DoubleRegister InputOrZeroDoubleRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
@@ -400,14 +408,20 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
Label done; \
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
- auto value = i.Input##width##Register(2); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
__ And(kScratchReg, offset, Operand(0xffffffff)); \
__ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.Input##width##Register(2); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
} \
@@ -419,14 +433,14 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
Label done; \
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
- auto value = i.InputRegister(2); \
+ auto value = i.InputOrZeroRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
__ And(kScratchReg, offset, Operand(0xffffffff)); \
__ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
__ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputRegister(2); \
+ auto value = i.InputOrZeroRegister(2); \
__ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
__ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
} \
@@ -489,11 +503,11 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ sync(); \
- __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ sync(); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ sync(); \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -648,19 +662,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
@@ -722,7 +733,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), sp);
@@ -1686,7 +1697,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lb(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sb:
- __ sb(i.InputRegister(2), i.MemoryOperand());
+ __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
@@ -1701,10 +1712,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sh:
- __ sh(i.InputRegister(2), i.MemoryOperand());
+ __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Ush:
- __ Ush(i.InputRegister(2), i.MemoryOperand(), kScratchReg);
+ __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMips64Lw:
__ lw(i.OutputRegister(), i.MemoryOperand());
@@ -1725,16 +1736,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Uld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sw:
- __ sw(i.InputRegister(2), i.MemoryOperand());
+ __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Usw:
- __ Usw(i.InputRegister(2), i.MemoryOperand());
+ __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Sd:
- __ sd(i.InputRegister(2), i.MemoryOperand());
+ __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Usd:
- __ Usd(i.InputRegister(2), i.MemoryOperand());
+ __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
@@ -1747,13 +1758,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Swc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
- __ swc1(i.InputSingleRegister(index), operand);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ swc1(ft, operand);
break;
}
case kMips64Uswc1: {
size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
- __ Uswc1(i.InputSingleRegister(index), operand, kScratchReg);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ Uswc1(ft, operand, kScratchReg);
break;
}
case kMips64Ldc1:
@@ -1762,12 +1781,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Uldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
- case kMips64Sdc1:
- __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+ case kMips64Sdc1: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ sdc1(ft, i.MemoryOperand());
break;
- case kMips64Usdc1:
- __ Usdc1(i.InputDoubleRegister(2), i.MemoryOperand(), kScratchReg);
+ }
+ case kMips64Usdc1: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
+ }
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -1866,6 +1895,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
+ case kMips64AssertEqual:
+ __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
+ i.InputRegister(0), Operand(i.InputRegister(1)));
+ break;
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2025,8 +2058,15 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
- __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
- __ Sltu(result, zero_reg, kScratchReg);
+ if (instr->InputAt(1)->IsImmediate() &&
+ base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
+ uint16_t pos =
+ base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
+ __ ExtractBits(result, i.InputRegister(0), pos, 1);
+ } else {
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ __ Sltu(result, zero_reg, kScratchReg);
+ }
if (cc == eq) {
// Sltu produces 0 for equality, invert the result.
__ xori(result, result, 1);
@@ -2206,7 +2246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2238,12 +2278,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
+ if (descriptor->PushArgumentCount()) {
+ __ Push(kJavaScriptCallArgCountRegister);
+ }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2277,8 +2321,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
// Restore GP registers.
@@ -2293,19 +2336,33 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopFPU(saves_fpu);
}
+ MipsOperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ Branch(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now unless they have an variable
+ // number of stack slot pops.
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_count += g.ToConstant(pop).ToInt32();
+ } else {
+ Register pop_reg = g.ToRegister(pop);
+ __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
+ __ Daddu(sp, sp, pop_reg);
+ }
if (pop_count != 0) {
__ DropAndRet(pop_count);
} else {
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 6a444342ac..8f68ced62e 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -169,7 +169,8 @@ namespace compiler {
V(Mips64ByteSwap32) \
V(Mips64StackClaim) \
V(Mips64Seb) \
- V(Mips64Seh)
+ V(Mips64Seh) \
+ V(Mips64AssertEqual)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 6e937e20d7..fbf09d6ca2 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -31,14 +31,49 @@ class Mips64OperandGenerator final : public OperandGenerator {
return UseRegister(node);
}
- bool CanBeImmediate(Node* node, InstructionCode opcode) {
- int64_t value;
- if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
- else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
- else
- return false;
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node);
+ }
+ DCHECK(node->opcode() == IrOpcode::kInt64Constant);
+ return OpParameter<int64_t>(node);
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node);
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node);
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
switch (ArchOpcodeField::decode(opcode)) {
case kMips64Shl:
case kMips64Sar:
@@ -48,6 +83,13 @@ class Mips64OperandGenerator final : public OperandGenerator {
case kMips64Dsar:
case kMips64Dshr:
return is_uint6(value);
+ case kMips64Add:
+ case kMips64And32:
+ case kMips64And:
+ case kMips64Dadd:
+ case kMips64Or32:
+ case kMips64Or:
+ case kMips64Tst:
case kMips64Xor:
return is_uint16(value);
case kMips64Ldc1:
@@ -91,9 +133,94 @@ static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
g.UseOperand(node->InputAt(1), opcode));
}
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and shifting by 32, we should
+ // just load and sign-extend the interesting 4 bytes instead.
+ // This happens, for example, when we're loading and untagging SMIs.
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ Mips64OperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kMips64Lw;
+ if (g.CanBeImmediate(offset, opcode_)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ immediate_ = g.GetIntegerConstantValue(offset);
+#endif
+ matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
+ }
+ }
+ }
+};
+
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+ ExtendingLoadMatcher m(node, selector);
+ Mips64OperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ Mips64OperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand inputs[4];
@@ -101,8 +228,21 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionOperand outputs[2];
size_t output_count = 0;
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ }
+ if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
@@ -135,11 +275,21 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
}
}
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode) {
- FlagsContinuation cont;
- VisitBinop(selector, node, opcode, &cont);
+ VisitBinop(selector, node, opcode, false, kArchNop);
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -275,14 +425,15 @@ void InstructionSelector::VisitStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
}
}
}
@@ -334,7 +485,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
return;
}
}
- VisitBinop(this, node, kMips64And32);
+ VisitBinop(this, node, kMips64And32, true, kMips64And32);
}
@@ -385,17 +536,17 @@ void InstructionSelector::VisitWord64And(Node* node) {
return;
}
}
- VisitBinop(this, node, kMips64And);
+ VisitBinop(this, node, kMips64And, true, kMips64And);
}
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kMips64Or32);
+ VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
}
void InstructionSelector::VisitWord64Or(Node* node) {
- VisitBinop(this, node, kMips64Or);
+ VisitBinop(this, node, kMips64Or, true, kMips64Or);
}
@@ -419,7 +570,7 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
g.TempImmediate(0));
return;
}
- VisitBinop(this, node, kMips64Xor32);
+ VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
}
@@ -443,7 +594,7 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
g.TempImmediate(0));
return;
}
- VisitBinop(this, node, kMips64Xor);
+ VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
}
@@ -597,6 +748,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node)) return;
VisitRRO(this, kMips64Dsar, node);
}
@@ -692,7 +844,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
return;
}
}
- VisitBinop(this, node, kMips64Add);
+ VisitBinop(this, node, kMips64Add, true, kMips64Add);
}
@@ -726,7 +878,7 @@ void InstructionSelector::VisitInt64Add(Node* node) {
}
}
- VisitBinop(this, node, kMips64Dadd);
+ VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}
@@ -1128,6 +1280,33 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ switch (value->opcode()) {
+ // 32-bit operations will write their result in a 64 bit register,
+ // clearing the top 32 bits of the destination register.
+ case IrOpcode::kUint32Div:
+ case IrOpcode::kUint32Mod:
+ case IrOpcode::kUint32MulHigh: {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ }
+ case IrOpcode::kLoad: {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
+ default:
+ break;
+ }
+ }
+ }
+ default:
+ break;
+ }
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
@@ -1613,14 +1792,15 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
}
}
@@ -1722,7 +1902,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
: g.UseRegister(length);
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegister(value),
+ offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
g.UseRegister(buffer));
}
@@ -1788,51 +1968,61 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
- switch (cont->condition()) {
- case kEqual:
- case kNotEqual:
- if (cont->IsSet()) {
+ if (opcode == kMips64Tst) {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseImmediate(right), cont);
- } else {
+ break;
+ default:
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
- }
- break;
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- VisitCompare(selector, opcode, g.UseRegister(left),
- g.UseImmediate(right), cont);
- break;
- default:
- VisitCompare(selector, opcode, g.UseRegister(left),
- g.UseRegister(right), cont);
+ }
}
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- switch (cont->condition()) {
- case kEqual:
- case kNotEqual:
- if (cont->IsSet()) {
+ if (opcode == kMips64Tst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
VisitCompare(selector, opcode, g.UseRegister(right),
g.UseImmediate(left), cont);
- } else {
+ break;
+ default:
VisitCompare(selector, opcode, g.UseRegister(right),
g.UseRegister(left), cont);
- }
- break;
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- VisitCompare(selector, opcode, g.UseRegister(right),
- g.UseImmediate(left), cont);
- break;
- default:
- VisitCompare(selector, opcode, g.UseRegister(right),
- g.UseRegister(left), cont);
+ }
}
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
@@ -1840,10 +2030,89 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
}
}
+bool IsNodeUnsigned(Node* n) {
+ NodeMatcher m(n);
+
+ if (m.IsLoad()) {
+ LoadRepresentation load_rep = LoadRepresentationOf(n->op());
+ return load_rep.IsUnsigned();
+ } else if (m.IsUnalignedLoad()) {
+ UnalignedLoadRepresentation load_rep =
+ UnalignedLoadRepresentationOf(n->op());
+ return load_rep.IsUnsigned();
+ } else {
+ return m.IsUint32Div() || m.IsUint32LessThan() ||
+ m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
+ m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
+ m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
+ }
+}
+
+// Shared routine for multiple word compare operations.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ Mips64OperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
+ selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+ if (FLAG_debug_code) {
+ Mips64OperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+ InstructionOperand optimizedResult = g.TempRegister();
+ InstructionOperand fullResult = g.TempRegister();
+ FlagsCondition condition = cont->condition();
+ InstructionCode testOpcode = opcode |
+ FlagsConditionField::encode(condition) |
+ FlagsModeField::encode(kFlags_set);
+
+ selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+
+ selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+ selector->Emit(testOpcode, fullResult, leftOp, rightOp);
+
+ selector->Emit(
+ kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
+ g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
+ }
+
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kMips64Cmp, cont, false);
+ // MIPS64 doesn't support Word32 compare instructions. Instead it relies
+ // that the values in registers are correctly sign-extended and uses
+ // Word64 comparison instead. This behavior is correct in most cases,
+ // but doesn't work when comparing signed with unsigned operands.
+ // We could simulate full Word32 compare in all cases but this would
+ // create an unnecessary overhead since unsigned integers are rarely
+ // used in JavaScript.
+ // The solution proposed here tries to match a comparison of signed
+ // with unsigned operand, and perform full Word32Compare only
+ // in those cases. Unfortunately, the solution is not complete because
+ // it might skip cases where Word32 full compare is needed, so
+ // basically it is a hack.
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+ VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
+ } else {
+ VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
+ }
}
@@ -1876,21 +2145,30 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
while (selector->CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord32Compare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
@@ -1903,19 +2181,9 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kUint32LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
- case IrOpcode::kWord64Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord64Compare(selector, value, cont);
- }
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
@@ -1986,7 +2254,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Continuation could not be combined with a compare, emit compare against 0.
@@ -2288,14 +2555,15 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
} else {
InstructionOperand addr_reg = g.TempRegister();
Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegister(value));
+ addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
}
}
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index d87ece3849..b62a8ccb4f 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -25,11 +25,92 @@ struct MoveKeyCompare {
};
typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
-typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
-bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
- return set.find(operand) != set.end();
-}
+class OperandSet {
+ public:
+ explicit OperandSet(ZoneVector<InstructionOperand>* buffer)
+ : set_(buffer), fp_reps_(0) {
+ buffer->clear();
+ }
+
+ void InsertOp(const InstructionOperand& op) {
+ set_->push_back(op);
+
+ if (!kSimpleFPAliasing && op.IsFPRegister())
+ fp_reps_ |= RepBit(LocationOperand::cast(op).representation());
+ }
+
+ bool Contains(const InstructionOperand& op) const {
+ for (const InstructionOperand& elem : *set_) {
+ if (elem.EqualsCanonicalized(op)) return true;
+ }
+ return false;
+ }
+
+ bool ContainsOpOrAlias(const InstructionOperand& op) const {
+ if (Contains(op)) return true;
+
+ if (!kSimpleFPAliasing && op.IsFPRegister()) {
+ // Platforms where FP registers have complex aliasing need extra checks.
+ const LocationOperand& loc = LocationOperand::cast(op);
+ MachineRepresentation rep = loc.representation();
+ // If haven't encountered mixed rep FP registers, skip the extra checks.
+ if (!HasMixedFPReps(fp_reps_ | RepBit(rep))) return false;
+
+ // Check register against aliasing registers of other FP representations.
+ MachineRepresentation other_rep1, other_rep2;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ other_rep1 = MachineRepresentation::kFloat64;
+ other_rep2 = MachineRepresentation::kSimd128;
+ break;
+ case MachineRepresentation::kFloat64:
+ other_rep1 = MachineRepresentation::kFloat32;
+ other_rep2 = MachineRepresentation::kSimd128;
+ break;
+ case MachineRepresentation::kSimd128:
+ other_rep1 = MachineRepresentation::kFloat32;
+ other_rep2 = MachineRepresentation::kFloat64;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
+ int base = -1;
+ int aliases =
+ config->GetAliases(rep, loc.register_code(), other_rep1, &base);
+ DCHECK(aliases > 0 || (aliases == 0 && base == -1));
+ while (aliases--) {
+ if (Contains(AllocatedOperand(LocationOperand::REGISTER, other_rep1,
+ base + aliases))) {
+ return true;
+ }
+ }
+ aliases = config->GetAliases(rep, loc.register_code(), other_rep2, &base);
+ DCHECK(aliases > 0 || (aliases == 0 && base == -1));
+ while (aliases--) {
+ if (Contains(AllocatedOperand(LocationOperand::REGISTER, other_rep2,
+ base + aliases))) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ private:
+ static int RepBit(MachineRepresentation rep) {
+ return 1 << static_cast<int>(rep);
+ }
+
+ static bool HasMixedFPReps(int reps) {
+ return reps && !base::bits::IsPowerOfTwo32(reps);
+ }
+
+ ZoneVector<InstructionOperand>* set_;
+ int fp_reps_;
+};
int FindFirstNonEmptySlot(const Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
@@ -47,12 +128,12 @@ int FindFirstNonEmptySlot(const Instruction* instr) {
} // namespace
-
MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
: local_zone_(local_zone),
code_(code),
- local_vector_(local_zone) {}
-
+ local_vector_(local_zone),
+ operand_buffer1(local_zone),
+ operand_buffer2(local_zone) {}
void MoveOptimizer::Run() {
for (Instruction* instruction : code()->instructions()) {
@@ -92,27 +173,27 @@ void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
DCHECK(instruction->parallel_moves()[1] == nullptr ||
instruction->parallel_moves()[1]->empty());
- OperandSet outputs(local_zone());
- OperandSet inputs(local_zone());
+ OperandSet outputs(&operand_buffer1);
+ OperandSet inputs(&operand_buffer2);
// Outputs and temps are treated together as potentially clobbering a
// destination operand.
for (size_t i = 0; i < instruction->OutputCount(); ++i) {
- outputs.insert(*instruction->OutputAt(i));
+ outputs.InsertOp(*instruction->OutputAt(i));
}
for (size_t i = 0; i < instruction->TempCount(); ++i) {
- outputs.insert(*instruction->TempAt(i));
+ outputs.InsertOp(*instruction->TempAt(i));
}
// Input operands block elisions.
for (size_t i = 0; i < instruction->InputCount(); ++i) {
- inputs.insert(*instruction->InputAt(i));
+ inputs.InsertOp(*instruction->InputAt(i));
}
// Elide moves made redundant by the instruction.
for (MoveOperands* move : *moves) {
- if (outputs.find(move->destination()) != outputs.end() &&
- inputs.find(move->destination()) == inputs.end()) {
+ if (outputs.ContainsOpOrAlias(move->destination()) &&
+ !inputs.ContainsOpOrAlias(move->destination())) {
move->Eliminate();
}
}
@@ -121,7 +202,7 @@ void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
// the one for its input.
if (instruction->IsRet() || instruction->IsTailCall()) {
for (MoveOperands* move : *moves) {
- if (inputs.find(move->destination()) == inputs.end()) {
+ if (!inputs.ContainsOpOrAlias(move->destination())) {
move->Eliminate();
}
}
@@ -134,13 +215,13 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
ParallelMove* from_moves = from->parallel_moves()[0];
if (from_moves == nullptr || from_moves->empty()) return;
- OperandSet dst_cant_be(local_zone());
- OperandSet src_cant_be(local_zone());
+ OperandSet dst_cant_be(&operand_buffer1);
+ OperandSet src_cant_be(&operand_buffer2);
// If an operand is an input to the instruction, we cannot move assignments
// where it appears on the LHS.
for (size_t i = 0; i < from->InputCount(); ++i) {
- dst_cant_be.insert(*from->InputAt(i));
+ dst_cant_be.InsertOp(*from->InputAt(i));
}
// If an operand is output to the instruction, we cannot move assignments
// where it appears on the RHS, because we would lose its value before the
@@ -149,10 +230,10 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
// The output can't appear on the LHS because we performed
// RemoveClobberedDestinations for the "from" instruction.
for (size_t i = 0; i < from->OutputCount(); ++i) {
- src_cant_be.insert(*from->OutputAt(i));
+ src_cant_be.InsertOp(*from->OutputAt(i));
}
for (size_t i = 0; i < from->TempCount(); ++i) {
- src_cant_be.insert(*from->TempAt(i));
+ src_cant_be.InsertOp(*from->TempAt(i));
}
for (MoveOperands* move : *from_moves) {
if (move->IsRedundant()) continue;
@@ -160,7 +241,7 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
// move "z = dest", because z would become y rather than "V".
// We assume CompressMoves has happened before this, which means we don't
// have more than one assignment to dest.
- src_cant_be.insert(move->destination());
+ src_cant_be.InsertOp(move->destination());
}
ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
@@ -168,7 +249,7 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
// destination operands are eligible for being moved down.
for (MoveOperands* move : *from_moves) {
if (move->IsRedundant()) continue;
- if (!Blocks(dst_cant_be, move->destination())) {
+ if (!dst_cant_be.ContainsOpOrAlias(move->destination())) {
MoveKey key = {move->source(), move->destination()};
move_candidates.insert(key);
}
@@ -183,8 +264,8 @@ void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
auto current = iter;
++iter;
InstructionOperand src = current->source;
- if (Blocks(src_cant_be, src)) {
- src_cant_be.insert(current->destination);
+ if (src_cant_be.ContainsOpOrAlias(src)) {
+ src_cant_be.InsertOp(current->destination);
move_candidates.erase(current);
changed = true;
}
@@ -223,8 +304,7 @@ void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) {
// merging the two gaps.
for (MoveOperands* move : *right) {
if (move->IsRedundant()) continue;
- MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
- if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
+ left->PrepareInsertAfter(move, &eliminated);
}
// Eliminate dead moves.
for (MoveOperands* to_eliminate : eliminated) {
@@ -317,7 +397,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
if (!op->IsConstant() && !op->IsImmediate()) return;
}
}
- // TODO(dcarney): pass a ZonePool down for this?
+ // TODO(dcarney): pass a ZoneStats down for this?
MoveMap move_map(local_zone());
size_t correct_counts = 0;
// Accumulate set of shared moves.
@@ -350,7 +430,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
if (correct_counts != move_map.size()) {
// Moves that are unique to each predecessor won't be pushed to the common
// successor.
- OperandSet conflicting_srcs(local_zone());
+ OperandSet conflicting_srcs(&operand_buffer1);
for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
auto current = iter;
++iter;
@@ -360,7 +440,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
// there are such moves, we could move them, but the destination of the
// moves staying behind can't appear as a source of a common move,
// because the move staying behind will clobber this destination.
- conflicting_srcs.insert(dest);
+ conflicting_srcs.InsertOp(dest);
move_map.erase(current);
}
}
@@ -374,9 +454,8 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
auto current = iter;
++iter;
DCHECK_EQ(block->PredecessorCount(), current->second);
- if (conflicting_srcs.find(current->first.source) !=
- conflicting_srcs.end()) {
- conflicting_srcs.insert(current->first.destination);
+ if (conflicting_srcs.ContainsOpOrAlias(current->first.source)) {
+ conflicting_srcs.InsertOp(current->first.destination);
move_map.erase(current);
changed = true;
}
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index ce26a7f988..3844d330ec 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -6,13 +6,14 @@
#define V8_COMPILER_MOVE_OPTIMIZER_
#include "src/compiler/instruction.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace compiler {
-class MoveOptimizer final {
+class V8_EXPORT_PRIVATE MoveOptimizer final {
public:
MoveOptimizer(Zone* local_zone, InstructionSequence* code);
void Run();
@@ -52,6 +53,11 @@ class MoveOptimizer final {
InstructionSequence* const code_;
MoveOpVector local_vector_;
+ // Reusable buffers for storing operand sets. We need at most two sets
+ // at any given time, so we create two buffers.
+ ZoneVector<InstructionOperand> operand_buffer1;
+ ZoneVector<InstructionOperand> operand_buffer2;
+
DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
};
diff --git a/deps/v8/src/compiler/node-aux-data.h b/deps/v8/src/compiler/node-aux-data.h
index b50ff3885d..277ff18034 100644
--- a/deps/v8/src/compiler/node-aux-data.h
+++ b/deps/v8/src/compiler/node-aux-data.h
@@ -15,20 +15,20 @@ namespace compiler {
// Forward declarations.
class Node;
-template <class T>
+template <class T, T def()>
class NodeAuxData {
public:
explicit NodeAuxData(Zone* zone) : aux_data_(zone) {}
void Set(Node* node, T const& data) {
size_t const id = node->id();
- if (id >= aux_data_.size()) aux_data_.resize(id + 1);
+ if (id >= aux_data_.size()) aux_data_.resize(id + 1, def());
aux_data_[id] = data;
}
T Get(Node* node) const {
size_t const id = node->id();
- return (id < aux_data_.size()) ? aux_data_[id] : T();
+ return (id < aux_data_.size()) ? aux_data_[id] : def();
}
class const_iterator;
@@ -41,9 +41,8 @@ class NodeAuxData {
ZoneVector<T> aux_data_;
};
-
-template <class T>
-class NodeAuxData<T>::const_iterator {
+template <class T, T def()>
+class NodeAuxData<T, def>::const_iterator {
public:
typedef std::forward_iterator_tag iterator_category;
typedef int difference_type;
@@ -76,14 +75,16 @@ class NodeAuxData<T>::const_iterator {
size_t current_;
};
-template <class T>
-typename NodeAuxData<T>::const_iterator NodeAuxData<T>::begin() const {
- return typename NodeAuxData<T>::const_iterator(&aux_data_, 0);
+template <class T, T def()>
+typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::begin()
+ const {
+ return typename NodeAuxData<T, def>::const_iterator(&aux_data_, 0);
}
-template <class T>
-typename NodeAuxData<T>::const_iterator NodeAuxData<T>::end() const {
- return typename NodeAuxData<T>::const_iterator(&aux_data_, aux_data_.size());
+template <class T, T def()>
+typename NodeAuxData<T, def>::const_iterator NodeAuxData<T, def>::end() const {
+ return typename NodeAuxData<T, def>::const_iterator(&aux_data_,
+ aux_data_.size());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/node-cache.cc b/deps/v8/src/compiler/node-cache.cc
index 0be6f81bd5..fc9a44c629 100644
--- a/deps/v8/src/compiler/node-cache.cc
+++ b/deps/v8/src/compiler/node-cache.cc
@@ -6,6 +6,7 @@
#include <cstring>
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -111,12 +112,11 @@ void NodeCache<Key, Hash, Pred>::GetCachedNodes(ZoneVector<Node*>* nodes) {
// -----------------------------------------------------------------------------
// Instantiations
+template class V8_EXPORT_PRIVATE NodeCache<int32_t>;
+template class V8_EXPORT_PRIVATE NodeCache<int64_t>;
-template class NodeCache<int32_t>;
-template class NodeCache<int64_t>;
-
-template class NodeCache<RelocInt32Key>;
-template class NodeCache<RelocInt64Key>;
+template class V8_EXPORT_PRIVATE NodeCache<RelocInt32Key>;
+template class V8_EXPORT_PRIVATE NodeCache<RelocInt64Key>;
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 6c283dc032..c317fdd5e7 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -9,9 +9,11 @@
// TODO(turbofan): Move ExternalReference out of assembler.h
#include "src/assembler.h"
+#include "src/base/compiler-specific.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/double.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -165,6 +167,9 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
bool IsNormal() const {
return this->HasValue() && std::isnormal(this->Value());
}
+ bool IsInteger() const {
+ return this->HasValue() && std::nearbyint(this->Value()) == this->Value();
+ }
bool IsPositiveOrNegativePowerOf2() const {
if (!this->HasValue() || (this->Value() == 0.0)) {
return false;
@@ -651,7 +656,7 @@ typedef BaseWithIndexAndDisplacementMatcher<Int32AddMatcher>
typedef BaseWithIndexAndDisplacementMatcher<Int64AddMatcher>
BaseWithIndexAndDisplacement64Matcher;
-struct BranchMatcher : public NodeMatcher {
+struct V8_EXPORT_PRIVATE BranchMatcher : public NON_EXPORTED_BASE(NodeMatcher) {
explicit BranchMatcher(Node* branch);
bool Matched() const { return if_true_ && if_false_; }
@@ -665,8 +670,8 @@ struct BranchMatcher : public NodeMatcher {
Node* if_false_;
};
-
-struct DiamondMatcher : public NodeMatcher {
+struct V8_EXPORT_PRIVATE DiamondMatcher
+ : public NON_EXPORTED_BASE(NodeMatcher) {
explicit DiamondMatcher(Node* merge);
bool Matched() const { return branch_; }
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 22539cbfb4..646dbc209e 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -338,74 +338,6 @@ MaybeHandle<Context> NodeProperties::GetSpecializationContext(
// static
-MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
- Node* node, MaybeHandle<Context> native_context) {
- while (true) {
- switch (node->opcode()) {
- case IrOpcode::kJSLoadContext: {
- ContextAccess const& access = ContextAccessOf(node->op());
- if (access.index() != Context::NATIVE_CONTEXT_INDEX) {
- return MaybeHandle<Context>();
- }
- // Skip over the intermediate contexts, we're only interested in the
- // very last context in the context chain anyway.
- node = NodeProperties::GetContextInput(node);
- break;
- }
- case IrOpcode::kJSCreateBlockContext:
- case IrOpcode::kJSCreateCatchContext:
- case IrOpcode::kJSCreateFunctionContext:
- case IrOpcode::kJSCreateScriptContext:
- case IrOpcode::kJSCreateWithContext: {
- // Skip over the intermediate contexts, we're only interested in the
- // very last context in the context chain anyway.
- node = NodeProperties::GetContextInput(node);
- break;
- }
- case IrOpcode::kHeapConstant: {
- // Extract the native context from the actual {context}.
- Handle<Context> context =
- Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
- return handle(context->native_context());
- }
- case IrOpcode::kOsrValue: {
- int const index = OpParameter<int>(node);
- if (index == Linkage::kOsrContextSpillSlotIndex) {
- return native_context;
- }
- return MaybeHandle<Context>();
- }
- case IrOpcode::kParameter: {
- Node* const start = NodeProperties::GetValueInput(node, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(node->op());
- // The context is always the last parameter to a JavaScript function,
- // and {Parameter} indices start at -1, so value outputs of {Start}
- // look like this: closure, receiver, param0, ..., paramN, context.
- if (index == start->op()->ValueOutputCount() - 2) {
- return native_context;
- }
- return MaybeHandle<Context>();
- }
- default:
- return MaybeHandle<Context>();
- }
- }
-}
-
-
-// static
-MaybeHandle<JSGlobalObject> NodeProperties::GetSpecializationGlobalObject(
- Node* node, MaybeHandle<Context> native_context) {
- Handle<Context> context;
- if (GetSpecializationNativeContext(node, native_context).ToHandle(&context)) {
- return handle(context->global_object());
- }
- return MaybeHandle<JSGlobalObject>();
-}
-
-
-// static
Type* NodeProperties::GetTypeOrAny(Node* node) {
return IsTyped(node) ? node->type() : Type::Any();
}
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index ed3c117507..23253239a1 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -7,6 +7,7 @@
#include "src/compiler/node.h"
#include "src/compiler/types.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -17,7 +18,7 @@ class Operator;
class CommonOperatorBuilder;
// A facade that simplifies access to the different kinds of inputs to a node.
-class NodeProperties final {
+class V8_EXPORT_PRIVATE NodeProperties final {
public:
// ---------------------------------------------------------------------------
// Input layout.
@@ -131,18 +132,6 @@ class NodeProperties final {
static MaybeHandle<Context> GetSpecializationContext(
Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
- // Try to retrieve the specialization native context from the given
- // {node}, optionally utilizing the knowledge about the (outermost)
- // {native_context}.
- static MaybeHandle<Context> GetSpecializationNativeContext(
- Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
-
- // Try to retrieve the specialization global object from the given
- // {node}, optionally utilizing the knowledge about the (outermost)
- // {native_context}.
- static MaybeHandle<JSGlobalObject> GetSpecializationGlobalObject(
- Node* node, MaybeHandle<Context> native_context = MaybeHandle<Context>());
-
// ---------------------------------------------------------------------------
// Type.
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index e940371b85..dc6c5dc01c 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -8,6 +8,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -39,7 +40,7 @@ typedef uint32_t NodeId;
// compilation, e.g. during lowering passes. Other information that needs to be
// associated with Nodes during compilation must be stored out-of-line indexed
// by the Node's id.
-class Node final {
+class V8_EXPORT_PRIVATE Node final {
public:
static Node* New(Zone* zone, NodeId id, const Operator* op, int input_count,
Node* const* inputs, bool has_extensible_inputs);
@@ -126,7 +127,7 @@ class Node final {
InputEdges input_edges() { return InputEdges(this); }
- class Inputs final {
+ class V8_EXPORT_PRIVATE Inputs final {
public:
typedef Node* value_type;
@@ -162,7 +163,7 @@ class Node final {
UseEdges use_edges() { return UseEdges(this); }
- class Uses final {
+ class V8_EXPORT_PRIVATE Uses final {
public:
typedef Node* value_type;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 5ac2012350..fdbe001de3 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -7,6 +7,8 @@
#include <iosfwd>
+#include "src/globals.h"
+
// Opcodes for control operators.
#define CONTROL_OP_LIST(V) \
V(Start) \
@@ -39,6 +41,7 @@
V(Float64Constant) \
V(ExternalConstant) \
V(NumberConstant) \
+ V(PointerConstant) \
V(HeapConstant) \
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
@@ -55,9 +58,11 @@
V(StateValues) \
V(TypedStateValues) \
V(ObjectState) \
+ V(TypedObjectState) \
V(Call) \
V(Parameter) \
V(OsrValue) \
+ V(OsrGuard) \
V(LoopExit) \
V(LoopExitValue) \
V(LoopExitEffect) \
@@ -123,6 +128,7 @@
V(JSCreateArray) \
V(JSCreateClosure) \
V(JSCreateIterResultObject) \
+ V(JSCreateKeyValueArray) \
V(JSCreateLiteralArray) \
V(JSCreateLiteralObject) \
V(JSCreateLiteralRegExp) \
@@ -155,6 +161,8 @@
V(JSForInPrepare) \
V(JSLoadMessage) \
V(JSStoreMessage) \
+ V(JSLoadModule) \
+ V(JSStoreModule) \
V(JSGeneratorStore) \
V(JSGeneratorRestoreContinuation) \
V(JSGeneratorRestoreRegister) \
@@ -177,6 +185,7 @@
V(ChangeInt32ToTagged) \
V(ChangeUint32ToTagged) \
V(ChangeFloat64ToTagged) \
+ V(ChangeFloat64ToTaggedPointer) \
V(ChangeTaggedToBit) \
V(ChangeBitToTagged) \
V(TruncateTaggedToWord32) \
@@ -199,7 +208,8 @@
V(CheckedTaggedToInt32) \
V(CheckedTruncateTaggedToWord32) \
V(CheckedTaggedToFloat64) \
- V(CheckedTaggedToTaggedSigned)
+ V(CheckedTaggedToTaggedSigned) \
+ V(CheckedTaggedToTaggedPointer)
#define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
V(NumberEqual) \
@@ -276,6 +286,7 @@
V(NumberToBoolean) \
V(NumberToInt32) \
V(NumberToUint32) \
+ V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
#define SIMPLIFIED_OTHER_OP_LIST(V) \
@@ -724,7 +735,7 @@ namespace compiler {
// Declare an enumeration with all the opcodes at all levels so that they
// can be globally, uniquely numbered.
-class IrOpcode {
+class V8_EXPORT_PRIVATE IrOpcode {
public:
enum Value {
#define DECLARE_OPCODE(x) k##x,
@@ -784,7 +795,7 @@ class IrOpcode {
}
};
-std::ostream& operator<<(std::ostream&, IrOpcode::Value);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 4295a22287..9198f4b9a9 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -19,18 +19,14 @@ namespace compiler {
OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
: zone_(zone), cache_(TypeCache::Get()) {
Factory* factory = isolate->factory();
- infinity_ = Type::Constant(factory->infinity_value(), zone);
- minus_infinity_ = Type::Constant(factory->minus_infinity_value(), zone);
- // Unfortunately, the infinities created in other places might be different
- // ones (eg the result of NewNumber in TypeNumberConstant).
- Type* truncating_to_zero =
- Type::Union(Type::Union(infinity_, minus_infinity_, zone),
- Type::MinusZeroOrNaN(), zone);
+ infinity_ = Type::NewConstant(factory->infinity_value(), zone);
+ minus_infinity_ = Type::NewConstant(factory->minus_infinity_value(), zone);
+ Type* truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
- singleton_false_ = Type::Constant(factory->false_value(), zone);
- singleton_true_ = Type::Constant(factory->true_value(), zone);
- singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+ singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
+ singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
signed32ish_ = Type::Union(Type::Signed32(), truncating_to_zero, zone);
unsigned32ish_ = Type::Union(Type::Unsigned32(), truncating_to_zero, zone);
}
@@ -494,6 +490,13 @@ Type* OperationTyper::NumberToUint32(Type* type) {
return Type::Unsigned32();
}
+Type* OperationTyper::NumberToUint8Clamped(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
+ if (type->Is(cache_.kUint8)) return type;
+ return cache_.kUint8;
+}
+
Type* OperationTyper::NumberSilenceNaN(Type* type) {
DCHECK(type->Is(Type::Number()));
// TODO(jarin): This is a terrible hack; we definitely need a dedicated type
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index 4fe5f59d74..b4bb8b5e73 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_OPERATOR_PROPERTIES_H_
#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -14,7 +15,7 @@ namespace compiler {
// Forward declarations.
class Operator;
-class OperatorProperties final {
+class V8_EXPORT_PRIVATE OperatorProperties final {
public:
static bool HasContextInput(const Operator* op);
static int GetContextInputCount(const Operator* op) {
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index fa1b2d89ca..4f746e2944 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -24,7 +24,6 @@ V8_INLINE N CheckRange(size_t val) {
// static
STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
-
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out)
@@ -36,8 +35,7 @@ Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
control_in_(CheckRange<uint16_t>(control_in)),
value_out_(CheckRange<uint16_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
- control_out_(CheckRange<uint16_t>(control_out)) {}
-
+ control_out_(CheckRange<uint32_t>(control_out)) {}
std::ostream& operator<<(std::ostream& os, const Operator& op) {
op.PrintTo(os);
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 8e3a9d1725..dea94f0906 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -7,8 +7,10 @@
#include <ostream> // NOLINT(readability/streams)
+#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
#include "src/base/functional.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/zone/zone.h"
@@ -28,7 +30,7 @@ namespace compiler {
// as the name for a named field access, the ID of a runtime function, etc.
// Static parameters are private to the operator and only semantically
// meaningful to the operator itself.
-class Operator : public ZoneObject {
+class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
public:
typedef uint16_t Opcode;
@@ -142,7 +144,7 @@ class Operator : public ZoneObject {
uint16_t control_in_;
uint16_t value_out_;
uint8_t effect_out_;
- uint16_t control_out_;
+ uint32_t control_out_;
DISALLOW_COPY_AND_ASSIGN(Operator);
};
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 6d61affe83..a2dc4305a3 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -47,13 +47,14 @@ OsrHelper::OsrHelper(CompilationInfo* info)
if (TRACE_COND) PrintF(__VA_ARGS__); \
} while (false)
+namespace {
// Peel outer loops and rewire the graph so that control reduction can
// produce a properly formed graph.
-static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
- Zone* tmp_zone, Node* dead,
- LoopTree* loop_tree, LoopTree::Loop* osr_loop,
- Node* osr_normal_entry, Node* osr_loop_entry) {
+void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
+ Zone* tmp_zone, Node* dead, LoopTree* loop_tree,
+ LoopTree::Loop* osr_loop, Node* osr_normal_entry,
+ Node* osr_loop_entry) {
const size_t original_count = graph->NodeCount();
AllNodes all(tmp_zone, graph);
NodeVector tmp_inputs(tmp_zone);
@@ -93,7 +94,8 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
continue;
}
if (orig->InputCount() == 0 || orig->opcode() == IrOpcode::kParameter ||
- orig->opcode() == IrOpcode::kOsrValue) {
+ orig->opcode() == IrOpcode::kOsrValue ||
+ orig->opcode() == IrOpcode::kOsrGuard) {
// No need to copy leaf nodes or parameters.
mapping->at(orig->id()) = orig;
continue;
@@ -255,6 +257,42 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
}
+void SetTypeForOsrValue(Node* osr_value, Node* loop,
+ CommonOperatorBuilder* common) {
+ Node* osr_guard = nullptr;
+ for (Node* use : osr_value->uses()) {
+ if (use->opcode() == IrOpcode::kOsrGuard) {
+ DCHECK_EQ(use->InputAt(0), osr_value);
+ osr_guard = use;
+ break;
+ }
+ }
+
+ OsrGuardType guard_type = OsrGuardType::kAny;
+ // Find the phi that uses the OsrGuard node and get the type from
+ // there. Skip the search if the OsrGuard does not have value use
+ // (i.e., if there is other use beyond the effect use).
+ if (OsrGuardTypeOf(osr_guard->op()) == OsrGuardType::kUninitialized &&
+ osr_guard->UseCount() > 1) {
+ Type* type = nullptr;
+ for (Node* use : osr_guard->uses()) {
+ if (use->opcode() == IrOpcode::kPhi) {
+ if (NodeProperties::GetControlInput(use) != loop) continue;
+ CHECK_NULL(type);
+ type = NodeProperties::GetType(use);
+ }
+ }
+ CHECK_NOT_NULL(type);
+
+ if (type->Is(Type::SignedSmall())) {
+ guard_type = OsrGuardType::kSignedSmall;
+ }
+ }
+
+ NodeProperties::ChangeOp(osr_guard, common->OsrGuard(guard_type));
+}
+
+} // namespace
void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
Zone* tmp_zone) {
@@ -283,6 +321,12 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
CHECK(osr_loop); // Should have found the OSR loop.
+ for (Node* use : osr_loop_entry->uses()) {
+ if (use->opcode() == IrOpcode::kOsrValue) {
+ SetTypeForOsrValue(use, osr_loop, common);
+ }
+ }
+
// Analyze the graph to determine how deeply nested the OSR loop is.
LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index a032c3dac2..2b6ffe418c 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -6,7 +6,7 @@
#include "src/compilation-info.h"
#include "src/compiler/pipeline-statistics.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
#include "src/isolate.h"
namespace v8 {
@@ -16,13 +16,13 @@ namespace compiler {
void PipelineStatistics::CommonStats::Begin(
PipelineStatistics* pipeline_stats) {
DCHECK(!scope_);
- scope_.reset(new ZonePool::StatsScope(pipeline_stats->zone_pool_));
+ scope_.reset(new ZoneStats::StatsScope(pipeline_stats->zone_stats_));
timer_.Start();
outer_zone_initial_size_ = pipeline_stats->OuterZoneSize();
allocated_bytes_at_start_ =
outer_zone_initial_size_ -
pipeline_stats->total_stats_.outer_zone_initial_size_ +
- pipeline_stats->zone_pool_->GetCurrentAllocatedBytes();
+ pipeline_stats->zone_stats_->GetCurrentAllocatedBytes();
}
@@ -43,12 +43,11 @@ void PipelineStatistics::CommonStats::End(
timer_.Stop();
}
-
PipelineStatistics::PipelineStatistics(CompilationInfo* info,
- ZonePool* zone_pool)
+ ZoneStats* zone_stats)
: isolate_(info->isolate()),
outer_zone_(info->zone()),
- zone_pool_(zone_pool),
+ zone_stats_(zone_stats),
compilation_stats_(isolate_->GetTurboStatistics()),
source_size_(0),
phase_kind_name_(nullptr),
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index a9931ebed7..b09e2363d6 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -10,7 +10,7 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-statistics.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
namespace v8 {
namespace internal {
@@ -20,7 +20,7 @@ class PhaseScope;
class PipelineStatistics : public Malloced {
public:
- PipelineStatistics(CompilationInfo* info, ZonePool* zone_pool);
+ PipelineStatistics(CompilationInfo* info, ZoneStats* zone_stats);
~PipelineStatistics();
void BeginPhaseKind(const char* phase_kind_name);
@@ -39,7 +39,7 @@ class PipelineStatistics : public Malloced {
void End(PipelineStatistics* pipeline_stats,
CompilationStatistics::BasicStats* diff);
- std::unique_ptr<ZonePool::StatsScope> scope_;
+ std::unique_ptr<ZoneStats::StatsScope> scope_;
base::ElapsedTimer timer_;
size_t outer_zone_initial_size_;
size_t allocated_bytes_at_start_;
@@ -57,7 +57,7 @@ class PipelineStatistics : public Malloced {
Isolate* isolate_;
Zone* outer_zone_;
- ZonePool* zone_pool_;
+ ZoneStats* zone_stats_;
CompilationStatistics* compilation_stats_;
std::string function_name_;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 805b687e7d..2614155722 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -70,7 +70,7 @@
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
#include "src/compiler/verifier.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
#include "src/isolate-inl.h"
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
@@ -85,19 +85,19 @@ namespace compiler {
class PipelineData {
public:
// For main entry point.
- PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
PipelineStatistics* pipeline_statistics)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
outer_zone_(info_->zone()),
- zone_pool_(zone_pool),
+ zone_stats_(zone_stats),
pipeline_statistics_(pipeline_statistics),
- graph_zone_scope_(zone_pool_),
+ graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_zone_(graph_zone_scope_.zone()),
- instruction_zone_scope_(zone_pool_),
+ instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
graph_ = new (graph_zone_) Graph(graph_zone_);
@@ -114,48 +114,48 @@ class PipelineData {
}
// For WASM compile entry point.
- PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
SourcePositionTable* source_positions)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
- zone_pool_(zone_pool),
- graph_zone_scope_(zone_pool_),
+ zone_stats_(zone_stats),
+ graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_(graph),
source_positions_(source_positions),
- instruction_zone_scope_(zone_pool_),
+ instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
// For machine graph testing entry point.
- PipelineData(ZonePool* zone_pool, CompilationInfo* info, Graph* graph,
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
Schedule* schedule)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
- zone_pool_(zone_pool),
- graph_zone_scope_(zone_pool_),
+ zone_stats_(zone_stats),
+ graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_(graph),
source_positions_(new (info->zone()) SourcePositionTable(graph_)),
schedule_(schedule),
- instruction_zone_scope_(zone_pool_),
+ instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
- register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
// For register allocation testing entry point.
- PipelineData(ZonePool* zone_pool, CompilationInfo* info,
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
InstructionSequence* sequence)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
- zone_pool_(zone_pool),
- graph_zone_scope_(zone_pool_),
- instruction_zone_scope_(zone_pool_),
+ zone_stats_(zone_stats),
+ graph_zone_scope_(zone_stats_, ZONE_NAME),
+ instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(sequence->zone()),
sequence_(sequence),
- register_allocation_zone_scope_(zone_pool_),
+ register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
~PipelineData() {
@@ -166,7 +166,7 @@ class PipelineData {
Isolate* isolate() const { return isolate_; }
CompilationInfo* info() const { return info_; }
- ZonePool* zone_pool() const { return zone_pool_; }
+ ZoneStats* zone_stats() const { return zone_stats_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
@@ -186,11 +186,11 @@ class PipelineData {
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
- MaybeHandle<Context> native_context() const {
- if (info()->is_native_context_specializing()) {
- return handle(info()->native_context(), isolate());
- }
- return MaybeHandle<Context>();
+ Handle<Context> native_context() const {
+ return handle(info()->native_context(), isolate());
+ }
+ Handle<JSGlobalObject> global_object() const {
+ return handle(info()->global_object(), isolate());
}
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
@@ -274,8 +274,8 @@ class PipelineData {
if (descriptor && descriptor->RequiresFrameAsIncoming()) {
sequence_->instruction_blocks()[0]->mark_needs_frame();
} else {
- DCHECK_EQ(0, descriptor->CalleeSavedFPRegisters());
- DCHECK_EQ(0, descriptor->CalleeSavedRegisters());
+ DCHECK_EQ(0u, descriptor->CalleeSavedFPRegisters());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters());
}
}
@@ -283,7 +283,7 @@ class PipelineData {
DCHECK(frame_ == nullptr);
int fixed_frame_size = 0;
if (descriptor != nullptr) {
- fixed_frame_size = CalculateFixedFrameSize(descriptor);
+ fixed_frame_size = descriptor->CalculateFixedFrameSize();
}
frame_ = new (instruction_zone()) Frame(fixed_frame_size);
}
@@ -313,14 +313,14 @@ class PipelineData {
CompilationInfo* const info_;
std::unique_ptr<char[]> debug_name_;
Zone* outer_zone_ = nullptr;
- ZonePool* const zone_pool_;
+ ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
bool compilation_failed_ = false;
Handle<Code> code_ = Handle<Code>::null();
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
- ZonePool::Scope graph_zone_scope_;
+ ZoneStats::Scope graph_zone_scope_;
Zone* graph_zone_ = nullptr;
Graph* graph_ = nullptr;
SourcePositionTable* source_positions_ = nullptr;
@@ -337,7 +337,7 @@ class PipelineData {
// instruction_zone_. They are all set to nullptr when the instruction_zone_
// is
// destroyed.
- ZonePool::Scope instruction_zone_scope_;
+ ZoneStats::Scope instruction_zone_scope_;
Zone* instruction_zone_;
InstructionSequence* sequence_ = nullptr;
Frame* frame_ = nullptr;
@@ -345,7 +345,7 @@ class PipelineData {
// All objects in the following group of fields are allocated in
// register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
- ZonePool::Scope register_allocation_zone_scope_;
+ ZoneStats::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
RegisterAllocationData* register_allocation_data_ = nullptr;
@@ -355,16 +355,6 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
- int CalculateFixedFrameSize(CallDescriptor* descriptor) {
- if (descriptor->IsJSFunctionCall()) {
- return StandardFrameConstants::kFixedSlotCount;
- }
- return descriptor->IsCFunctionCall()
- ? (CommonFrameConstants::kFixedSlotCountAboveFp +
- CommonFrameConstants::kCPSlotCount)
- : TypedFrameConstants::kFixedSlotCount;
- }
-
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
@@ -389,7 +379,7 @@ class PipelineImpl final {
// Perform the actual code generation and return handle to a code object.
Handle<Code> GenerateCode(Linkage* linkage);
- bool ScheduleAndSelectInstructions(Linkage* linkage);
+ bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
void RunPrintAndVerify(const char* phase, bool untyped = false);
Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
@@ -437,38 +427,6 @@ void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
}
-class AstGraphBuilderWithPositions final : public AstGraphBuilder {
- public:
- AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph,
- LoopAssignmentAnalysis* loop_assignment,
- TypeHintAnalysis* type_hint_analysis,
- SourcePositionTable* source_positions)
- : AstGraphBuilder(local_zone, info, jsgraph, 1.0f, loop_assignment,
- type_hint_analysis),
- source_positions_(source_positions),
- start_position_(info->shared_info()->start_position()) {}
-
- bool CreateGraph() {
- SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
- return AstGraphBuilder::CreateGraph();
- }
-
-#define DEF_VISIT(type) \
- void Visit##type(type* node) override { \
- SourcePositionTable::Scope pos(source_positions_, \
- SourcePosition(node->position())); \
- AstGraphBuilder::Visit##type(node); \
- }
- AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- private:
- SourcePositionTable* const source_positions_;
- SourcePosition const start_position_;
-};
-
-
class SourcePositionWrapper final : public Reducer {
public:
SourcePositionWrapper(Reducer* reducer, SourcePositionTable* table)
@@ -518,21 +476,21 @@ class PipelineRunScope {
: phase_scope_(
phase_name == nullptr ? nullptr : data->pipeline_statistics(),
phase_name),
- zone_scope_(data->zone_pool()) {}
+ zone_scope_(data->zone_stats(), ZONE_NAME) {}
Zone* zone() { return zone_scope_.zone(); }
private:
PhaseScope phase_scope_;
- ZonePool::Scope zone_scope_;
+ ZoneStats::Scope zone_scope_;
};
PipelineStatistics* CreatePipelineStatistics(CompilationInfo* info,
- ZonePool* zone_pool) {
+ ZoneStats* zone_stats) {
PipelineStatistics* pipeline_statistics = nullptr;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics = new PipelineStatistics(info, zone_pool);
+ pipeline_statistics = new PipelineStatistics(info, zone_stats);
pipeline_statistics->BeginPhaseKind("initializing");
}
@@ -568,12 +526,12 @@ class PipelineCompilationJob final : public CompilationJob {
// Note that the CompilationInfo is not initialized at the time we pass it
// to the CompilationJob constructor, but it is not dereferenced there.
: CompilationJob(isolate, &info_, "TurboFan"),
- zone_(isolate->allocator()),
- zone_pool_(isolate->allocator()),
- parse_info_(&zone_, function),
+ zone_(isolate->allocator(), ZONE_NAME),
+ zone_stats_(isolate->allocator()),
+ parse_info_(&zone_, handle(function->shared())),
info_(&parse_info_, function),
- pipeline_statistics_(CreatePipelineStatistics(info(), &zone_pool_)),
- data_(&zone_pool_, info(), pipeline_statistics_.get()),
+ pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
+ data_(&zone_stats_, info(), pipeline_statistics_.get()),
pipeline_(&data_),
linkage_(nullptr) {}
@@ -584,7 +542,7 @@ class PipelineCompilationJob final : public CompilationJob {
private:
Zone zone_;
- ZonePool zone_pool_;
+ ZoneStats zone_stats_;
ParseInfo parse_info_;
CompilationInfo info_;
std::unique_ptr<PipelineStatistics> pipeline_statistics_;
@@ -603,20 +561,17 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (!FLAG_always_opt) {
info()->MarkAsBailoutOnUninitialized();
}
- if (FLAG_native_context_specialization) {
- info()->MarkAsNativeContextSpecializing();
- }
if (FLAG_turbo_inlining) {
info()->MarkAsInliningEnabled();
}
}
if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
info()->MarkAsDeoptimizationEnabled();
- }
- if (!info()->is_optimizing_from_bytecode()) {
if (FLAG_inline_accessors) {
info()->MarkAsAccessorInliningEnabled();
}
+ }
+ if (!info()->is_optimizing_from_bytecode()) {
if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
info()->MarkAsTypeFeedbackEnabled();
}
@@ -662,8 +617,8 @@ class PipelineWasmCompilationJob final : public CompilationJob {
SourcePositionTable* source_positions)
: CompilationJob(info->isolate(), info, "TurboFan",
State::kReadyToExecute),
- zone_pool_(info->isolate()->allocator()),
- data_(&zone_pool_, info, graph, source_positions),
+ zone_stats_(info->isolate()->allocator()),
+ data_(&zone_stats_, info, graph, source_positions),
pipeline_(&data_),
linkage_(descriptor) {}
@@ -673,7 +628,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
Status FinalizeJobImpl() final;
private:
- ZonePool zone_pool_;
+ ZoneStats zone_stats_;
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
@@ -695,7 +650,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
pipeline_.RunPrintAndVerify("Machine", true);
- if (!pipeline_.ScheduleAndSelectInstructions(&linkage_)) return FAILED;
+ if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
return SUCCEEDED;
}
@@ -761,12 +716,14 @@ struct GraphBuilderPhase {
if (data->info()->is_optimizing_from_bytecode()) {
BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
- data->jsgraph(), 1.0f);
+ data->jsgraph(), 1.0f,
+ data->source_positions());
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
- temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->type_hint_analysis(), data->source_positions());
+ temp_zone, data->info(), data->jsgraph(), 1.0f,
+ data->loop_assignment(), data->type_hint_analysis(),
+ data->source_positions());
succeeded = graph_builder.CreateGraph();
}
@@ -800,10 +757,10 @@ struct InliningPhase {
data->info()->is_function_context_specializing()
? handle(data->info()->context())
: MaybeHandle<Context>());
- JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
- data->jsgraph());
+ JSFrameSpecialization frame_specialization(
+ &graph_reducer, data->info()->osr_frame(), data->jsgraph());
JSGlobalObjectSpecialization global_object_specialization(
- &graph_reducer, data->jsgraph(), data->native_context(),
+ &graph_reducer, data->jsgraph(), data->global_object(),
data->info()->dependencies());
JSNativeContextSpecialization::Flags flags =
JSNativeContextSpecialization::kNoFlags;
@@ -819,11 +776,11 @@ struct InliningPhase {
JSNativeContextSpecialization native_context_specialization(
&graph_reducer, data->jsgraph(), flags, data->native_context(),
data->info()->dependencies(), temp_zone);
- JSInliningHeuristic inlining(&graph_reducer,
- data->info()->is_inlining_enabled()
- ? JSInliningHeuristic::kGeneralInlining
- : JSInliningHeuristic::kRestrictedInlining,
- temp_zone, data->info(), data->jsgraph());
+ JSInliningHeuristic inlining(
+ &graph_reducer, data->info()->is_inlining_enabled()
+ ? JSInliningHeuristic::kGeneralInlining
+ : JSInliningHeuristic::kRestrictedInlining,
+ temp_zone, data->info(), data->jsgraph(), data->source_positions());
JSIntrinsicLowering intrinsic_lowering(
&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
@@ -860,7 +817,20 @@ struct TyperPhase {
}
};
-#ifdef DEBUG
+struct OsrTyperPhase {
+ static const char* phase_name() { return "osr typer"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ // Dummy induction variable optimizer: at the moment, we do not try
+ // to compute loop variable bounds on OSR.
+ LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
+ data->common(), temp_zone);
+ Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
+ typer.Run(roots, &induction_vars);
+ }
+};
struct UntyperPhase {
static const char* phase_name() { return "untyper"; }
@@ -877,6 +847,12 @@ struct UntyperPhase {
}
};
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ for (Node* node : roots) {
+ NodeProperties::RemoveType(node);
+ }
+
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -884,12 +860,15 @@ struct UntyperPhase {
}
};
-#endif // DEBUG
-
struct OsrDeconstructionPhase {
static const char* phase_name() { return "OSR deconstruction"; }
void Run(PipelineData* data, Zone* temp_zone) {
+ GraphTrimmer trimmer(temp_zone, data->graph());
+ NodeVector roots(temp_zone);
+ data->jsgraph()->GetCachedNodes(&roots);
+ trimmer.TrimGraph(roots.begin(), roots.end());
+
OsrHelper osr_helper(data->info());
osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
}
@@ -908,11 +887,8 @@ struct TypedLoweringPhase {
data->info()->is_deoptimization_enabled()
? JSBuiltinReducer::kDeoptimizationEnabled
: JSBuiltinReducer::kNoFlags,
- data->info()->dependencies());
- MaybeHandle<LiteralsArray> literals_array =
- data->info()->is_native_context_specializing()
- ? handle(data->info()->closure()->literals(), data->isolate())
- : MaybeHandle<LiteralsArray>();
+ data->info()->dependencies(), data->native_context());
+ Handle<LiteralsArray> literals_array(data->info()->closure()->literals());
JSCreateLowering create_lowering(
&graph_reducer, data->info()->dependencies(), data->jsgraph(),
literals_array, data->native_context(), temp_zone);
@@ -960,6 +936,10 @@ struct EscapeAnalysisPhase {
&escape_analysis, temp_zone);
AddReducer(data, &graph_reducer, &escape_reducer);
graph_reducer.ReduceGraph();
+ if (escape_reducer.compilation_failed()) {
+ data->set_compilation_failed();
+ return;
+ }
escape_reducer.VerifyReplacement();
}
};
@@ -1068,7 +1048,8 @@ struct EffectControlLinearizationPhase {
// chains and lower them,
// - get rid of the region markers,
// - introduce effect phis and rewire effects to get SSA again.
- EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone);
+ EffectControlLinearizer linearizer(data->jsgraph(), schedule, temp_zone,
+ data->source_positions());
linearizer.Run();
}
};
@@ -1189,7 +1170,9 @@ struct LateGraphTrimmingPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphTrimmer trimmer(temp_zone, data->graph());
NodeVector roots(temp_zone);
- data->jsgraph()->GetCachedNodes(&roots);
+ if (data->jsgraph()) {
+ data->jsgraph()->GetCachedNodes(&roots);
+ }
trimmer.TrimGraph(roots.begin(), roots.end());
}
};
@@ -1503,7 +1486,11 @@ bool PipelineImpl::CreateGraph() {
// Perform OSR deconstruction.
if (info()->is_osr()) {
+ Run<OsrTyperPhase>();
+
Run<OsrDeconstructionPhase>();
+
+ Run<UntyperPhase>();
RunPrintAndVerify("OSR deconstruction", true);
}
@@ -1522,10 +1509,22 @@ bool PipelineImpl::CreateGraph() {
// Run the type-sensitive lowerings and optimizations on the graph.
{
+ // Determine the Typer operation flags.
+ Typer::Flags flags = Typer::kNoFlags;
+ if (is_sloppy(info()->shared_info()->language_mode()) &&
+ !info()->shared_info()->IsBuiltin()) {
+ // Sloppy mode functions always have an Object for this.
+ flags |= Typer::kThisIsReceiver;
+ }
+ if (IsClassConstructor(info()->shared_info()->kind())) {
+ // Class constructors cannot be [[Call]]ed.
+ flags |= Typer::kNewTargetIsReceiver;
+ }
+
// Type the graph and keep the Typer running on newly created nodes within
// this scope; the Typer is automatically unlinked from the Graph once we
// leave this scope below.
- Typer typer(isolate(), data->graph());
+ Typer typer(isolate(), flags, data->graph());
Run<TyperPhase>(&typer);
RunPrintAndVerify("Typed");
@@ -1548,14 +1547,21 @@ bool PipelineImpl::CreateGraph() {
RunPrintAndVerify("Loop peeled");
}
- if (FLAG_turbo_escape) {
- Run<EscapeAnalysisPhase>();
- RunPrintAndVerify("Escape Analysed");
- }
+ if (!info()->shared_info()->asm_function()) {
+ if (FLAG_turbo_load_elimination) {
+ Run<LoadEliminationPhase>();
+ RunPrintAndVerify("Load eliminated");
+ }
- if (!info()->shared_info()->asm_function() && FLAG_turbo_load_elimination) {
- Run<LoadEliminationPhase>();
- RunPrintAndVerify("Load eliminated");
+ if (FLAG_turbo_escape) {
+ Run<EscapeAnalysisPhase>();
+ if (data->compilation_failed()) {
+ info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+ data->EndPhaseKind();
+ return false;
+ }
+ RunPrintAndVerify("Escape Analysed");
+ }
}
}
@@ -1627,13 +1633,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late optimized", true);
- Run<LateGraphTrimmingPhase>();
- // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
- RunPrintAndVerify("Late trimmed", true);
-
data->source_positions()->RemoveDecorator();
- return ScheduleAndSelectInstructions(linkage);
+ return ScheduleAndSelectInstructions(linkage, true);
}
Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
@@ -1645,11 +1647,11 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
if (isolate->serializer_enabled()) info.PrepareForSerializing();
// Construct a pipeline for scheduling and code generation.
- ZonePool zone_pool(isolate->allocator());
- PipelineData data(&zone_pool, &info, graph, schedule);
+ ZoneStats zone_stats(isolate->allocator());
+ PipelineData data(&zone_stats, &info, graph, schedule);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(&info, &zone_pool));
+ pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
pipeline_statistics->BeginPhaseKind("stub codegen");
}
@@ -1671,10 +1673,10 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
// static
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
- ZonePool zone_pool(info->isolate()->allocator());
+ ZoneStats zone_stats(info->isolate()->allocator());
std::unique_ptr<PipelineStatistics> pipeline_statistics(
- CreatePipelineStatistics(info, &zone_pool));
- PipelineData data(&zone_pool, info, pipeline_statistics.get());
+ CreatePipelineStatistics(info, &zone_stats));
+ PipelineData data(&zone_stats, info, pipeline_statistics.get());
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -1699,11 +1701,11 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule) {
// Construct a pipeline for scheduling and code generation.
- ZonePool zone_pool(info->isolate()->allocator());
- PipelineData data(&zone_pool, info, graph, schedule);
+ ZoneStats zone_stats(info->isolate()->allocator());
+ PipelineData data(&zone_stats, info, graph, schedule);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(info, &zone_pool));
+ pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
pipeline_statistics->BeginPhaseKind("test codegen");
}
@@ -1738,20 +1740,25 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
bool run_verifier) {
CompilationInfo info(ArrayVector("testing"), sequence->isolate(),
sequence->zone(), Code::ComputeFlags(Code::STUB));
- ZonePool zone_pool(sequence->isolate()->allocator());
- PipelineData data(&zone_pool, &info, sequence);
+ ZoneStats zone_stats(sequence->isolate()->allocator());
+ PipelineData data(&zone_stats, &info, sequence);
PipelineImpl pipeline(&data);
pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
-bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
+bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
+ bool trim_graph) {
CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
PipelineData* data = this->data_;
DCHECK_NOT_NULL(data->graph());
+ if (trim_graph) {
+ Run<LateGraphTrimmingPhase>();
+ RunPrintAndVerify("Late trimmed", true);
+ }
if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->schedule());
@@ -1760,8 +1767,11 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage) {
info(), data->graph(), data->schedule()));
}
- if (FLAG_turbo_verify_machine_graph) {
- Zone temp_zone(data->isolate()->allocator());
+ if (FLAG_turbo_verify_machine_graph != nullptr &&
+ (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
+ !strcmp(FLAG_turbo_verify_machine_graph,
+ data->info()->GetDebugName().get()))) {
+ Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
&temp_zone);
}
@@ -1871,7 +1881,7 @@ Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
Linkage linkage(call_descriptor);
// Schedule the graph, perform instruction selection and register allocation.
- if (!ScheduleAndSelectInstructions(&linkage)) return Handle<Code>();
+ if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
// Generate the final machine code.
return GenerateCode(&linkage);
@@ -1885,7 +1895,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
std::unique_ptr<Zone> verifier_zone;
RegisterAllocatorVerifier* verifier = nullptr;
if (run_verifier) {
- verifier_zone.reset(new Zone(isolate()->allocator()));
+ verifier_zone.reset(new Zone(isolate()->allocator(), ZONE_NAME));
verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
verifier_zone.get(), config, data->sequence());
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 64befbfe06..0c0a57b286 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -7,6 +7,7 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
+#include "src/globals.h"
#include "src/objects.h"
namespace v8 {
@@ -53,9 +54,9 @@ class Pipeline : public AllStatic {
Schedule* schedule = nullptr);
// Run just the register allocator phases.
- static bool AllocateRegistersForTesting(const RegisterConfiguration* config,
- InstructionSequence* sequence,
- bool run_verifier);
+ V8_EXPORT_PRIVATE static bool AllocateRegistersForTesting(
+ const RegisterConfiguration* config, InstructionSequence* sequence,
+ bool run_verifier);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index f8f3099209..a838ede47c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -1012,8 +1012,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -1022,11 +1021,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmp(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- if (opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -1093,7 +1090,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchStackPointer:
@@ -1241,39 +1238,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
__ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
break;
- case kPPC_ShiftLeftPair:
+ case kPPC_ShiftLeftPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
- i.InputRegister(0), i.InputRegister(1),
- i.InputInt32(2));
+ __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+ i.InputRegister(1), i.InputInt32(2));
} else {
- __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
- i.InputRegister(0), i.InputRegister(1), kScratchReg,
- i.InputRegister(2));
+ __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+ i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
- case kPPC_ShiftRightPair:
+ }
+ case kPPC_ShiftRightPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
- __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1), kScratchReg,
i.InputRegister(2));
}
break;
- case kPPC_ShiftRightAlgPair:
+ }
+ case kPPC_ShiftRightAlgPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
- __ ShiftRightAlgPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1),
kScratchReg, i.InputRegister(2));
}
break;
+ }
#endif
case kPPC_RotRight32:
if (HasRegisterInput(instr, 1)) {
@@ -2078,7 +2082,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2125,6 +2129,9 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+ if (descriptor->PushArgumentCount()) {
+ __ Push(kJavaScriptCallArgCountRegister);
+ }
} else {
StackFrame::Type type = info()->GetOutputStackFrameType();
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
@@ -2133,7 +2140,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2170,8 +2178,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -2189,20 +2196,33 @@ void CodeGenerator::AssembleReturn() {
if (double_saves != 0) {
__ MultiPopDoubles(double_saves);
}
+ PPCOperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now unless they have an variable
+ // number of stack slot pops
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
- __ Ret(pop_count);
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_count += g.ToConstant(pop).ToInt32();
+ } else {
+ __ Drop(g.ToRegister(pop));
+ }
+ __ Drop(pop_count);
+ __ Ret();
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index a2eb7b8f22..768b188aaa 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -245,7 +245,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -810,49 +810,70 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
#if !V8_TARGET_ARCH_PPC64
void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
- Node* node) {
+ InstructionCode opcode2, Node* node) {
PPCOperandGenerator g(selector);
- // We use UseUniqueRegister here to avoid register sharing with the output
- // registers.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- selector->Emit(opcode, 2, outputs, 4, inputs);
+ selector->Emit(opcode, 2, outputs, 4, inputs);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ selector->Emit(opcode2, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairAdd(Node* node) {
- VisitPairBinop(this, kPPC_AddPair, node);
+ VisitPairBinop(this, kPPC_AddPair, kPPC_Add, node);
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
- VisitPairBinop(this, kPPC_SubPair, node);
+ VisitPairBinop(this, kPPC_SubPair, kPPC_Sub, node);
}
void InstructionSelector::VisitInt32PairMul(Node* node) {
PPCOperandGenerator g(this);
- InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+ Emit(kPPC_MulPair, 2, outputs, 4, inputs, 2, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kPPC_Mul32, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
+ }
}
+namespace {
+// Shared routine for multiple shift operations.
void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
Node* node) {
PPCOperandGenerator g(selector);
+ // We use g.UseUniqueRegister here to guarantee that there is
+ // no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
@@ -861,16 +882,27 @@ void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
shift_operand = g.UseUniqueRegister(m.node());
}
- InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)),
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
shift_operand};
- InstructionOperand outputs[] = {
- g.DefineSameAsFirst(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
- selector->Emit(opcode, 2, outputs, 3, inputs);
+ InstructionOperand outputs[2];
+ InstructionOperand temps[1];
+ int32_t output_count = 0;
+ int32_t temp_count = 0;
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (projection1) {
+ outputs[output_count++] = g.DefineAsRegister(projection1);
+ } else {
+ temps[temp_count++] = g.TempRegister();
+ }
+
+ selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
+} // namespace
void InstructionSelector::VisitWord32PairShl(Node* node) {
VisitPairShift(this, kPPC_ShiftLeftPair, node);
@@ -1570,21 +1602,22 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, InstructionCode opcode,
FlagsContinuation* cont) {
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord32Compare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
@@ -1706,7 +1739,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Branch could not be combined with a compare, emit compare against 0.
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index cdf45ab776..14695c11b8 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -120,23 +120,46 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
}
void RawMachineAssembler::Return(Node* value) {
- Node* ret = MakeNode(common()->Return(), 1, &value);
+ Node* values[] = {Int32Constant(0), value};
+ Node* ret = MakeNode(common()->Return(1), 2, values);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
void RawMachineAssembler::Return(Node* v1, Node* v2) {
- Node* values[] = {v1, v2};
- Node* ret = MakeNode(common()->Return(2), 2, values);
+ Node* values[] = {Int32Constant(0), v1, v2};
+ Node* ret = MakeNode(common()->Return(2), 3, values);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
- Node* values[] = {v1, v2, v3};
- Node* ret = MakeNode(common()->Return(3), 3, values);
+ Node* values[] = {Int32Constant(0), v1, v2, v3};
+ Node* ret = MakeNode(common()->Return(3), 4, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
+ Node* values[] = {pop, value};
+ Node* ret = MakeNode(common()->Return(1), 2, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2) {
+ Node* values[] = {pop, v1, v2};
+ Node* ret = MakeNode(common()->Return(2), 3, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
+ Node* v3) {
+ Node* values[] = {pop, v1, v2, v3};
+ Node* ret = MakeNode(common()->Return(3), 4, values);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -253,6 +276,21 @@ Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
ref, arity, context);
}
+Node* RawMachineAssembler::CallRuntime5(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* arg5, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 5, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(5);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
+ arg5, ref, arity, context);
+}
Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
Node** args) {
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index cdd368ca7c..6d2accb861 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -13,6 +13,7 @@
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/factory.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -34,7 +35,7 @@ class Schedule;
// Also note that the generated graph is only valid together with the generated
// schedule, using one without the other is invalid as the graph is inherently
// non-schedulable due to missing control and effect dependencies.
-class RawMachineAssembler {
+class V8_EXPORT_PRIVATE RawMachineAssembler {
public:
RawMachineAssembler(
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
@@ -717,6 +718,9 @@ class RawMachineAssembler {
// Call to a runtime function with four arguments.
Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* context);
+ // Call to a runtime function with five arguments.
+ Node* CallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* context);
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
@@ -773,6 +777,9 @@ class RawMachineAssembler {
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
+ void PopAndReturn(Node* pop, Node* value);
+ void PopAndReturn(Node* pop, Node* v1, Node* v2);
+ void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
void DebugBreak();
@@ -834,8 +841,7 @@ class RawMachineAssembler {
DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
};
-
-class RawMachineLabel final {
+class V8_EXPORT_PRIVATE RawMachineLabel final {
public:
enum Type { kDeferred, kNonDeferred };
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index efcdcb42e6..0ed479fa99 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -19,6 +19,11 @@ namespace compiler {
namespace {
+static const int kFloatRepBit =
+ 1 << static_cast<int>(MachineRepresentation::kFloat32);
+static const int kSimd128RepBit =
+ 1 << static_cast<int>(MachineRepresentation::kSimd128);
+
void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
auto it = std::find(v->begin(), v->end(), range);
DCHECK(it != v->end());
@@ -33,7 +38,7 @@ int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
- return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
+ return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
: cfg->num_allocatable_general_registers();
}
@@ -74,14 +79,8 @@ int GetByteWidth(MachineRepresentation rep) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- return kPointerSize;
case MachineRepresentation::kFloat32:
-// TODO(bbudge) Eliminate this when FP register aliasing works.
-#if V8_TARGET_ARCH_ARM
- return kDoubleSize;
-#else
return kPointerSize;
-#endif
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
return kDoubleSize;
@@ -342,6 +341,11 @@ UsePositionHintType UsePosition::HintTypeForOperand(
return UsePositionHintType::kNone;
}
+void UsePosition::SetHint(UsePosition* use_pos) {
+ DCHECK_NOT_NULL(use_pos);
+ hint_ = use_pos;
+ flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
+}
void UsePosition::ResolveHint(UsePosition* use_pos) {
DCHECK_NOT_NULL(use_pos);
@@ -493,6 +497,12 @@ UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
return pos;
}
+LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
+ const LifetimePosition& start) const {
+ UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
+ if (next_use == nullptr) return End();
+ return next_use->pos();
+}
UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
LifetimePosition start) const {
@@ -581,7 +591,9 @@ void LiveRange::AdvanceLastProcessedMarker(
LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
int new_id = TopLevel()->GetNextChildId();
LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel());
- DetachAt(position, child, zone);
+ // If we split, we do so because we're about to switch registers or move
+ // to/from a slot, so there's no value in connecting hints.
+ DetachAt(position, child, zone, DoNotConnectHints);
child->top_level_ = TopLevel();
child->next_ = next_;
@@ -589,9 +601,9 @@ LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
return child;
}
-
UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
- Zone* zone) {
+ Zone* zone,
+ HintConnectionOption connect_hints) {
DCHECK(Start() < position);
DCHECK(End() > position);
DCHECK(result->IsEmpty());
@@ -670,6 +682,10 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
last_processed_use_ = nullptr;
current_interval_ = nullptr;
+ if (connect_hints == ConnectHints && use_before != nullptr &&
+ use_after != nullptr) {
+ use_after->SetHint(use_before);
+ }
#ifdef DEBUG
VerifyChildStructure();
result->VerifyChildStructure();
@@ -912,17 +928,21 @@ void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
if (end >= End()) {
DCHECK(start > Start());
- DetachAt(start, &splinter_temp, zone);
+ DetachAt(start, &splinter_temp, zone, ConnectHints);
next_ = nullptr;
} else {
DCHECK(start < End() && Start() < end);
const int kInvalidId = std::numeric_limits<int>::max();
- UsePosition* last = DetachAt(start, &splinter_temp, zone);
+ UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
LiveRange end_part(kInvalidId, this->representation(), nullptr);
- last_in_splinter = splinter_temp.DetachAt(end, &end_part, zone);
+ // The last chunk exits the deferred region, and we don't want to connect
+ // hints here, because the non-deferred region shouldn't be affected
+ // by allocation decisions on the deferred path.
+ last_in_splinter =
+ splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
next_ = end_part.next_;
last_interval_->set_next(end_part.first_interval_);
@@ -1345,14 +1365,23 @@ RegisterAllocationData::RegisterAllocationData(
allocation_zone()),
fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
allocation_zone()),
+ fixed_float_live_ranges_(allocation_zone()),
fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
allocation_zone()),
+ fixed_simd128_live_ranges_(allocation_zone()),
spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
delayed_references_(allocation_zone()),
assigned_registers_(nullptr),
assigned_double_registers_(nullptr),
virtual_register_count_(code->VirtualRegisterCount()),
preassigned_slot_ranges_(zone) {
+ if (!kSimpleFPAliasing) {
+ fixed_float_live_ranges_.resize(this->config()->num_float_registers(),
+ nullptr);
+ fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(),
+ nullptr);
+ }
+
assigned_registers_ = new (code_zone())
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
@@ -1524,8 +1553,21 @@ void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
- case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
+ if (kSimpleFPAliasing) {
+ assigned_double_registers_->Add(index);
+ } else {
+ int alias_base_index = -1;
+ int aliases = config()->GetAliases(
+ rep, index, MachineRepresentation::kFloat64, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ assigned_double_registers_->Add(aliased_reg);
+ }
+ }
+ break;
+ case MachineRepresentation::kFloat64:
assigned_double_registers_->Add(index);
break;
default:
@@ -1852,7 +1894,11 @@ int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
int result = -index - 1;
switch (rep) {
case MachineRepresentation::kSimd128:
+ result -= config()->num_float_registers();
+ // Fall through.
case MachineRepresentation::kFloat32:
+ result -= config()->num_double_registers();
+ // Fall through.
case MachineRepresentation::kFloat64:
result -= config()->num_general_registers();
break;
@@ -1879,24 +1925,33 @@ TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
int index, MachineRepresentation rep) {
- TopLevelLiveRange* result = nullptr;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128:
- DCHECK(index < config()->num_double_registers());
- result = data()->fixed_double_live_ranges()[index];
- if (result == nullptr) {
- result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
- DCHECK(result->IsFixed());
- result->set_assigned_register(index);
- data()->MarkAllocated(rep, index);
- data()->fixed_double_live_ranges()[index] = result;
- }
- break;
- default:
- UNREACHABLE();
- break;
+ int num_regs = config()->num_double_registers();
+ ZoneVector<TopLevelLiveRange*>* live_ranges =
+ &data()->fixed_double_live_ranges();
+ if (!kSimpleFPAliasing) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ num_regs = config()->num_float_registers();
+ live_ranges = &data()->fixed_float_live_ranges();
+ break;
+ case MachineRepresentation::kSimd128:
+ num_regs = config()->num_simd128_registers();
+ live_ranges = &data()->fixed_simd128_live_ranges();
+ break;
+ default:
+ break;
+ }
+ }
+
+ DCHECK(index < num_regs);
+ USE(num_regs);
+ TopLevelLiveRange* result = (*live_ranges)[index];
+ if (result == nullptr) {
+ result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
+ DCHECK(result->IsFixed());
+ result->set_assigned_register(index);
+ data()->MarkAllocated(rep, index);
+ (*live_ranges)[index] = result;
}
return result;
}
@@ -1972,6 +2027,13 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int block_start = block->first_instruction_index();
LifetimePosition block_start_position =
LifetimePosition::GapFromInstructionIndex(block_start);
+ bool fixed_float_live_ranges = false;
+ bool fixed_simd128_live_ranges = false;
+ if (!kSimpleFPAliasing) {
+ int mask = data()->code()->representation_mask();
+ fixed_float_live_ranges = (mask & kFloatRepBit) != 0;
+ fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0;
+ }
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
@@ -2020,8 +2082,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
- for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
- ++i) {
+ for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
// Add a UseInterval for all DoubleRegisters. See comment above for
// general registers.
int code = config()->GetAllocatableDoubleCode(i);
@@ -2030,6 +2091,31 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
+ // Clobber fixed float registers on archs with non-simple aliasing.
+ if (!kSimpleFPAliasing) {
+ if (fixed_float_live_ranges) {
+ for (int i = 0; i < config()->num_allocatable_float_registers();
+ ++i) {
+ // Add a UseInterval for all FloatRegisters. See comment above for
+ // general registers.
+ int code = config()->GetAllocatableFloatCode(i);
+ TopLevelLiveRange* range =
+ FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
+ }
+ }
+ if (fixed_simd128_live_ranges) {
+ for (int i = 0; i < config()->num_allocatable_simd128_registers();
+ ++i) {
+ int code = config()->GetAllocatableSimd128Code(i);
+ TopLevelLiveRange* range =
+ FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
+ range->AddUseInterval(curr_position, curr_position.End(),
+ allocation_zone());
+ }
+ }
+ }
}
for (size_t i = 0; i < instr->InputCount(); i++) {
@@ -2141,7 +2227,6 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
}
-
void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
BitVector* live) {
for (PhiInstruction* phi : block->phis()) {
@@ -2149,33 +2234,112 @@ void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
- // Select the hint from the first predecessor block that preceeds this block
- // in the rpo ordering. Prefer non-deferred blocks. The enforcement of
- // hinting in rpo order is required because hint resolution that happens
- // later in the compiler pipeline visits instructions in reverse rpo,
- // relying on the fact that phis are encountered before their hints.
- const Instruction* instr = nullptr;
- const InstructionBlock::Predecessors& predecessors = block->predecessors();
- for (size_t i = 0; i < predecessors.size(); ++i) {
+ // Select a hint from a predecessor block that preceeds this block in the
+ // rpo order. In order of priority:
+ // - Avoid hints from deferred blocks.
+ // - Prefer hints from allocated (or explicit) operands.
+ // - Prefer hints from empty blocks (containing just parallel moves and a
+ // jump). In these cases, if we can elide the moves, the jump threader
+ // is likely to be able to elide the jump.
+ // The enforcement of hinting in rpo order is required because hint
+ // resolution that happens later in the compiler pipeline visits
+ // instructions in reverse rpo order, relying on the fact that phis are
+ // encountered before their hints.
+ InstructionOperand* hint = nullptr;
+ int hint_preference = 0;
+
+ // The cost of hinting increases with the number of predecessors. At the
+ // same time, the typical benefit decreases, since this hinting only
+ // optimises the execution path through one predecessor. A limit of 2 is
+ // sufficient to hit the common if/else pattern.
+ int predecessor_limit = 2;
+
+ for (RpoNumber predecessor : block->predecessors()) {
const InstructionBlock* predecessor_block =
- code()->InstructionBlockAt(predecessors[i]);
- if (predecessor_block->rpo_number() < block->rpo_number()) {
- instr = GetLastInstruction(code(), predecessor_block);
- if (!predecessor_block->IsDeferred()) break;
+ code()->InstructionBlockAt(predecessor);
+ DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
+
+ // Only take hints from earlier rpo numbers.
+ if (predecessor >= block->rpo_number()) continue;
+
+ // Look up the predecessor instruction.
+ const Instruction* predecessor_instr =
+ GetLastInstruction(code(), predecessor_block);
+ InstructionOperand* predecessor_hint = nullptr;
+ // Phis are assigned in the END position of the last instruction in each
+ // predecessor block.
+ for (MoveOperands* move :
+ *predecessor_instr->GetParallelMove(Instruction::END)) {
+ InstructionOperand& to = move->destination();
+ if (to.IsUnallocated() &&
+ UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
+ predecessor_hint = &move->source();
+ break;
+ }
+ }
+ DCHECK_NOT_NULL(predecessor_hint);
+
+ // For each predecessor, generate a score according to the priorities
+ // described above, and pick the best one. Flags in higher-order bits have
+ // a higher priority than those in lower-order bits.
+ int predecessor_hint_preference = 0;
+ const int kNotDeferredBlockPreference = (1 << 2);
+ const int kMoveIsAllocatedPreference = (1 << 1);
+ const int kBlockIsEmptyPreference = (1 << 0);
+
+ // - Avoid hints from deferred blocks.
+ if (!predecessor_block->IsDeferred()) {
+ predecessor_hint_preference |= kNotDeferredBlockPreference;
}
- }
- DCHECK_NOT_NULL(instr);
- InstructionOperand* hint = nullptr;
- for (MoveOperands* move : *instr->GetParallelMove(Instruction::END)) {
- InstructionOperand& to = move->destination();
- if (to.IsUnallocated() &&
- UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
- hint = &move->source();
- break;
+ // - Prefer hints from allocated (or explicit) operands.
+ //
+ // Already-allocated or explicit operands are typically assigned using
+ // the parallel moves on the last instruction. For example:
+ //
+ // gap (v101 = [x0|R|w32]) (v100 = v101)
+ // ArchJmp
+ // ...
+ // phi: v100 = v101 v102
+ //
+ // We have already found the END move, so look for a matching START move
+ // from an allocated (or explicit) operand.
+ //
+ // Note that we cannot simply look up data()->live_ranges()[vreg] here
+ // because the live ranges are still being built when this function is
+ // called.
+ // TODO(v8): Find a way to separate hinting from live range analysis in
+ // BuildLiveRanges so that we can use the O(1) live-range look-up.
+ auto moves = predecessor_instr->GetParallelMove(Instruction::START);
+ if (moves != nullptr) {
+ for (MoveOperands* move : *moves) {
+ InstructionOperand& to = move->destination();
+ if (predecessor_hint->Equals(to)) {
+ if (move->source().IsAllocated() || move->source().IsExplicit()) {
+ predecessor_hint_preference |= kMoveIsAllocatedPreference;
+ }
+ break;
+ }
+ }
+ }
+
+ // - Prefer hints from empty blocks.
+ if (predecessor_block->last_instruction_index() ==
+ predecessor_block->first_instruction_index()) {
+ predecessor_hint_preference |= kBlockIsEmptyPreference;
+ }
+
+ if ((hint == nullptr) ||
+ (predecessor_hint_preference > hint_preference)) {
+ // Take the hint from this predecessor.
+ hint = predecessor_hint;
+ hint_preference = predecessor_hint_preference;
}
+
+ if (--predecessor_limit <= 0) break;
}
- DCHECK(hint != nullptr);
+ DCHECK_NOT_NULL(hint);
+
LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
UsePosition* use_pos = Define(block_start, &phi->output(), hint,
@@ -2372,7 +2536,13 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
num_allocatable_registers_(
GetAllocatableRegisterCount(data->config(), kind)),
allocatable_register_codes_(
- GetAllocatableRegisterCodes(data->config(), kind)) {}
+ GetAllocatableRegisterCodes(data->config(), kind)),
+ check_fp_aliasing_(false) {
+ if (!kSimpleFPAliasing && kind == FP_REGISTERS) {
+ check_fp_aliasing_ = (data->code()->representation_mask() &
+ (kFloatRepBit | kSimd128RepBit)) != 0;
+ }
+}
LifetimePosition RegisterAllocator::GetSplitPositionForInstruction(
const LiveRange* range, int instruction_index) {
@@ -2401,7 +2571,13 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
}
- UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+
+ // With splinters, we can be more strict and skip over positions
+ // not strictly needing registers.
+ UsePosition* pos =
+ range->IsSplinter()
+ ? range->NextRegisterPosition(next_pos)
+ : range->NextUsePositionRegisterIsBeneficial(next_pos);
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
@@ -2593,6 +2769,14 @@ void LinearScanAllocator::AllocateRegisters() {
for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
+ if (!kSimpleFPAliasing && check_fp_aliasing()) {
+ for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
+ }
+ for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
+ if (current != nullptr) AddToInactive(current);
+ }
+ }
}
while (!unhandled_live_ranges().empty()) {
@@ -2634,14 +2818,30 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(!current->HasRegisterAssigned() && !current->spilled());
- bool result = TryAllocateFreeReg(current);
- if (!result) AllocateBlockedReg(current);
- if (current->HasRegisterAssigned()) {
- AddToActive(current);
- }
+ ProcessCurrentRange(current);
}
}
+bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
+ DCHECK(range->TopLevel()->IsSplinter());
+ // If we can spill the whole range, great. Otherwise, split above the
+ // first use needing a register and spill the top part.
+ const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
+ if (next_reg == nullptr) {
+ Spill(range);
+ return true;
+ } else if (range->FirstHintPosition() == nullptr) {
+ // If there was no hint, but we have a use position requiring a
+ // register, apply the hot path heuristics.
+ return false;
+ } else if (next_reg->pos().PrevStart() > range->Start()) {
+ LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
+ AddToUnhandledSorted(tail);
+ Spill(range);
+ return true;
+ }
+ return false;
+}
void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
int reg) {
@@ -2757,35 +2957,133 @@ void LinearScanAllocator::InactiveToActive(LiveRange* range) {
range->TopLevel()->vreg(), range->relative_id());
}
+void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep,
+ int* num_regs, int* num_codes,
+ const int** codes) const {
+ DCHECK(!kSimpleFPAliasing);
+ if (rep == MachineRepresentation::kFloat32) {
+ *num_regs = data()->config()->num_float_registers();
+ *num_codes = data()->config()->num_allocatable_float_registers();
+ *codes = data()->config()->allocatable_float_codes();
+ } else if (rep == MachineRepresentation::kSimd128) {
+ *num_regs = data()->config()->num_simd128_registers();
+ *num_codes = data()->config()->num_allocatable_simd128_registers();
+ *codes = data()->config()->allocatable_simd128_codes();
+ } else {
+ UNREACHABLE();
+ }
+}
-bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
+void LinearScanAllocator::FindFreeRegistersForRange(
+ LiveRange* range, Vector<LifetimePosition> positions) {
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
+ MachineRepresentation rep = range->representation();
+ if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kSimd128))
+ GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+ DCHECK_GE(positions.length(), num_regs);
- LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_regs; i++) {
- free_until_pos[i] = LifetimePosition::MaxPosition();
+ positions[i] = LifetimePosition::MaxPosition();
}
for (LiveRange* cur_active : active_live_ranges()) {
int cur_reg = cur_active->assigned_register();
- free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
- TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
- LifetimePosition::GapFromInstructionIndex(0).value());
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ positions[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
+ TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
+ LifetimePosition::GapFromInstructionIndex(0).value());
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ cur_active->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ positions[aliased_reg] = LifetimePosition::GapFromInstructionIndex(0);
+ }
+ }
}
for (LiveRange* cur_inactive : inactive_live_ranges()) {
- DCHECK(cur_inactive->End() > current->Start());
- LifetimePosition next_intersection =
- cur_inactive->FirstIntersection(current);
+ DCHECK(cur_inactive->End() > range->Start());
+ LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
- free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
- TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
- Min(free_until_pos[cur_reg], next_intersection).value());
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ positions[cur_reg] = Min(positions[cur_reg], next_intersection);
+ TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
+ Min(positions[cur_reg], next_intersection).value());
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ cur_inactive->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ positions[aliased_reg] = Min(positions[aliased_reg], next_intersection);
+ }
+ }
}
+}
+// High-level register allocation summary:
+//
+// For regular, or hot (i.e. not splinter) ranges, we attempt to first
+// allocate first the preferred (hint) register. If that is not possible,
+// we find a register that's free, and allocate that. If that's not possible,
+// we search for a register to steal from a range that was allocated. The
+// goal is to optimize for throughput by avoiding register-to-memory
+// moves, which are expensive.
+//
+// For splinters, the goal is to minimize the number of moves. First we try
+// to allocate the preferred register (more discussion follows). Failing that,
+// we bail out and spill as far as we can, unless the first use is at start,
+// case in which we apply the same behavior as we do for regular ranges.
+// If there is no hint, we apply the hot-path behavior.
+//
+// For the splinter, the hint register may come from:
+//
+// - the hot path (we set it at splintering time with SetHint). In this case, if
+// we cannot offer the hint register, spilling is better because it's at most
+// 1 move, while trying to find and offer another register is at least 1 move.
+//
+// - a constraint. If we cannot offer that register, it's because there is some
+// interference. So offering the hint register up to the interference would
+// result
+// in a move at the interference, plus a move to satisfy the constraint. This is
+// also the number of moves if we spill, with the potential of the range being
+// already spilled and thus saving a move (the spill).
+// Note that this can only be an input constraint, if it were an output one,
+// the range wouldn't be a splinter because it means it'd be defined in a
+// deferred
+// block, and we don't mark those as splinters (they live in deferred blocks
+// only).
+//
+// - a phi. The same analysis as in the case of the input constraint applies.
+//
+void LinearScanAllocator::ProcessCurrentRange(LiveRange* current) {
+ LifetimePosition free_until_pos_buff[RegisterConfiguration::kMaxFPRegisters];
+ Vector<LifetimePosition> free_until_pos(
+ free_until_pos_buff, RegisterConfiguration::kMaxFPRegisters);
+ FindFreeRegistersForRange(current, free_until_pos);
+ if (!TryAllocatePreferredReg(current, free_until_pos)) {
+ if (current->TopLevel()->IsSplinter()) {
+ if (TrySplitAndSpillSplinter(current)) return;
+ }
+ if (!TryAllocateFreeReg(current, free_until_pos)) {
+ AllocateBlockedReg(current);
+ }
+ }
+ if (current->HasRegisterAssigned()) {
+ AddToActive(current);
+ }
+}
+
+bool LinearScanAllocator::TryAllocatePreferredReg(
+ LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
int hint_register;
if (current->FirstHintPosition(&hint_register) != nullptr) {
TRACE(
@@ -2803,6 +3101,20 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
return true;
}
}
+ return false;
+}
+
+bool LinearScanAllocator::TryAllocateFreeReg(
+ LiveRange* current, const Vector<LifetimePosition>& free_until_pos) {
+ int num_regs = 0; // used only for the call to GetFPRegisterSet.
+ int num_codes = num_allocatable_registers();
+ const int* codes = allocatable_register_codes();
+ MachineRepresentation rep = current->representation();
+ if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kSimd128))
+ GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+
+ DCHECK_GE(free_until_pos.length(), num_codes);
// Find the register which stays free for the longest time.
int reg = codes[0];
@@ -2837,7 +3149,6 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
return true;
}
-
void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
UsePosition* register_use = current->NextRegisterPosition(current->Start());
if (register_use == nullptr) {
@@ -2850,6 +3161,10 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
+ MachineRepresentation rep = current->representation();
+ if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
+ rep == MachineRepresentation::kSimd128))
+ GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
@@ -2861,16 +3176,28 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
int cur_reg = range->assigned_register();
bool is_fixed_or_cant_spill =
range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
- if (is_fixed_or_cant_spill) {
- block_pos[cur_reg] = use_pos[cur_reg] =
- LifetimePosition::GapFromInstructionIndex(0);
- } else {
- UsePosition* next_use =
- range->NextUsePositionRegisterIsBeneficial(current->Start());
- if (next_use == nullptr) {
- use_pos[cur_reg] = range->End();
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (is_fixed_or_cant_spill) {
+ block_pos[cur_reg] = use_pos[cur_reg] =
+ LifetimePosition::GapFromInstructionIndex(0);
} else {
- use_pos[cur_reg] = next_use->pos();
+ use_pos[cur_reg] =
+ range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+ }
+ } else {
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ range->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (is_fixed_or_cant_spill) {
+ block_pos[aliased_reg] = use_pos[aliased_reg] =
+ LifetimePosition::GapFromInstructionIndex(0);
+ } else {
+ use_pos[aliased_reg] =
+ range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+ }
}
}
}
@@ -2881,11 +3208,29 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
bool is_fixed = range->TopLevel()->IsFixed();
- if (is_fixed) {
- block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
- use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (is_fixed) {
+ block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+ use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+ } else {
+ use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ }
} else {
- use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+ int alias_base_index = -1;
+ int aliases = data()->config()->GetAliases(
+ range->representation(), cur_reg, rep, &alias_base_index);
+ DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
+ while (aliases--) {
+ int aliased_reg = alias_base_index + aliases;
+ if (is_fixed) {
+ block_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], next_intersection);
+ use_pos[aliased_reg] =
+ Min(block_pos[aliased_reg], use_pos[aliased_reg]);
+ } else {
+ use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
+ }
+ }
}
}
@@ -2937,7 +3282,15 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
LiveRange* range = active_live_ranges()[i];
- if (range->assigned_register() != reg) continue;
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (range->assigned_register() != reg) continue;
+ } else {
+ if (!data()->config()->AreAliases(current->representation(), reg,
+ range->representation(),
+ range->assigned_register())) {
+ continue;
+ }
+ }
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
@@ -2964,7 +3317,14 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
if (range->TopLevel()->IsFixed()) continue;
- if (range->assigned_register() != reg) continue;
+ if (kSimpleFPAliasing || !check_fp_aliasing()) {
+ if (range->assigned_register() != reg) continue;
+ } else {
+ if (!data()->config()->AreAliases(current->representation(), reg,
+ range->representation(),
+ range->assigned_register()))
+ continue;
+ }
LifetimePosition next_intersection = range->FirstIntersection(current);
if (next_intersection.IsValid()) {
@@ -3455,7 +3815,6 @@ int LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
return gap_index;
}
-
void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
DelayedInsertionMap delayed_insertion_map(local_zone);
for (TopLevelLiveRange* top_range : data()->live_ranges()) {
@@ -3543,9 +3902,8 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
// Gather all MoveOperands for a single ParallelMove.
MoveOperands* move =
new (code_zone()) MoveOperands(it->first.second, it->second);
- MoveOperands* eliminate = moves->PrepareInsertAfter(move);
+ moves->PrepareInsertAfter(move, &to_eliminate);
to_insert.push_back(move);
- if (eliminate != nullptr) to_eliminate.push_back(eliminate);
}
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 2089ea2fc1..7698a90387 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -5,7 +5,9 @@
#ifndef V8_REGISTER_ALLOCATOR_H_
#define V8_REGISTER_ALLOCATOR_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/instruction.h"
+#include "src/globals.h"
#include "src/ostreams.h"
#include "src/register-configuration.h"
#include "src/zone/zone-containers.h"
@@ -246,7 +248,8 @@ static_assert(kUnassignedRegister <= RegisterConfiguration::kMaxFPRegisters,
"kUnassignedRegister too small");
// Representation of a use position.
-class UsePosition final : public ZoneObject {
+class V8_EXPORT_PRIVATE UsePosition final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
UsePosition(LifetimePosition pos, InstructionOperand* operand, void* hint,
UsePositionHintType hint_type);
@@ -275,6 +278,7 @@ class UsePosition final : public ZoneObject {
}
bool HasHint() const;
bool HintRegister(int* register_code) const;
+ void SetHint(UsePosition* use_pos);
void ResolveHint(UsePosition* use_pos);
bool IsResolved() const {
return hint_type() != UsePositionHintType::kUnresolved;
@@ -304,7 +308,7 @@ class LiveRangeGroup;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
-class LiveRange : public ZoneObject {
+class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
public:
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
@@ -353,6 +357,11 @@ class LiveRange : public ZoneObject {
UsePosition* NextUsePositionRegisterIsBeneficial(
LifetimePosition start) const;
+ // Returns lifetime position for which register is beneficial in this live
+ // range and which follows both start and last processed use position.
+ LifetimePosition NextLifetimePositionRegisterIsBeneficial(
+ const LifetimePosition& start) const;
+
// Returns use position for which register is beneficial in this live
// range and which precedes start.
UsePosition* PreviousUsePositionRegisterIsBeneficial(
@@ -368,8 +377,12 @@ class LiveRange : public ZoneObject {
// live range to the result live range.
// The current range will terminate at position, while result will start from
// position.
+ enum HintConnectionOption : bool {
+ DoNotConnectHints = false,
+ ConnectHints = true
+ };
UsePosition* DetachAt(LifetimePosition position, LiveRange* result,
- Zone* zone);
+ Zone* zone, HintConnectionOption connect_hints);
// Detaches at position, and then links the resulting ranges. Returns the
// child, which starts at position.
@@ -471,8 +484,7 @@ class LiveRangeGroup final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
};
-
-class TopLevelLiveRange final : public LiveRange {
+class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
public:
explicit TopLevelLiveRange(int vreg, MachineRepresentation rep);
int spill_start_index() const { return spill_start_index_; }
@@ -766,12 +778,24 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
return fixed_live_ranges_;
}
+ ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
+ return fixed_float_live_ranges_;
+ }
+ const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
+ return fixed_float_live_ranges_;
+ }
ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
return fixed_double_live_ranges_;
}
const ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() const {
return fixed_double_live_ranges_;
}
+ ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() {
+ return fixed_simd128_live_ranges_;
+ }
+ const ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() const {
+ return fixed_simd128_live_ranges_;
+ }
ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
ZoneVector<BitVector*>& live_out_sets() { return live_out_sets_; }
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
@@ -833,7 +857,9 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_simd128_live_ranges_;
ZoneVector<SpillRange*> spill_ranges_;
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
@@ -956,6 +982,8 @@ class RegisterAllocator : public ZoneObject {
const int* allocatable_register_codes() const {
return allocatable_register_codes_;
}
+ // Returns true iff. we must check float register aliasing.
+ bool check_fp_aliasing() const { return check_fp_aliasing_; }
// TODO(mtrofin): explain why splitting in gap START is always OK.
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
@@ -1006,6 +1034,7 @@ class RegisterAllocator : public ZoneObject {
const int num_registers_;
int num_allocatable_registers_;
const int* allocatable_register_codes_;
+ bool check_fp_aliasing_;
private:
bool no_combining_;
@@ -1047,8 +1076,17 @@ class LinearScanAllocator final : public RegisterAllocator {
// Helper methods for allocating registers.
bool TryReuseSpillForPhi(TopLevelLiveRange* range);
- bool TryAllocateFreeReg(LiveRange* range);
+ bool TryAllocateFreeReg(LiveRange* range,
+ const Vector<LifetimePosition>& free_until_pos);
+ bool TryAllocatePreferredReg(LiveRange* range,
+ const Vector<LifetimePosition>& free_until_pos);
+ void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
+ int* num_codes, const int** codes) const;
+ void FindFreeRegistersForRange(LiveRange* range,
+ Vector<LifetimePosition> free_until_pos);
+ void ProcessCurrentRange(LiveRange* current);
void AllocateBlockedReg(LiveRange* range);
+ bool TrySplitAndSpillSplinter(LiveRange* range);
// Spill the given life range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 22d809b9d6..4d002cc3c6 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -24,8 +24,6 @@ const char* Truncation::description() const {
return "truncate-to-word32";
case TruncationKind::kWord64:
return "truncate-to-word64";
- case TruncationKind::kFloat32:
- return "truncate-to-float32";
case TruncationKind::kFloat64:
return "truncate-to-float64";
case TruncationKind::kAny:
@@ -42,15 +40,15 @@ const char* Truncation::description() const {
// ^ ^
// \ |
// \ kFloat64 <--+
-// \ ^ ^ |
-// \ / | |
-// kWord32 kFloat32 kBool
-// ^ ^ ^
-// \ | /
-// \ | /
-// \ | /
-// \ | /
-// \ | /
+// \ ^ |
+// \ / |
+// kWord32 kBool
+// ^ ^
+// \ /
+// \ /
+// \ /
+// \ /
+// \ /
// kNone
// static
@@ -87,9 +85,6 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
case TruncationKind::kWord64:
return rep2 == TruncationKind::kWord64;
- case TruncationKind::kFloat32:
- return rep2 == TruncationKind::kFloat32 ||
- rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
case TruncationKind::kFloat64:
return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
case TruncationKind::kAny:
@@ -147,8 +142,10 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetTaggedSignedRepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kTaggedPointer:
- DCHECK(use_info.type_check() == TypeCheckKind::kNone);
- return GetTaggedPointerRepresentationFor(node, output_rep, output_type);
+ DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
+ use_info.type_check() == TypeCheckKind::kHeapObject);
+ return GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
case MachineRepresentation::kTagged:
DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetTaggedRepresentationFor(node, output_rep, output_type,
@@ -255,6 +252,24 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = machine()->ChangeFloat32ToFloat64();
+ node = InsertConversion(node, op, use_node);
+ op = simplified()->CheckedFloat64ToInt32(
+ output_type->Maybe(Type::MinusZero())
+ ? CheckForMinusZeroMode::kCheckForMinusZero
+ : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ node = InsertConversion(node, op, use_node);
+ if (SmiValuesAre32Bits()) {
+ op = simplified()->ChangeInt32ToTagged();
+ } else {
+ op = simplified()->CheckedInt32ToTaggedSigned();
+ }
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedSigned);
+ }
} else if (CanBeTaggedPointer(output_rep) &&
use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedTaggedToTaggedSigned();
@@ -272,34 +287,64 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
}
Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
- Node* node, MachineRepresentation output_rep, Type* output_type) {
+ Node* node, MachineRepresentation output_rep, Type* output_type,
+ Node* use_node, UseInfo use_info) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kHeapConstant:
return node; // No change necessary.
case IrOpcode::kInt32Constant:
- if (output_type->Is(Type::Boolean())) {
- return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
- : jsgraph()->TrueConstant();
- } else {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kTaggedPointer);
- }
case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat32Constant:
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kTaggedPointer);
+ UNREACHABLE();
default:
break;
}
- // Select the correct X -> Tagged operator.
+ // Select the correct X -> TaggedPointer operator.
+ Operator const* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
// We just provide a dummy value here.
return jsgraph()->TheHoleConstant();
+ } else if (output_rep == MachineRepresentation::kBit) {
+ if (output_type->Is(Type::Boolean())) {
+ op = simplified()->ChangeBitToTagged();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
+ }
+ } else if (IsWord(output_rep)) {
+ if (output_type->Is(Type::Unsigned32())) {
+ // uint32 -> float64 -> tagged
+ node = InsertChangeUint32ToFloat64(node);
+ } else if (output_type->Is(Type::Signed32())) {
+ // int32 -> float64 -> tagged
+ node = InsertChangeInt32ToFloat64(node);
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kFloat32) {
+ // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kFloat64) {
+ // float64 -> tagged
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (CanBeTaggedSigned(output_rep) &&
+ use_info.type_check() == TypeCheckKind::kHeapObject) {
+ if (!output_type->Maybe(Type::SignedSmall())) {
+ return node;
+ }
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for TaggedSigned output representation.
+ op = simplified()->CheckedTaggedToTaggedPointer();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
}
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kTaggedPointer);
+ return InsertConversion(node, op, use_node);
}
Node* RepresentationChanger::GetTaggedRepresentationFor(
@@ -311,23 +356,10 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
case IrOpcode::kHeapConstant:
return node; // No change necessary.
case IrOpcode::kInt32Constant:
- if (output_type->Is(Type::Signed32())) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Constant(value);
- } else if (output_type->Is(Type::Unsigned32())) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Constant(static_cast<double>(value));
- } else if (output_type->Is(Type::Boolean())) {
- return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
- : jsgraph()->TrueConstant();
- } else {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kTagged);
- }
case IrOpcode::kFloat64Constant:
- return jsgraph()->Constant(OpParameter<double>(node));
case IrOpcode::kFloat32Constant:
- return jsgraph()->Constant(OpParameter<float>(node));
+ UNREACHABLE();
+ break;
default:
break;
}
@@ -395,20 +427,14 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
Truncation truncation) {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
- case IrOpcode::kFloat64Constant:
case IrOpcode::kNumberConstant:
return jsgraph()->Float32Constant(
DoubleToFloat32(OpParameter<double>(node)));
case IrOpcode::kInt32Constant:
- if (output_type->Is(Type::Unsigned32())) {
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float32Constant(static_cast<float>(value));
- } else {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float32Constant(static_cast<float>(value));
- }
+ case IrOpcode::kFloat64Constant:
case IrOpcode::kFloat32Constant:
- return node; // No change necessary.
+ UNREACHABLE();
+ break;
default:
break;
}
@@ -466,18 +492,10 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
case IrOpcode::kNumberConstant:
return jsgraph()->Float64Constant(OpParameter<double>(node));
case IrOpcode::kInt32Constant:
- if (output_type->Is(Type::Signed32())) {
- int32_t value = OpParameter<int32_t>(node);
- return jsgraph()->Float64Constant(value);
- } else {
- DCHECK(output_type->Is(Type::Unsigned32()));
- uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
- return jsgraph()->Float64Constant(static_cast<double>(value));
- }
case IrOpcode::kFloat64Constant:
- return node; // No change necessary.
case IrOpcode::kFloat32Constant:
- return jsgraph()->Float64Constant(OpParameter<float>(node));
+ UNREACHABLE();
+ break;
default:
break;
}
@@ -542,19 +560,11 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- return node; // No change necessary.
- case IrOpcode::kFloat32Constant: {
- float const fv = OpParameter<float>(node);
- if (use_info.type_check() == TypeCheckKind::kNone ||
- ((use_info.type_check() == TypeCheckKind::kSignedSmall ||
- use_info.type_check() == TypeCheckKind::kSigned32) &&
- IsInt32Double(fv))) {
- return MakeTruncatedInt32Constant(fv);
- }
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ UNREACHABLE();
break;
- }
- case IrOpcode::kNumberConstant:
- case IrOpcode::kFloat64Constant: {
+ case IrOpcode::kNumberConstant: {
double const fv = OpParameter<double>(node);
if (use_info.type_check() == TypeCheckKind::kNone ||
((use_info.type_check() == TypeCheckKind::kSignedSmall ||
@@ -587,7 +597,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
+ ? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
@@ -706,7 +716,7 @@ Node* RepresentationChanger::GetBitRepresentationFor(
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
node = jsgraph()->graph()->NewNode(machine()->WordEqual(), node,
- jsgraph()->ZeroConstant());
+ jsgraph()->IntPtrConstant(0));
return jsgraph()->graph()->NewNode(machine()->Word32Equal(), node,
jsgraph()->Int32Constant(0));
} else if (IsWord(output_rep)) {
@@ -983,6 +993,10 @@ Node* RepresentationChanger::InsertChangeFloat64ToInt32(Node* node) {
return jsgraph()->graph()->NewNode(machine()->ChangeFloat64ToInt32(), node);
}
+Node* RepresentationChanger::InsertChangeInt32ToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeInt32ToFloat64(), node);
+}
+
Node* RepresentationChanger::InsertChangeTaggedSignedToInt32(Node* node) {
return jsgraph()->graph()->NewNode(simplified()->ChangeTaggedSignedToInt32(),
node);
@@ -993,6 +1007,10 @@ Node* RepresentationChanger::InsertChangeTaggedToFloat64(Node* node) {
node);
}
+Node* RepresentationChanger::InsertChangeUint32ToFloat64(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeUint32ToFloat64(), node);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index f27108ed46..d7895da825 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -19,7 +19,6 @@ class Truncation final {
static Truncation Bool() { return Truncation(TruncationKind::kBool); }
static Truncation Word32() { return Truncation(TruncationKind::kWord32); }
static Truncation Word64() { return Truncation(TruncationKind::kWord64); }
- static Truncation Float32() { return Truncation(TruncationKind::kFloat32); }
static Truncation Float64() { return Truncation(TruncationKind::kFloat64); }
static Truncation Any() { return Truncation(TruncationKind::kAny); }
@@ -63,7 +62,6 @@ class Truncation final {
kBool,
kWord32,
kWord64,
- kFloat32,
kFloat64,
kAny
};
@@ -82,7 +80,8 @@ enum class TypeCheckKind : uint8_t {
kSignedSmall,
kSigned32,
kNumber,
- kNumberOrOddball
+ kNumberOrOddball,
+ kHeapObject
};
inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
@@ -97,6 +96,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "Number";
case TypeCheckKind::kNumberOrOddball:
return os << "NumberOrOddball";
+ case TypeCheckKind::kHeapObject:
+ return os << "HeapObject";
}
UNREACHABLE();
return os;
@@ -108,7 +109,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
//
// 1. During propagation, the use info is used to inform the input node
// about what part of the input is used (we call this truncation) and what
-// is the preferred representation.
+// is the preferred representation. For conversions that will require
+// checks, we also keep track of whether a minus zero check is needed.
//
// 2. During lowering, the use info is used to properly convert the input
// to the preferred representation. The preferred representation might be
@@ -117,10 +119,13 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
class UseInfo {
public:
UseInfo(MachineRepresentation representation, Truncation truncation,
- TypeCheckKind type_check = TypeCheckKind::kNone)
+ TypeCheckKind type_check = TypeCheckKind::kNone,
+ CheckForMinusZeroMode minus_zero_check =
+ CheckForMinusZeroMode::kCheckForMinusZero)
: representation_(representation),
truncation_(truncation),
- type_check_(type_check) {}
+ type_check_(type_check),
+ minus_zero_check_(minus_zero_check) {}
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
@@ -130,8 +135,8 @@ class UseInfo {
static UseInfo Bool() {
return UseInfo(MachineRepresentation::kBit, Truncation::Bool());
}
- static UseInfo TruncatingFloat32() {
- return UseInfo(MachineRepresentation::kFloat32, Truncation::Float32());
+ static UseInfo Float32() {
+ return UseInfo(MachineRepresentation::kFloat32, Truncation::Any());
}
static UseInfo TruncatingFloat64() {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64());
@@ -150,17 +155,25 @@ class UseInfo {
}
// Possibly deoptimizing conversions.
+ static UseInfo CheckedHeapObjectAsTaggedPointer() {
+ return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
+ TypeCheckKind::kHeapObject);
+ }
static UseInfo CheckedSignedSmallAsTaggedSigned() {
return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
TypeCheckKind::kSignedSmall);
}
- static UseInfo CheckedSignedSmallAsWord32() {
+ static UseInfo CheckedSignedSmallAsWord32(
+ CheckForMinusZeroMode minus_zero_mode =
+ CheckForMinusZeroMode::kCheckForMinusZero) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
- TypeCheckKind::kSignedSmall);
+ TypeCheckKind::kSignedSmall, minus_zero_mode);
}
- static UseInfo CheckedSigned32AsWord32() {
+ static UseInfo CheckedSigned32AsWord32(
+ CheckForMinusZeroMode minus_zero_mode =
+ CheckForMinusZeroMode::kCheckForMinusZero) {
return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
- TypeCheckKind::kSigned32);
+ TypeCheckKind::kSigned32, minus_zero_mode);
}
static UseInfo CheckedNumberAsFloat64() {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
@@ -195,11 +208,14 @@ class UseInfo {
MachineRepresentation representation() const { return representation_; }
Truncation truncation() const { return truncation_; }
TypeCheckKind type_check() const { return type_check_; }
+ CheckForMinusZeroMode minus_zero_check() const { return minus_zero_check_; }
private:
MachineRepresentation representation_;
Truncation truncation_;
TypeCheckKind type_check_;
+ // TODO(jarin) Integrate with truncations.
+ CheckForMinusZeroMode minus_zero_check_;
};
// Contains logic related to changing the representation of values for constants
@@ -251,7 +267,8 @@ class RepresentationChanger final {
UseInfo use_info);
Node* GetTaggedPointerRepresentationFor(Node* node,
MachineRepresentation output_rep,
- Type* output_type);
+ Type* output_type, Node* use_node,
+ UseInfo use_info);
Node* GetTaggedRepresentationFor(Node* node, MachineRepresentation output_rep,
Type* output_type, Truncation truncation);
Node* GetFloat32RepresentationFor(Node* node,
@@ -275,8 +292,10 @@ class RepresentationChanger final {
Node* InsertChangeFloat32ToFloat64(Node* node);
Node* InsertChangeFloat64ToInt32(Node* node);
Node* InsertChangeFloat64ToUint32(Node* node);
+ Node* InsertChangeInt32ToFloat64(Node* node);
Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
+ Node* InsertChangeUint32ToFloat64(Node* node);
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 284c3fc6e3..5dcc82f7a0 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -924,8 +924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -934,11 +933,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ CmpP(cp, kScratchReg);
__ Assert(eq, kWrongFunctionContext);
}
- if (opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(ip);
frame_access_state()->ClearSPDelta();
@@ -995,7 +992,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ LoadRR(i.OutputRegister(), sp);
@@ -1159,39 +1156,46 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lr(i.OutputRegister(0), r1);
__ srag(i.OutputRegister(1), r1, Operand(32));
break;
- case kS390_ShiftLeftPair:
+ case kS390_ShiftLeftPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
- i.InputRegister(0), i.InputRegister(1),
- i.InputInt32(2));
+ __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+ i.InputRegister(1), i.InputInt32(2));
} else {
- __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
- i.InputRegister(0), i.InputRegister(1), kScratchReg,
- i.InputRegister(2));
+ __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
+ i.InputRegister(1), kScratchReg, i.InputRegister(2));
}
break;
- case kS390_ShiftRightPair:
+ }
+ case kS390_ShiftRightPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
- __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1), kScratchReg,
i.InputRegister(2));
}
break;
- case kS390_ShiftRightArithPair:
+ }
+ case kS390_ShiftRightArithPair: {
+ Register second_output =
+ instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
if (instr->InputAt(2)->IsImmediate()) {
- __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightArithPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1),
i.InputInt32(2));
} else {
- __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
+ __ ShiftRightArithPair(i.OutputRegister(0), second_output,
i.InputRegister(0), i.InputRegister(1),
kScratchReg, i.InputRegister(2));
}
break;
+ }
#endif
case kS390_RotRight32:
if (HasRegisterInput(instr, 1)) {
@@ -1240,7 +1244,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
- UNIMPLEMENTED(); // Find correct instruction
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ int shiftAmount = i.InputInt32(1);
+ int endBit = 63 - shiftAmount;
+ int startBit = 63 - i.InputInt32(2);
+ __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
+ Operand(endBit), Operand(shiftAmount), true);
+ } else {
+ int shiftAmount = i.InputInt32(1);
+ int clearBit = 63 - i.InputInt32(2);
+ __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
+ __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
+ __ srlg(i.OutputRegister(), i.OutputRegister(),
+ Operand(clearBit + shiftAmount));
+ __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(shiftAmount));
+ }
break;
case kS390_RotLeftAndClearLeft64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -2202,7 +2220,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2238,6 +2256,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadRR(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue(), ip);
+ if (descriptor->PushArgumentCount()) {
+ __ Push(kJavaScriptCallArgCountRegister);
+ }
} else {
StackFrame::Type type = info()->GetOutputStackFrameType();
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
@@ -2246,7 +2267,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(kShouldNotDirectlyEnterOsrFunction);
@@ -2280,7 +2302,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
@@ -2296,19 +2318,32 @@ void CodeGenerator::AssembleReturn() {
__ MultiPopDoubles(double_saves);
}
+ S390OperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ b(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now unless they have an variable
+ // number of stack slot pops
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ b(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
- __ Ret(pop_count);
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_count += g.ToConstant(pop).ToInt32();
+ } else {
+ __ Drop(g.ToRegister(pop));
+ }
+ __ Drop(pop_count);
+ __ Ret();
}
void CodeGenerator::AssembleMove(InstructionOperand* source,
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index f1aa332a49..eed08a9c44 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -343,7 +343,7 @@ void InstructionSelector::VisitStore(Node* node) {
MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ DCHECK(CanBeTaggedPointer(rep));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -835,48 +835,69 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
}
#if !V8_TARGET_ARCH_S390X
-void VisitPairBinop(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
+ InstructionCode opcode2, Node* node) {
S390OperandGenerator g(selector);
- // We use UseUniqueRegister here to avoid register sharing with the output
- // registers.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the output
+ // registers.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
- selector->Emit(opcode, 2, outputs, 4, inputs);
+ selector->Emit(opcode, 2, outputs, 4, inputs);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ selector->Emit(opcode2, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairAdd(Node* node) {
- VisitPairBinop(this, kS390_AddPair, node);
+ VisitPairBinop(this, kS390_AddPair, kS390_Add32, node);
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
- VisitPairBinop(this, kS390_SubPair, node);
+ VisitPairBinop(this, kS390_SubPair, kS390_Sub32, node);
}
void InstructionSelector::VisitInt32PairMul(Node* node) {
S390OperandGenerator g(this);
- InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {
- g.DefineAsRegister(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
-
- Emit(kS390_MulPair, 2, outputs, 4, inputs);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseUniqueRegister(node->InputAt(3))};
+
+ InstructionOperand outputs[] = {
+ g.DefineAsRegister(node),
+ g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+
+ Emit(kS390_MulPair, 2, outputs, 4, inputs);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kS390_Mul32, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+ }
}
-void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
+namespace {
+// Shared routine for multiple shift operations.
+void VisitPairShift(InstructionSelector* selector, InstructionCode opcode,
Node* node) {
S390OperandGenerator g(selector);
+ // We use g.UseUniqueRegister here to guarantee that there is
+ // no register aliasing of input registers with output registers.
Int32Matcher m(node->InputAt(2));
InstructionOperand shift_operand;
if (m.HasValue()) {
@@ -885,16 +906,27 @@ void VisitPairShift(InstructionSelector* selector, ArchOpcode opcode,
shift_operand = g.UseUniqueRegister(m.node());
}
- InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)),
+ InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)),
shift_operand};
- InstructionOperand outputs[] = {
- g.DefineSameAsFirst(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
- selector->Emit(opcode, 2, outputs, 3, inputs);
+ InstructionOperand outputs[2];
+ InstructionOperand temps[1];
+ int32_t output_count = 0;
+ int32_t temp_count = 0;
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (projection1) {
+ outputs[output_count++] = g.DefineAsRegister(projection1);
+ } else {
+ temps[temp_count++] = g.TempRegister();
+ }
+
+ selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
+} // namespace
void InstructionSelector::VisitWord32PairShl(Node* node) {
VisitPairShift(this, kS390_ShiftLeftPair, node);
@@ -1575,21 +1607,22 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, InstructionCode opcode,
FlagsContinuation* cont) {
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord32Compare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
@@ -1708,7 +1741,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Branch could not be combined with a compare, emit compare against 0.
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 6bd1a17be0..eb3dda8c26 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -344,7 +344,7 @@ void Schedule::EnsureSplitEdgeForm(BasicBlock* block) {
split_edge_block->set_control(BasicBlock::kGoto);
split_edge_block->successors().push_back(block);
split_edge_block->predecessors().push_back(pred);
- split_edge_block->set_deferred(pred->deferred());
+ split_edge_block->set_deferred(block->deferred());
*current_pred = split_edge_block;
// Find a corresponding successor in the previous block, replace it
// with the split edge block... but only do it once, since we only
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index 4fc0d0a540..3f9750cd70 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -7,6 +7,8 @@
#include <iosfwd>
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -26,7 +28,8 @@ typedef ZoneVector<Node*> NodeVector;
// A basic block contains an ordered list of nodes and ends with a control
// node. Note that if a basic block has phis, then all phis must appear as the
// first nodes in the block.
-class BasicBlock final : public ZoneObject {
+class V8_EXPORT_PRIVATE BasicBlock final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
// Possible control nodes that can end a block.
enum Control {
@@ -177,7 +180,7 @@ std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
// and ordering them within basic blocks. Prior to computing a schedule,
// a graph has no notion of control flow ordering other than that induced
// by the graph's dependencies. A schedule is required to generate code.
-class Schedule final : public ZoneObject {
+class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit Schedule(Zone* zone, size_t node_count_hint = 0);
@@ -282,7 +285,7 @@ class Schedule final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(Schedule);
};
-std::ostream& operator<<(std::ostream&, const Schedule&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, const Schedule&);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index 416ba5c84c..1a08e4c019 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -9,7 +9,8 @@
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/schedule.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
+#include "src/globals.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -25,7 +26,7 @@ class SpecialRPONumberer;
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
-class Scheduler {
+class V8_EXPORT_PRIVATE Scheduler {
public:
// Flags that control the mode of operation.
enum Flag { kNoFlags = 0u, kSplitNodes = 1u << 1 };
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
new file mode 100644
index 0000000000..c5a94b4297
--- /dev/null
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -0,0 +1,410 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simd-scalar-lowering.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+#include "src/compiler/node.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimdScalarLowering::SimdScalarLowering(
+ Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common, Zone* zone,
+ Signature<MachineRepresentation>* signature)
+ : zone_(zone),
+ graph_(graph),
+ machine_(machine),
+ common_(common),
+ state_(graph, 3),
+ stack_(zone),
+ replacements_(nullptr),
+ signature_(signature),
+ placeholder_(
+ graph->NewNode(common->Parameter(-2, "placeholder"), graph->start())),
+ parameter_count_after_lowering_(-1) {
+ DCHECK_NOT_NULL(graph);
+ DCHECK_NOT_NULL(graph->end());
+ replacements_ = zone->NewArray<Replacement>(graph->NodeCount());
+ memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
+}
+
+void SimdScalarLowering::LowerGraph() {
+ stack_.push_back({graph()->end(), 0});
+ state_.Set(graph()->end(), State::kOnStack);
+ replacements_[graph()->end()->id()].type = SimdType::kInt32;
+
+ while (!stack_.empty()) {
+ NodeState& top = stack_.back();
+ if (top.input_index == top.node->InputCount()) {
+ // All inputs of top have already been lowered, now lower top.
+ stack_.pop_back();
+ state_.Set(top.node, State::kVisited);
+ LowerNode(top.node);
+ } else {
+ // Push the next input onto the stack.
+ Node* input = top.node->InputAt(top.input_index++);
+ if (state_.Get(input) == State::kUnvisited) {
+ SetLoweredType(input, top.node);
+ if (input->opcode() == IrOpcode::kPhi) {
+ // To break cycles with phi nodes we push phis on a separate stack so
+ // that they are processed after all other nodes.
+ PreparePhiReplacement(input);
+ stack_.push_front({input, 0});
+ } else {
+ stack_.push_back({input, 0});
+ }
+ state_.Set(input, State::kOnStack);
+ }
+ }
+ }
+}
+
+#define FOREACH_INT32X4_OPCODE(V) \
+ V(Int32x4Add) \
+ V(Int32x4ExtractLane) \
+ V(CreateInt32x4)
+
+#define FOREACH_FLOAT32X4_OPCODE(V) \
+ V(Float32x4Add) \
+ V(Float32x4ExtractLane) \
+ V(CreateFloat32x4)
+
+void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
+ switch (node->opcode()) {
+#define CASE_STMT(name) case IrOpcode::k##name:
+ FOREACH_INT32X4_OPCODE(CASE_STMT)
+ case IrOpcode::kReturn:
+ case IrOpcode::kParameter:
+ case IrOpcode::kCall: {
+ replacements_[node->id()].type = SimdType::kInt32;
+ break;
+ }
+ FOREACH_FLOAT32X4_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kFloat32;
+ break;
+ }
+#undef CASE_STMT
+ default:
+ replacements_[node->id()].type = replacements_[output->id()].type;
+ }
+}
+
+static int GetParameterIndexAfterLowering(
+ Signature<MachineRepresentation>* signature, int old_index) {
+ // In function calls, the simd128 types are passed as 4 Int32 types. The
+ // parameters are typecast to the types as needed for various operations.
+ int result = old_index;
+ for (int i = 0; i < old_index; i++) {
+ if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
+ result += 3;
+ }
+ }
+ return result;
+}
+
+int SimdScalarLowering::GetParameterCountAfterLowering() {
+ if (parameter_count_after_lowering_ == -1) {
+ // GetParameterIndexAfterLowering(parameter_count) returns the parameter
+ // count after lowering.
+ parameter_count_after_lowering_ = GetParameterIndexAfterLowering(
+ signature(), static_cast<int>(signature()->parameter_count()));
+ }
+ return parameter_count_after_lowering_;
+}
+
+static int GetReturnCountAfterLowering(
+ Signature<MachineRepresentation>* signature) {
+ int result = static_cast<int>(signature->return_count());
+ for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+ if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
+ result += 3;
+ }
+ }
+ return result;
+}
+
+void SimdScalarLowering::LowerNode(Node* node) {
+ SimdType rep_type = ReplacementType(node);
+ switch (node->opcode()) {
+ case IrOpcode::kStart: {
+ int parameter_count = GetParameterCountAfterLowering();
+ // Only exchange the node if the parameter count actually changed.
+ if (parameter_count != static_cast<int>(signature()->parameter_count())) {
+ int delta =
+ parameter_count - static_cast<int>(signature()->parameter_count());
+ int new_output_count = node->op()->ValueOutputCount() + delta;
+ NodeProperties::ChangeOp(node, common()->Start(new_output_count));
+ }
+ break;
+ }
+ case IrOpcode::kParameter: {
+ DCHECK(node->InputCount() == 1);
+ // Only exchange the node if the parameter count actually changed. We do
+ // not even have to do the default lowering because the start node,
+ // the only input of a parameter node, only changes if the parameter count
+ // changes.
+ if (GetParameterCountAfterLowering() !=
+ static_cast<int>(signature()->parameter_count())) {
+ int old_index = ParameterIndexOf(node->op());
+ int new_index = GetParameterIndexAfterLowering(signature(), old_index);
+ if (old_index == new_index) {
+ NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+
+ Node* new_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ new_node[i] = nullptr;
+ }
+ new_node[0] = node;
+ if (signature()->GetParam(old_index) ==
+ MachineRepresentation::kSimd128) {
+ for (int i = 1; i < kMaxLanes; i++) {
+ new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
+ graph()->start());
+ }
+ }
+ ReplaceNode(node, new_node);
+ }
+ }
+ break;
+ }
+ case IrOpcode::kReturn: {
+ DefaultLowering(node);
+ int new_return_count = GetReturnCountAfterLowering(signature());
+ if (static_cast<int>(signature()->return_count()) != new_return_count) {
+ NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+ }
+ break;
+ }
+ case IrOpcode::kCall: {
+ // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ CallDescriptor* descriptor =
+ const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
+ if (DefaultLowering(node) ||
+ (descriptor->ReturnCount() == 1 &&
+ descriptor->GetReturnType(0) == MachineType::Simd128())) {
+ // We have to adjust the call descriptor.
+ const Operator* op =
+ common()->Call(wasm::ModuleEnv::GetI32WasmCallDescriptorForSimd(
+ zone(), descriptor));
+ NodeProperties::ChangeOp(node, op);
+ }
+ if (descriptor->ReturnCount() == 1 &&
+ descriptor->GetReturnType(0) == MachineType::Simd128()) {
+ // We access the additional return values through projections.
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ rep_node[i] =
+ graph()->NewNode(common()->Projection(i), node, graph()->start());
+ }
+ ReplaceNode(node, rep_node);
+ }
+ break;
+ }
+ case IrOpcode::kPhi: {
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kSimd128) {
+ // The replacement nodes have already been created, we only have to
+ // replace placeholder nodes.
+ Node** rep_node = GetReplacements(node);
+ for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ Node** rep_input =
+ GetReplacementsWithType(node->InputAt(i), rep_type);
+ for (int j = 0; j < kMaxLanes; j++) {
+ rep_node[j]->ReplaceInput(i, rep_input[j]);
+ }
+ }
+ } else {
+ DefaultLowering(node);
+ }
+ break;
+ }
+
+ case IrOpcode::kInt32x4Add: {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ rep_node[i] =
+ graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]);
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
+
+ case IrOpcode::kCreateInt32x4: {
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ DCHECK(!HasReplacement(1, node->InputAt(i)));
+ rep_node[i] = node->InputAt(i);
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
+
+ case IrOpcode::kInt32x4ExtractLane: {
+ Node* laneNode = node->InputAt(1);
+ DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
+ int32_t lane = OpParameter<int32_t>(laneNode);
+ Node* rep_node[kMaxLanes] = {
+ GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
+ nullptr, nullptr};
+ ReplaceNode(node, rep_node);
+ break;
+ }
+
+ case IrOpcode::kFloat32x4Add: {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ rep_node[i] = graph()->NewNode(machine()->Float32Add(), rep_left[i],
+ rep_right[i]);
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
+
+ case IrOpcode::kCreateFloat32x4: {
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ DCHECK(!HasReplacement(1, node->InputAt(i)));
+ rep_node[i] = node->InputAt(i);
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
+
+ case IrOpcode::kFloat32x4ExtractLane: {
+ Node* laneNode = node->InputAt(1);
+ DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
+ int32_t lane = OpParameter<int32_t>(laneNode);
+ Node* rep_node[kMaxLanes] = {
+ GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
+ nullptr, nullptr};
+ ReplaceNode(node, rep_node);
+ break;
+ }
+
+ default: { DefaultLowering(node); }
+ }
+}
+
+bool SimdScalarLowering::DefaultLowering(Node* node) {
+ bool something_changed = false;
+ for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
+ Node* input = node->InputAt(i);
+ if (HasReplacement(0, input)) {
+ something_changed = true;
+ node->ReplaceInput(i, GetReplacements(input)[0]);
+ }
+ if (HasReplacement(1, input)) {
+ something_changed = true;
+ for (int j = 1; j < kMaxLanes; j++) {
+ node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
+ }
+ }
+ }
+ return something_changed;
+}
+
+void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
+ // if new_low == nullptr, then also new_high == nullptr.
+ DCHECK(new_node[0] != nullptr ||
+ (new_node[1] == nullptr && new_node[2] == nullptr &&
+ new_node[3] == nullptr));
+ for (int i = 0; i < kMaxLanes; i++) {
+ replacements_[old->id()].node[i] = new_node[i];
+ }
+}
+
+bool SimdScalarLowering::HasReplacement(size_t index, Node* node) {
+ return replacements_[node->id()].node[index] != nullptr;
+}
+
+SimdScalarLowering::SimdType SimdScalarLowering::ReplacementType(Node* node) {
+ return replacements_[node->id()].type;
+}
+
+Node** SimdScalarLowering::GetReplacements(Node* node) {
+ Node** result = replacements_[node->id()].node;
+ DCHECK(result);
+ return result;
+}
+
+Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
+ Node** replacements = GetReplacements(node);
+ if (ReplacementType(node) == type) {
+ return GetReplacements(node);
+ }
+ Node** result = zone()->NewArray<Node*>(kMaxLanes);
+ if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
+ for (int i = 0; i < kMaxLanes; i++) {
+ if (replacements[i] != nullptr) {
+ result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
+ replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+ } else {
+ for (int i = 0; i < kMaxLanes; i++) {
+ if (replacements[i] != nullptr) {
+ result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
+ replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+ }
+ return result;
+}
+
+void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
+ MachineRepresentation rep = PhiRepresentationOf(phi->op());
+ if (rep == MachineRepresentation::kSimd128) {
+ // We have to create the replacements for a phi node before we actually
+ // lower the phi to break potential cycles in the graph. The replacements of
+ // input nodes do not exist yet, so we use a placeholder node to pass the
+ // graph verifier.
+ int value_count = phi->op()->ValueInputCount();
+ SimdType type = ReplacementType(phi);
+ Node** inputs_rep[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
+ inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
+ }
+ for (int i = 0; i < value_count; i++) {
+ for (int j = 0; j < kMaxLanes; j++) {
+ inputs_rep[j][i] = placeholder_;
+ }
+ }
+ Node* rep_nodes[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; i++) {
+ if (type == SimdType::kInt32) {
+ rep_nodes[i] = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kWord32, value_count),
+ value_count + 1, inputs_rep[i], false);
+ } else if (type == SimdType::kFloat32) {
+ rep_nodes[i] = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, value_count),
+ value_count + 1, inputs_rep[i], false);
+ } else {
+ UNREACHABLE();
+ }
+ }
+ ReplaceNode(phi, rep_nodes);
+ }
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
new file mode 100644
index 0000000000..39449f4b9f
--- /dev/null
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMD_SCALAR_LOWERING_H_
+#define V8_COMPILER_SIMD_SCALAR_LOWERING_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimdScalarLowering {
+ public:
+ SimdScalarLowering(Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common, Zone* zone,
+ Signature<MachineRepresentation>* signature);
+
+ void LowerGraph();
+
+ int GetParameterCountAfterLowering();
+
+ private:
+ enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
+
+ enum class SimdType : uint8_t { kInt32, kFloat32 };
+
+ static const int kMaxLanes = 4;
+
+ struct Replacement {
+ Node* node[kMaxLanes];
+ SimdType type; // represents what input type is expected
+ };
+
+ Zone* zone() const { return zone_; }
+ Graph* graph() const { return graph_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ Signature<MachineRepresentation>* signature() const { return signature_; }
+
+ void LowerNode(Node* node);
+ bool DefaultLowering(Node* node);
+
+ void ReplaceNode(Node* old, Node** new_nodes);
+ bool HasReplacement(size_t index, Node* node);
+ Node** GetReplacements(Node* node);
+ Node** GetReplacementsWithType(Node* node, SimdType type);
+ SimdType ReplacementType(Node* node);
+ void PreparePhiReplacement(Node* phi);
+ void SetLoweredType(Node* node, Node* output);
+
+ struct NodeState {
+ Node* node;
+ int input_index;
+ };
+
+ Zone* zone_;
+ Graph* const graph_;
+ MachineOperatorBuilder* machine_;
+ CommonOperatorBuilder* common_;
+ NodeMarker<State> state_;
+ ZoneDeque<NodeState> stack_;
+ Replacement* replacements_;
+ Signature<MachineRepresentation>* signature_;
+ Node* placeholder_;
+ int parameter_count_after_lowering_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_SIMD_SCALAR_LOWERING_H_
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 97aacd691c..c90d7437bf 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -11,6 +11,7 @@
#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -19,7 +20,6 @@
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/source-position.h"
#include "src/compiler/type-cache.h"
#include "src/conversions-inl.h"
#include "src/objects.h"
@@ -87,12 +87,14 @@ MachineRepresentation MachineRepresentationFromArrayType(
return MachineRepresentation::kNone;
}
-UseInfo CheckedUseInfoAsWord32FromHint(NumberOperationHint hint) {
+UseInfo CheckedUseInfoAsWord32FromHint(
+ NumberOperationHint hint, CheckForMinusZeroMode minus_zero_mode =
+ CheckForMinusZeroMode::kCheckForMinusZero) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
- return UseInfo::CheckedSignedSmallAsWord32();
+ return UseInfo::CheckedSignedSmallAsWord32(minus_zero_mode);
case NumberOperationHint::kSigned32:
- return UseInfo::CheckedSigned32AsWord32();
+ return UseInfo::CheckedSigned32AsWord32(minus_zero_mode);
case NumberOperationHint::kNumber:
return UseInfo::CheckedNumberAsWord32();
case NumberOperationHint::kNumberOrOddball:
@@ -127,7 +129,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kFloat64:
return UseInfo::TruncatingFloat64();
case MachineRepresentation::kFloat32:
- return UseInfo::TruncatingFloat32();
+ return UseInfo::Float32();
case MachineRepresentation::kWord64:
return UseInfo::TruncatingWord64();
case MachineRepresentation::kWord8:
@@ -161,7 +163,8 @@ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) {
} else if (NodeProperties::IsEffectEdge(edge)) {
edge.UpdateTo(effect);
} else {
- DCHECK(NodeProperties::IsValueEdge(edge));
+ DCHECK(NodeProperties::IsValueEdge(edge) ||
+ NodeProperties::IsContextEdge(edge));
}
}
}
@@ -746,6 +749,23 @@ class RepresentationSelector {
}
}
+ void VisitReturn(Node* node) {
+ int tagged_limit = node->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(node->op()) +
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ // Visit integer slot count to pop
+ ProcessInput(node, 0, UseInfo::TruncatingWord32());
+
+ // Visit value, context and frame state inputs as tagged.
+ for (int i = 1; i < tagged_limit; i++) {
+ ProcessInput(node, i, UseInfo::AnyTagged());
+ }
+ // Only enqueue other inputs (effects, control).
+ for (int i = tagged_limit; i < node->InputCount(); i++) {
+ EnqueueInput(node, i);
+ }
+ }
+
// Helper for an unused node.
void VisitUnused(Node* node) {
int value_count = node->op()->ValueInputCount() +
@@ -838,26 +858,8 @@ class RepresentationSelector {
return MachineRepresentation::kTagged;
} else if (type->Is(Type::Number())) {
return MachineRepresentation::kFloat64;
- } else if (type->Is(Type::Internal())) {
- // We mark (u)int64 as Type::Internal.
- // TODO(jarin) This is a workaround for our lack of (u)int64
- // types. This can be removed once we can represent (u)int64
- // unambiguously. (At the moment internal objects, such as the hole,
- // are also Type::Internal()).
- bool is_word64 = GetInfo(node->InputAt(0))->representation() ==
- MachineRepresentation::kWord64;
-#ifdef DEBUG
- if (node->opcode() != IrOpcode::kTypeGuard) {
- // Check that all the inputs agree on being Word64.
- DCHECK_EQ(IrOpcode::kPhi, node->opcode()); // This only works for phis.
- for (int i = 1; i < node->op()->ValueInputCount(); i++) {
- DCHECK_EQ(is_word64, GetInfo(node->InputAt(i))->representation() ==
- MachineRepresentation::kWord64);
- }
- }
-#endif
- return is_word64 ? MachineRepresentation::kWord64
- : MachineRepresentation::kTagged;
+ } else if (type->Is(Type::ExternalPointer())) {
+ return MachineType::PointerRepresentation();
}
return MachineRepresentation::kTagged;
}
@@ -992,6 +994,53 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTagged);
}
+ void VisitObjectState(Node* node) {
+ if (propagate()) {
+ for (int i = 0; i < node->InputCount(); i++) {
+ Node* input = node->InputAt(i);
+ Type* input_type = TypeOf(input);
+ // TODO(turbofan): Special treatment for ExternalPointer here,
+ // to avoid incompatible truncations. We really need a story
+ // for the JSFunction::entry field.
+ UseInfo use_info = input_type->Is(Type::ExternalPointer())
+ ? UseInfo::PointerInt()
+ : UseInfo::Any();
+ EnqueueInput(node, i, use_info);
+ }
+ } else if (lower()) {
+ Zone* zone = jsgraph_->zone();
+ ZoneVector<MachineType>* types =
+ new (zone->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(node->InputCount(), zone);
+ for (int i = 0; i < node->InputCount(); i++) {
+ Node* input = node->InputAt(i);
+ NodeInfo* input_info = GetInfo(input);
+ Type* input_type = TypeOf(input);
+ // TODO(turbofan): Special treatment for ExternalPointer here,
+ // to avoid incompatible truncations. We really need a story
+ // for the JSFunction::entry field.
+ if (input_type->Is(Type::ExternalPointer())) {
+ (*types)[i] = MachineType::Pointer();
+ } else {
+ MachineRepresentation rep = input_type->IsInhabited()
+ ? input_info->representation()
+ : MachineRepresentation::kNone;
+ MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
+ DCHECK(machine_type.representation() !=
+ MachineRepresentation::kWord32 ||
+ machine_type.semantic() == MachineSemantic::kInt32 ||
+ machine_type.semantic() == MachineSemantic::kUint32);
+ DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
+ input_type->Is(Type::Boolean()));
+ (*types)[i] = machine_type;
+ }
+ }
+ NodeProperties::ChangeOp(node,
+ jsgraph_->common()->TypedObjectState(types));
+ }
+ SetOutput(node, MachineRepresentation::kTagged);
+ }
+
const Operator* Int32Op(Node* node) {
return changer_->Int32OperatorFor(node->opcode());
}
@@ -1030,10 +1079,8 @@ class RepresentationSelector {
// undefined, because these special oddballs are always in the root set.
return kNoWriteBarrier;
}
- if (value_type->IsConstant() &&
- value_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<HeapObject> value_object =
- Handle<HeapObject>::cast(value_type->AsConstant()->Value());
+ if (value_type->IsHeapConstant()) {
+ Handle<HeapObject> value_object = value_type->AsHeapConstant()->Value();
RootIndexMap root_index_map(jsgraph_->isolate());
int root_index = root_index_map.Lookup(*value_object);
if (root_index != RootIndexMap::kInvalidRootIndex &&
@@ -1147,8 +1194,15 @@ class RepresentationSelector {
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kWord32, Type::Signed32());
+ UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+ // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+ // a minus zero check for the right hand side, since we already
+ // know that the left hand side is a proper Signed32 value,
+ // potentially guarded by a check.
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(
+ hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+ VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+ Type::Signed32());
if (lower()) ChangeToInt32OverflowOp(node);
return;
}
@@ -1266,6 +1320,30 @@ class RepresentationSelector {
return;
}
+ void VisitOsrGuard(Node* node) {
+ VisitInputs(node);
+
+ // Insert a dynamic check for the OSR value type if necessary.
+ switch (OsrGuardTypeOf(node->op())) {
+ case OsrGuardType::kUninitialized:
+ // At this point, we should always have a type for the OsrValue.
+ UNREACHABLE();
+ break;
+ case OsrGuardType::kSignedSmall:
+ if (lower()) {
+ NodeProperties::ChangeOp(node,
+ simplified()->CheckedTaggedToTaggedSigned());
+ }
+ return SetOutput(node, MachineRepresentation::kTaggedSigned);
+ case OsrGuardType::kAny: // Nothing to check.
+ if (lower()) {
+ DeferReplacement(node, node->InputAt(0));
+ }
+ return SetOutput(node, MachineRepresentation::kTagged);
+ }
+ UNREACHABLE();
+ }
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -1304,7 +1382,15 @@ class RepresentationSelector {
case IrOpcode::kNumberConstant:
return VisitLeaf(node, MachineRepresentation::kTagged);
case IrOpcode::kHeapConstant:
- return VisitLeaf(node, MachineRepresentation::kTagged);
+ return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
+ case IrOpcode::kPointerConstant: {
+ VisitLeaf(node, MachineType::PointerRepresentation());
+ if (lower()) {
+ intptr_t const value = OpParameter<intptr_t>(node);
+ DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value));
+ }
+ return;
+ }
case IrOpcode::kBranch:
ProcessInput(node, 0, UseInfo::Bool());
@@ -1332,7 +1418,7 @@ class RepresentationSelector {
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
VisitInputs(node);
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
}
return;
}
@@ -1362,8 +1448,7 @@ class RepresentationSelector {
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
} else {
- DCHECK_EQ(input_info->representation(),
- MachineRepresentation::kTagged);
+ DCHECK(CanBeTaggedPointer(input_info->representation()));
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
@@ -2033,6 +2118,31 @@ class RepresentationSelector {
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
+ case IrOpcode::kNumberToUint8Clamped: {
+ Type* const input_type = TypeOf(node->InputAt(0));
+ if (input_type->Is(type_cache_.kUint8OrMinusZeroOrNaN)) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if (input_type->Is(Type::Unsigned32OrMinusZeroOrNaN())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) lowering->DoUnsigned32ToUint8Clamped(node);
+ } else if (input_type->Is(Type::Signed32OrMinusZeroOrNaN())) {
+ VisitUnop(node, UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) lowering->DoSigned32ToUint8Clamped(node);
+ } else if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) lowering->DoIntegerToUint8Clamped(node);
+ } else {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kFloat64);
+ if (lower()) lowering->DoNumberToUint8Clamped(node);
+ }
+ return;
+ }
case IrOpcode::kReferenceEqual: {
VisitBinop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
if (lower()) {
@@ -2044,7 +2154,7 @@ class RepresentationSelector {
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
return VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTagged);
+ MachineRepresentation::kTaggedPointer);
}
case IrOpcode::kStringCharCodeAt: {
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
@@ -2053,12 +2163,12 @@ class RepresentationSelector {
}
case IrOpcode::kStringFromCharCode: {
VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kTagged);
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kStringFromCodePoint: {
VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kTagged);
+ MachineRepresentation::kTaggedPointer);
return;
}
@@ -2082,11 +2192,13 @@ class RepresentationSelector {
}
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
- if (lower()) DeferReplacement(node, node->InputAt(0));
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+ MachineRepresentation::kTaggedPointer);
}
+ if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
case IrOpcode::kCheckIf: {
@@ -2127,10 +2239,12 @@ class RepresentationSelector {
}
case IrOpcode::kCheckString: {
if (InputIs(node, Type::String())) {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
}
return;
}
@@ -2138,7 +2252,7 @@ class RepresentationSelector {
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kLoadField: {
@@ -2352,7 +2466,8 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckTaggedHole: {
- VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
return;
}
case IrOpcode::kConvertTaggedHoleToUndefined: {
@@ -2385,14 +2500,14 @@ class RepresentationSelector {
}
case IrOpcode::kEnsureWritableFastElements:
return VisitBinop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTagged);
+ MachineRepresentation::kTaggedPointer);
case IrOpcode::kMaybeGrowFastElements: {
ProcessInput(node, 0, UseInfo::AnyTagged()); // object
ProcessInput(node, 1, UseInfo::AnyTagged()); // elements
ProcessInput(node, 2, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 3, UseInfo::TruncatingWord32()); // length
ProcessRemainingInputs(node, 4);
- SetOutput(node, MachineRepresentation::kTagged);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
@@ -2403,6 +2518,8 @@ class RepresentationSelector {
return;
case IrOpcode::kStateValues:
return VisitStateValues(node);
+ case IrOpcode::kObjectState:
+ return VisitObjectState(node);
case IrOpcode::kTypeGuard: {
// We just get rid of the sigma here. In principle, it should be
// possible to refine the truncation and representation based on
@@ -2414,10 +2531,22 @@ class RepresentationSelector {
return;
}
+ case IrOpcode::kOsrGuard:
+ return VisitOsrGuard(node);
+
+ case IrOpcode::kFinishRegion:
+ VisitInputs(node);
+ // Assume the output is tagged pointer.
+ return SetOutput(node, MachineRepresentation::kTaggedPointer);
+
+ case IrOpcode::kReturn:
+ VisitReturn(node);
+ // Assume the output is tagged.
+ return SetOutput(node, MachineRepresentation::kTagged);
+
// Operators with all inputs tagged and no or tagged output have uniform
// handling.
case IrOpcode::kEnd:
- case IrOpcode::kReturn:
case IrOpcode::kIfSuccess:
case IrOpcode::kIfException:
case IrOpcode::kIfTrue:
@@ -2431,10 +2560,8 @@ class RepresentationSelector {
case IrOpcode::kMerge:
case IrOpcode::kThrow:
case IrOpcode::kBeginRegion:
- case IrOpcode::kFinishRegion:
- case IrOpcode::kOsrValue:
case IrOpcode::kProjection:
- case IrOpcode::kObjectState:
+ case IrOpcode::kOsrValue:
// All JavaScript operators except JSToNumber have uniform handling.
#define OPCODE_CASE(name) case IrOpcode::k##name:
JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -3199,6 +3326,71 @@ void SimplifiedLowering::DoNumberToBit(Node* node) {
NodeProperties::ChangeOp(node, machine()->Float64LessThan());
}
+void SimplifiedLowering::DoIntegerToUint8Clamped(Node* node) {
+ Node* const input = node->InputAt(0);
+ Node* const min = jsgraph()->Float64Constant(0.0);
+ Node* const max = jsgraph()->Float64Constant(255.0);
+
+ node->ReplaceInput(
+ 0, graph()->NewNode(machine()->Float64LessThan(), min, input));
+ node->AppendInput(
+ graph()->zone(),
+ graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, max), input,
+ max));
+ node->AppendInput(graph()->zone(), min);
+ NodeProperties::ChangeOp(node,
+ common()->Select(MachineRepresentation::kFloat64));
+}
+
+void SimplifiedLowering::DoNumberToUint8Clamped(Node* node) {
+ Node* const input = node->InputAt(0);
+ Node* const min = jsgraph()->Float64Constant(0.0);
+ Node* const max = jsgraph()->Float64Constant(255.0);
+
+ node->ReplaceInput(
+ 0, graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), min, input),
+ graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(), input, max),
+ input, max),
+ min));
+ NodeProperties::ChangeOp(node,
+ machine()->Float64RoundTiesEven().placeholder());
+}
+
+void SimplifiedLowering::DoSigned32ToUint8Clamped(Node* node) {
+ Node* const input = node->InputAt(0);
+ Node* const min = jsgraph()->Int32Constant(0);
+ Node* const max = jsgraph()->Int32Constant(255);
+
+ node->ReplaceInput(
+ 0, graph()->NewNode(machine()->Int32LessThanOrEqual(), input, max));
+ node->AppendInput(
+ graph()->zone(),
+ graph()->NewNode(common()->Select(MachineRepresentation::kWord32),
+ graph()->NewNode(machine()->Int32LessThan(), input, min),
+ min, input));
+ node->AppendInput(graph()->zone(), max);
+ NodeProperties::ChangeOp(node,
+ common()->Select(MachineRepresentation::kWord32));
+}
+
+void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
+ Node* const input = node->InputAt(0);
+ Node* const max = jsgraph()->Uint32Constant(255u);
+
+ node->ReplaceInput(
+ 0, graph()->NewNode(machine()->Uint32LessThanOrEqual(), input, max));
+ node->AppendInput(graph()->zone(), input);
+ node->AppendInput(graph()->zone(), max);
+ NodeProperties::ChangeOp(node,
+ common()->Select(MachineRepresentation::kWord32));
+}
+
Node* SimplifiedLowering::ToNumberCode() {
if (!to_number_code_.is_set()) {
Callable callable = CodeFactory::ToNumber(isolate());
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 9e2a499bc6..09e58ff18a 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -44,6 +44,10 @@ class SimplifiedLowering final {
void DoIntegral32ToBit(Node* node);
void DoOrderedNumberToBit(Node* node);
void DoNumberToBit(Node* node);
+ void DoIntegerToUint8Clamped(Node* node);
+ void DoNumberToUint8Clamped(Node* node);
+ void DoSigned32ToUint8Clamped(Node* node);
+ void DoUnsigned32ToUint8Clamped(Node* node);
private:
JSGraph* const jsgraph_;
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index d172adcf60..b8a486df38 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -80,7 +80,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kTruncateTaggedToFloat64: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceFloat64(m.Value());
- if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
+ if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
+ return Replace(m.node()->InputAt(0));
+ }
if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
return Change(node, machine()->ChangeInt32ToFloat64(), m.InputAt(0));
}
@@ -89,10 +91,11 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kChangeTaggedSignedToInt32:
case IrOpcode::kChangeTaggedToInt32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
- if (m.IsChangeFloat64ToTagged()) {
+ if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToInt32(), m.InputAt(0));
}
if (m.IsChangeInt31ToTaggedSigned() || m.IsChangeInt32ToTagged()) {
@@ -103,7 +106,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedToUint32: {
NumberMatcher m(node->InputAt(0));
if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
- if (m.IsChangeFloat64ToTagged()) {
+ if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->ChangeFloat64ToUint32(), m.InputAt(0));
}
if (m.IsChangeUint32ToTagged()) return Replace(m.InputAt(0));
@@ -121,11 +124,12 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
m.IsChangeUint32ToTagged()) {
return Replace(m.InputAt(0));
}
- if (m.IsChangeFloat64ToTagged()) {
+ if (m.IsChangeFloat64ToTagged() || m.IsChangeFloat64ToTaggedPointer()) {
return Change(node, machine()->TruncateFloat64ToWord32(), m.InputAt(0));
}
break;
}
+ case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedTaggedSignedToInt32: {
NodeMatcher m(node->InputAt(0));
if (m.IsConvertTaggedHoleToUndefined()) {
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 44bfdff3e3..266cb236ba 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -21,7 +23,8 @@ class JSGraph;
class MachineOperatorBuilder;
class SimplifiedOperatorBuilder;
-class SimplifiedOperatorReducer final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
~SimplifiedOperatorReducer() final;
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 400db97bdc..345a2c5f88 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -393,6 +393,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
V(NumberToInt32, Operator::kNoProperties, 1, 0) \
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
@@ -404,6 +405,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
@@ -430,25 +432,26 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual)
-#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
- V(CheckHeapObject, 1, 1) \
- V(CheckIf, 1, 0) \
- V(CheckNumber, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
- V(CheckTaggedHole, 1, 1) \
- V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
- V(CheckedInt32Div, 2, 1) \
- V(CheckedInt32Mod, 2, 1) \
- V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
- V(CheckedInt32ToTaggedSigned, 1, 1) \
- V(CheckedTaggedSignedToInt32, 1, 1) \
- V(CheckedTaggedToTaggedSigned, 1, 1) \
+#define CHECKED_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckHeapObject, 1, 1) \
+ V(CheckIf, 1, 0) \
+ V(CheckNumber, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
+ V(CheckTaggedHole, 1, 1) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
+ V(CheckedUint32Mod, 2, 1) \
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedInt32ToTaggedSigned, 1, 1) \
+ V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedSigned, 1, 1) \
+ V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTruncateTaggedToWord32, 1, 1)
struct SimplifiedOperatorGlobalCache final {
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index a904391310..833a0554f5 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -7,8 +7,10 @@
#include <iosfwd>
+#include "src/base/compiler-specific.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
@@ -45,15 +47,15 @@ class BufferAccess final {
ExternalArrayType const external_array_type_;
};
-bool operator==(BufferAccess, BufferAccess);
+V8_EXPORT_PRIVATE bool operator==(BufferAccess, BufferAccess);
bool operator!=(BufferAccess, BufferAccess);
size_t hash_value(BufferAccess);
-std::ostream& operator<<(std::ostream&, BufferAccess);
-
-BufferAccess const BufferAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BufferAccess);
+V8_EXPORT_PRIVATE BufferAccess const BufferAccessOf(const Operator* op)
+ WARN_UNUSED_RESULT;
// An access descriptor for loads/stores of fixed structures like field
// accesses of heap objects. Accesses from either tagged or untagged base
@@ -69,12 +71,12 @@ struct FieldAccess {
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
-bool operator==(FieldAccess const&, FieldAccess const&);
+V8_EXPORT_PRIVATE bool operator==(FieldAccess const&, FieldAccess const&);
bool operator!=(FieldAccess const&, FieldAccess const&);
size_t hash_value(FieldAccess const&);
-std::ostream& operator<<(std::ostream&, FieldAccess const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, FieldAccess const&);
FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -96,14 +98,15 @@ struct ElementAccess {
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
-bool operator==(ElementAccess const&, ElementAccess const&);
+V8_EXPORT_PRIVATE bool operator==(ElementAccess const&, ElementAccess const&);
bool operator!=(ElementAccess const&, ElementAccess const&);
size_t hash_value(ElementAccess const&);
-std::ostream& operator<<(std::ostream&, ElementAccess const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ElementAccess const&);
-ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE ElementAccess const& ElementAccessOf(const Operator* op)
+ WARN_UNUSED_RESULT;
ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -178,7 +181,7 @@ enum class NumberOperationHint : uint8_t {
size_t hash_value(NumberOperationHint);
-std::ostream& operator<<(std::ostream&, NumberOperationHint);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
@@ -209,7 +212,8 @@ UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
// - Bool: a tagged pointer to either the canonical JS #false or
// the canonical JS #true object
// - Bit: an untagged integer 0 or 1, but word-sized
-class SimplifiedOperatorBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
@@ -265,6 +269,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* NumberToBoolean();
const Operator* NumberToInt32();
const Operator* NumberToUint32();
+ const Operator* NumberToUint8Clamped();
const Operator* NumberSilenceNaN();
@@ -305,6 +310,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ChangeInt32ToTagged();
const Operator* ChangeUint32ToTagged();
const Operator* ChangeFloat64ToTagged();
+ const Operator* ChangeFloat64ToTaggedPointer();
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
const Operator* TruncateTaggedToWord32();
@@ -335,6 +341,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
const Operator* CheckedTaggedToTaggedSigned();
+ const Operator* CheckedTaggedToTaggedPointer();
const Operator* CheckedTruncateTaggedToWord32();
const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 77cc227038..e8310d7d56 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -274,8 +274,7 @@ MachineType StateValuesAccess::iterator::type() {
return MachineType::AnyTagged();
} else {
DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
- const ZoneVector<MachineType>* types =
- OpParameter<const ZoneVector<MachineType>*>(state);
+ ZoneVector<MachineType> const* types = MachineTypesOf(state->op());
return (*types)[Top()->index];
}
}
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 704f5f63a5..14b1b9e599 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_STATE_VALUES_UTILS_H_
#include "src/compiler/js-graph.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -14,7 +15,7 @@ namespace compiler {
class Graph;
-class StateValuesCache {
+class V8_EXPORT_PRIVATE StateValuesCache {
public:
explicit StateValuesCache(JSGraph* js_graph);
@@ -60,7 +61,7 @@ class StateValuesCache {
Node* empty_state_values_;
};
-class StateValuesAccess {
+class V8_EXPORT_PRIVATE StateValuesAccess {
public:
struct TypedNode {
Node* node;
@@ -68,7 +69,7 @@ class StateValuesAccess {
TypedNode(Node* node, MachineType type) : node(node), type(type) {}
};
- class iterator {
+ class V8_EXPORT_PRIVATE iterator {
public:
// Bare minimum of operators needed for range iteration.
bool operator!=(iterator& other);
diff --git a/deps/v8/src/compiler/tail-call-optimization.cc b/deps/v8/src/compiler/tail-call-optimization.cc
index 7e1623aeca..605b0e7282 100644
--- a/deps/v8/src/compiler/tail-call-optimization.cc
+++ b/deps/v8/src/compiler/tail-call-optimization.cc
@@ -7,6 +7,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
namespace v8 {
@@ -18,12 +19,15 @@ Reduction TailCallOptimization::Reduce(Node* node) {
// The value which is returned must be the result of a potential tail call,
// there must be no try/catch/finally around the Call, and there must be no
// other effect between the Call and the Return nodes.
- Node* const call = NodeProperties::GetValueInput(node, 0);
+ Node* const call = NodeProperties::GetValueInput(node, 1);
if (call->opcode() == IrOpcode::kCall &&
CallDescriptorOf(call->op())->SupportsTailCalls() &&
NodeProperties::GetEffectInput(node) == call &&
!NodeProperties::IsExceptionalCall(call)) {
Node* const control = NodeProperties::GetControlInput(node);
+ // Ensure that no additional arguments are being popped other than those in
+ // the CallDescriptor, otherwise the tail call transformation is invalid.
+ DCHECK_EQ(0, Int32Matcher(NodeProperties::GetValueInput(node, 0)).Value());
if (control->opcode() == IrOpcode::kIfSuccess &&
call->OwnedBy(node, control) && control->OwnedBy(node)) {
// Furthermore, control has to flow via an IfSuccess from the Call, so
@@ -62,9 +66,10 @@ Reduction TailCallOptimization::Reduce(Node* node) {
// |
DCHECK_EQ(call, NodeProperties::GetControlInput(control, 0));
- DCHECK_EQ(3, node->InputCount());
+ DCHECK_EQ(4, node->InputCount());
node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
node->ReplaceInput(1, NodeProperties::GetControlInput(call));
+ node->RemoveInput(3);
node->RemoveInput(2);
for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
node->InsertInput(graph()->zone(), index,
diff --git a/deps/v8/src/compiler/tail-call-optimization.h b/deps/v8/src/compiler/tail-call-optimization.h
index b5d4f961fe..d693f3694c 100644
--- a/deps/v8/src/compiler/tail-call-optimization.h
+++ b/deps/v8/src/compiler/tail-call-optimization.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -18,7 +19,7 @@ class Graph;
// Performs tail call optimization by replacing certain combinations of Return
// and Call nodes with a single TailCall.
-class TailCallOptimization final : public Reducer {
+class V8_EXPORT_PRIVATE TailCallOptimization final : public Reducer {
public:
TailCallOptimization(CommonOperatorBuilder* common, Graph* graph)
: common_(common), graph_(graph) {}
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index aa51dacf91..69eaf11616 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -21,11 +21,13 @@ class TypeCache final {
public:
static TypeCache const& Get();
- TypeCache() : zone_(&allocator) {}
+ TypeCache() : zone_(&allocator, ZONE_NAME) {}
Type* const kInt8 = CreateRange<int8_t>();
Type* const kUint8 = CreateRange<uint8_t>();
Type* const kUint8Clamped = kUint8;
+ Type* const kUint8OrMinusZeroOrNaN =
+ Type::Union(kUint8, Type::MinusZeroOrNaN(), zone());
Type* const kInt16 = CreateRange<int16_t>();
Type* const kUint16 = CreateRange<uint16_t>();
Type* const kInt32 = Type::Signed32();
@@ -33,9 +35,8 @@ class TypeCache final {
Type* const kFloat32 = Type::Number();
Type* const kFloat64 = Type::Number();
- Type* const kSmi = Type::SignedSmall();
- Type* const kHoleySmi = Type::Union(kSmi, Type::Hole(), zone());
- Type* const kHeapNumber = Type::Number();
+ Type* const kHoleySmi =
+ Type::Union(Type::SignedSmall(), Type::Hole(), zone());
Type* const kSingletonZero = CreateRange(0.0, 0.0);
Type* const kSingletonOne = CreateRange(1.0, 1.0);
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
index a668a48ad8..da77a0c997 100644
--- a/deps/v8/src/compiler/type-hint-analyzer.cc
+++ b/deps/v8/src/compiler/type-hint-analyzer.cc
@@ -92,21 +92,7 @@ bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
Handle<Code> code = i->second;
DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
-// TODO(bmeurer): Replace ToBooleanICStub::Types with ToBooleanHints.
-#define ASSERT_COMPATIBLE(NAME, Name) \
- STATIC_ASSERT(1 << ToBooleanICStub::NAME == \
- static_cast<int>(ToBooleanHint::k##Name))
- ASSERT_COMPATIBLE(UNDEFINED, Undefined);
- ASSERT_COMPATIBLE(BOOLEAN, Boolean);
- ASSERT_COMPATIBLE(NULL_TYPE, Null);
- ASSERT_COMPATIBLE(SMI, SmallInteger);
- ASSERT_COMPATIBLE(SPEC_OBJECT, Receiver);
- ASSERT_COMPATIBLE(STRING, String);
- ASSERT_COMPATIBLE(SYMBOL, Symbol);
- ASSERT_COMPATIBLE(HEAP_NUMBER, HeapNumber);
- ASSERT_COMPATIBLE(SIMD_VALUE, SimdValue);
-#undef ASSERT_COMPATIBLE
- *hints = ToBooleanHints(stub.types().ToIntegral());
+ *hints = stub.hints();
return true;
}
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index c5e8648ca5..5ebc390c8b 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -22,8 +22,9 @@ TypedOptimization::TypedOptimization(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- true_type_(Type::Constant(factory()->true_value(), graph()->zone())),
- false_type_(Type::Constant(factory()->false_value(), graph()->zone())),
+ true_type_(Type::HeapConstant(factory()->true_value(), graph()->zone())),
+ false_type_(
+ Type::HeapConstant(factory()->false_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {}
TypedOptimization::~TypedOptimization() {}
@@ -43,8 +44,9 @@ Reduction TypedOptimization::Reduce(Node* node) {
// the Operator::kNoDeopt property).
Type* upper = NodeProperties::GetType(node);
if (upper->IsInhabited()) {
- if (upper->IsConstant()) {
- Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
+ if (upper->IsHeapConstant()) {
+ Node* replacement =
+ jsgraph()->Constant(upper->AsHeapConstant()->Value());
ReplaceWithValue(node, replacement);
return Changed(replacement);
} else if (upper->Is(Type::MinusZero())) {
@@ -72,6 +74,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
}
}
switch (node->opcode()) {
+ case IrOpcode::kCheckHeapObject:
+ return ReduceCheckHeapObject(node);
case IrOpcode::kCheckMaps:
return ReduceCheckMaps(node);
case IrOpcode::kCheckString:
@@ -83,6 +87,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
case IrOpcode::kNumberRound:
case IrOpcode::kNumberTrunc:
return ReduceNumberRoundop(node);
+ case IrOpcode::kNumberToUint8Clamped:
+ return ReduceNumberToUint8Clamped(node);
case IrOpcode::kPhi:
return ReducePhi(node);
case IrOpcode::kSelect:
@@ -96,10 +102,8 @@ Reduction TypedOptimization::Reduce(Node* node) {
namespace {
MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
- if (object_type->IsConstant() &&
- object_type->AsConstant()->Value()->IsHeapObject()) {
- Handle<Map> object_map(
- Handle<HeapObject>::cast(object_type->AsConstant()->Value())->map());
+ if (object_type->IsHeapConstant()) {
+ Handle<Map> object_map(object_type->AsHeapConstant()->Value()->map());
if (object_map->is_stable()) return object_map;
}
return MaybeHandle<Map>();
@@ -107,6 +111,16 @@ MaybeHandle<Map> GetStableMapFromObjectType(Type* object_type) {
} // namespace
+Reduction TypedOptimization::ReduceCheckHeapObject(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (!input_type->Maybe(Type::SignedSmall())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
// The CheckMaps(o, ...map...) can be eliminated if map is stable,
// o has type Constant(object) and map == object->map, and either
@@ -121,8 +135,8 @@ Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
for (int i = 1; i < node->op()->ValueInputCount(); ++i) {
Node* const map = NodeProperties::GetValueInput(node, i);
Type* const map_type = NodeProperties::GetType(map);
- if (map_type->IsConstant() &&
- map_type->AsConstant()->Value().is_identical_to(object_map)) {
+ if (map_type->IsHeapConstant() &&
+ map_type->AsHeapConstant()->Value().is_identical_to(object_map)) {
if (object_map->CanTransition()) {
dependencies()->AssumeMapStable(object_map);
}
@@ -180,6 +194,15 @@ Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceNumberToUint8Clamped(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kUint8)) {
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReducePhi(Node* node) {
// Try to narrow the type of the Phi {node}, which might be more precise now
// after lowering based on types, i.e. a SpeculativeNumberAdd has a more
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index 54d780c33e..fb2db7249d 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_TYPED_OPTIMIZATION_H_
#define V8_COMPILER_TYPED_OPTIMIZATION_H_
+#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -23,7 +25,8 @@ class JSGraph;
class SimplifiedOperatorBuilder;
class TypeCache;
-class TypedOptimization final : public AdvancedReducer {
+class V8_EXPORT_PRIVATE TypedOptimization final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
public:
// Flags that control the mode of operation.
enum Flag {
@@ -39,10 +42,12 @@ class TypedOptimization final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceCheckHeapObject(Node* node);
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckString(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceNumberRoundop(Node* node);
+ Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
Reduction ReduceSelect(Node* node);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index ec1197bb80..2642a1007a 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -11,6 +11,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/loop-variable-optimizer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
@@ -32,8 +33,9 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(Isolate* isolate, Graph* graph)
+Typer::Typer(Isolate* isolate, Flags flags, Graph* graph)
: isolate_(isolate),
+ flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
@@ -41,9 +43,9 @@ Typer::Typer(Isolate* isolate, Graph* graph)
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
- singleton_false_ = Type::Constant(factory->false_value(), zone);
- singleton_true_ = Type::Constant(factory->true_value(), zone);
- singleton_the_hole_ = Type::Constant(factory->the_hole_value(), zone);
+ singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
+ singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
+ singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
falsish_ = Type::Union(
Type::Undetectable(),
Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
@@ -290,7 +292,6 @@ class Typer::Visitor : public Reducer {
JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- static Type* JSTypeOfTyper(Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
@@ -547,25 +548,63 @@ Type* Typer::Visitor::TypeIfException(Node* node) {
// Common operators.
-Type* Typer::Visitor::TypeParameter(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeParameter(Node* node) {
+ Node* const start = node->InputAt(0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const parameter_count = start->op()->ValueOutputCount() - 4;
+ DCHECK_LE(1, parameter_count);
+ int const index = ParameterIndexOf(node->op());
+ if (index == Linkage::kJSCallClosureParamIndex) {
+ return Type::Function();
+ } else if (index == 0) {
+ if (typer_->flags() & Typer::kThisIsReceiver) {
+ return Type::Receiver();
+ } else {
+ // Parameter[this] can be the_hole for derived class constructors.
+ return Type::Union(Type::Hole(), Type::NonInternal(), typer_->zone());
+ }
+ } else if (index == Linkage::GetJSCallNewTargetParamIndex(parameter_count)) {
+ if (typer_->flags() & Typer::kNewTargetIsReceiver) {
+ return Type::Receiver();
+ } else {
+ return Type::Union(Type::Receiver(), Type::Undefined(), typer_->zone());
+ }
+ } else if (index == Linkage::GetJSCallArgCountParamIndex(parameter_count)) {
+ return Type::Range(0.0, Code::kMaxArguments, typer_->zone());
+ } else if (index == Linkage::GetJSCallContextParamIndex(parameter_count)) {
+ return Type::OtherInternal();
+ }
+ return Type::NonInternal();
+}
Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeOsrGuard(Node* node) {
+ switch (OsrGuardTypeOf(node->op())) {
+ case OsrGuardType::kUninitialized:
+ return Type::None();
+ case OsrGuardType::kSignedSmall:
+ return Type::SignedSmall();
+ case OsrGuardType::kAny:
+ return Type::Any();
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
Type* Typer::Visitor::TypeRetain(Node* node) {
UNREACHABLE();
return nullptr;
}
Type* Typer::Visitor::TypeInt32Constant(Node* node) {
- double number = OpParameter<int32_t>(node);
- return Type::Intersect(Type::Range(number, number, zone()),
- Type::Integral32(), zone());
+ UNREACHABLE();
+ return nullptr;
}
-
Type* Typer::Visitor::TypeInt64Constant(Node* node) {
- // TODO(rossberg): This actually seems to be a PointerConstant so far...
- return Type::Internal(); // TODO(rossberg): Add int64 bitset type?
+ UNREACHABLE();
+ return nullptr;
}
Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
@@ -583,32 +622,27 @@ Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
return nullptr;
}
-
Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
UNREACHABLE();
return nullptr;
}
-
Type* Typer::Visitor::TypeNumberConstant(Node* node) {
- Factory* f = isolate()->factory();
double number = OpParameter<double>(node);
- if (Type::IsInteger(number)) {
- return Type::Range(number, number, zone());
- }
- return Type::Constant(f->NewNumber(number), zone());
+ return Type::NewConstant(number, zone());
}
-
Type* Typer::Visitor::TypeHeapConstant(Node* node) {
return TypeConstant(OpParameter<Handle<HeapObject>>(node));
}
-
Type* Typer::Visitor::TypeExternalConstant(Node* node) {
- return Type::Internal();
+ return Type::ExternalPointer();
}
+Type* Typer::Visitor::TypePointerConstant(Node* node) {
+ return Type::ExternalPointer();
+}
Type* Typer::Visitor::TypeSelect(Node* node) {
return Type::Union(Operand(node, 1), Operand(node, 2), zone());
@@ -784,12 +818,15 @@ Type* Typer::Visitor::TypeFrameState(Node* node) {
Type* Typer::Visitor::TypeStateValues(Node* node) { return Type::Internal(); }
-Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
-
Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
return Type::Internal();
}
+Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
+
+Type* Typer::Visitor::TypeTypedObjectState(Node* node) {
+ return Type::Internal();
+}
Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
@@ -823,7 +860,7 @@ Type* Typer::Visitor::JSEqualTyper(Type* lhs, Type* rhs, Typer* t) {
(lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
return t->singleton_false_;
}
- if (lhs->IsConstant() && rhs->Is(lhs)) {
+ if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
return t->singleton_true_;
@@ -860,7 +897,7 @@ Type* Typer::Visitor::JSStrictEqualTyper(Type* lhs, Type* rhs, Typer* t) {
!lhs->Maybe(rhs)) {
return t->singleton_false_;
}
- if (lhs->IsConstant() && rhs->Is(lhs)) {
+ if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value,
// which is not nan due to the earlier check.
return t->singleton_true_;
@@ -894,7 +931,7 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::JSCompareTyper(Type* lhs,
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return kComparisonUndefined;
ComparisonOutcome result;
- if (lhs->IsConstant() && rhs->Is(lhs)) {
+ if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
// Types are equal and are inhabited only by a single semantic value.
result = kComparisonFalse;
} else if (lhs->Min() >= rhs->Max()) {
@@ -1005,33 +1042,8 @@ Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
// JS unary operators.
-Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
- Factory* const f = t->isolate()->factory();
- if (type->Is(Type::Boolean())) {
- return Type::Constant(f->boolean_string(), t->zone());
- } else if (type->Is(Type::Number())) {
- return Type::Constant(f->number_string(), t->zone());
- } else if (type->Is(Type::String())) {
- return Type::Constant(f->string_string(), t->zone());
- } else if (type->Is(Type::Symbol())) {
- return Type::Constant(f->symbol_string(), t->zone());
- } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
- t->zone()))) {
- return Type::Constant(f->undefined_string(), t->zone());
- } else if (type->Is(Type::Null())) {
- return Type::Constant(f->object_string(), t->zone());
- } else if (type->Is(Type::Function())) {
- return Type::Constant(f->function_string(), t->zone());
- } else if (type->IsConstant()) {
- return Type::Constant(
- Object::TypeOf(t->isolate(), type->AsConstant()->Value()), t->zone());
- }
- return Type::InternalizedString();
-}
-
-
Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
- return TypeUnaryOp(node, JSTypeOfTyper);
+ return Type::InternalizedString();
}
@@ -1091,6 +1103,9 @@ Type* Typer::Visitor::TypeJSCreateIterResultObject(Node* node) {
return Type::OtherObject();
}
+Type* Typer::Visitor::TypeJSCreateKeyValueArray(Node* node) {
+ return Type::OtherObject();
+}
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
return Type::OtherObject();
@@ -1281,11 +1296,10 @@ Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
return Type::Receiver();
}
-
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
- if (fun->IsConstant() && fun->AsConstant()->Value()->IsJSFunction()) {
+ if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
- Handle<JSFunction>::cast(fun->AsConstant()->Value());
+ Handle<JSFunction>::cast(fun->AsHeapConstant()->Value());
if (function->shared()->HasBuiltinFunctionId()) {
switch (function->shared()->builtin_function_id()) {
case kMathRandom:
@@ -1355,6 +1369,8 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kNumberIsNaN:
case kNumberIsSafeInteger:
return Type::Boolean();
+ case kNumberParseFloat:
+ return Type::Number();
case kNumberParseInt:
return t->cache_.kIntegerOrMinusZeroOrNaN;
case kNumberToString:
@@ -1371,9 +1387,19 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kStringToUpperCase:
return Type::String();
+ case kStringIterator:
case kStringIteratorNext:
return Type::OtherObject();
+ case kArrayEntries:
+ case kArrayKeys:
+ case kArrayValues:
+ case kTypedArrayEntries:
+ case kTypedArrayKeys:
+ case kTypedArrayValues:
+ case kArrayIteratorNext:
+ return Type::OtherObject();
+
// Array functions.
case kArrayIndexOf:
case kArrayLastIndexOf:
@@ -1428,7 +1454,6 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsRegExp:
return Type::Boolean();
case Runtime::kInlineCreateIterResultObject:
- case Runtime::kInlineRegExpConstructResult:
return Type::OtherObject();
case Runtime::kInlineSubString:
case Runtime::kInlineStringCharFromCode:
@@ -1468,7 +1493,7 @@ Type* Typer::Visitor::TypeJSForInNext(Node* node) {
Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
Type* const cache_type =
- Type::Union(typer_->cache_.kSmi, Type::OtherInternal(), zone());
+ Type::Union(Type::SignedSmall(), Type::OtherInternal(), zone());
Type* const cache_array = Type::OtherInternal();
Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
return Type::Tuple(cache_type, cache_array, cache_length, zone());
@@ -1483,13 +1508,20 @@ Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeJSLoadModule(Node* node) { return Type::Any(); }
+
+Type* Typer::Visitor::TypeJSStoreModule(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
UNREACHABLE();
return nullptr;
}
Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
- return typer_->cache_.kSmi;
+ return Type::SignedSmall();
}
Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
@@ -1536,7 +1568,7 @@ Type* Typer::Visitor::TypePlainPrimitiveToFloat64(Node* node) {
// static
Type* Typer::Visitor::ReferenceEqualTyper(Type* lhs, Type* rhs, Typer* t) {
- if (lhs->IsConstant() && rhs->Is(lhs)) {
+ if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
return t->singleton_true_;
}
return Type::Boolean();
@@ -1556,34 +1588,15 @@ Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
}
Type* Typer::Visitor::StringFromCharCodeTyper(Type* type, Typer* t) {
- type = NumberToUint32(ToNumber(type, t), t);
- Factory* f = t->isolate()->factory();
- double min = type->Min();
- double max = type->Max();
- if (min == max) {
- uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
- Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
- return Type::Constant(string, t->zone());
- }
return Type::String();
}
Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
- type = NumberToUint32(ToNumber(type, t), t);
- Factory* f = t->isolate()->factory();
- double min = type->Min();
- double max = type->Max();
- if (min == max) {
- uint32_t code = static_cast<uint32_t>(min) & String::kMaxUtf16CodeUnitU;
- Handle<String> string = f->LookupSingleCharacterStringFromCode(code);
- return Type::Constant(string, t->zone());
- }
return Type::String();
}
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
- // TODO(bmeurer): We could do better here based on inputs.
- return Type::Range(0, kMaxUInt16, zone());
+ return typer_->cache_.kUint16;
}
Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
@@ -1663,8 +1676,6 @@ Type* Typer::Visitor::TypeLoadField(Node* node) {
}
Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
- // TODO(bmeurer): This typing is not yet correct. Since we can still access
- // out of bounds, the type in the general case has to include Undefined.
switch (BufferAccessOf(node->op()).external_array_type()) {
#define TYPED_ARRAY_CASE(ElemType, type, TYPE, ctype, size) \
case kExternal##ElemType##Array: \
@@ -1751,7 +1762,7 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
if (Type::IsInteger(*value)) {
return Type::Range(value->Number(), value->Number(), zone());
}
- return Type::Constant(value, zone());
+ return Type::NewConstant(value, zone());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 875b4839e5..7f6f90a517 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -7,6 +7,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/operation-typer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -15,9 +16,16 @@ namespace compiler {
// Forward declarations.
class LoopVariableOptimizer;
-class Typer {
+class V8_EXPORT_PRIVATE Typer {
public:
- Typer(Isolate* isolate, Graph* graph);
+ enum Flag : uint8_t {
+ kNoFlags = 0,
+ kThisIsReceiver = 1u << 0, // Parameter this is an Object.
+ kNewTargetIsReceiver = 1u << 1, // Parameter new.target is an Object.
+ };
+ typedef base::Flags<Flag> Flags;
+
+ Typer(Isolate* isolate, Flags flags, Graph* graph);
~Typer();
void Run();
@@ -29,12 +37,14 @@ class Typer {
class Visitor;
class Decorator;
+ Flags flags() const { return flags_; }
Graph* graph() const { return graph_; }
Zone* zone() const { return graph()->zone(); }
Isolate* isolate() const { return isolate_; }
OperationTyper* operation_typer() { return &operation_typer_; }
Isolate* const isolate_;
+ Flags const flags_;
Graph* const graph_;
Decorator* decorator_;
TypeCache const& cache_;
@@ -49,6 +59,8 @@ class Typer {
DISALLOW_COPY_AND_ASSIGN(Typer);
};
+DEFINE_OPERATORS_FOR_FLAGS(Typer::Flags);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 43d2f80483..806bd8f2c5 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -56,12 +56,6 @@ bool Type::Contains(RangeType* lhs, RangeType* rhs) {
return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
}
-bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
- DisallowHeapAllocation no_allocation;
- return IsInteger(*rhs->Value()) && lhs->Min() <= rhs->Value()->Number() &&
- rhs->Value()->Number() <= lhs->Max();
-}
-
bool Type::Contains(RangeType* range, i::Object* val) {
DisallowHeapAllocation no_allocation;
return IsInteger(val) && range->Min() <= val->Number() &&
@@ -82,7 +76,8 @@ double Type::Min() {
return min;
}
if (this->IsRange()) return this->AsRange()->Min();
- if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+ if (this->IsOtherNumberConstant())
+ return this->AsOtherNumberConstant()->Value();
UNREACHABLE();
return 0;
}
@@ -98,7 +93,8 @@ double Type::Max() {
return max;
}
if (this->IsRange()) return this->AsRange()->Max();
- if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+ if (this->IsOtherNumberConstant())
+ return this->AsOtherNumberConstant()->Value();
UNREACHABLE();
return 0;
}
@@ -139,7 +135,9 @@ Type::bitset BitsetType::Lub(Type* type) {
}
return bitset;
}
- if (type->IsConstant()) return type->AsConstant()->Lub();
+ if (type->IsHeapConstant()) return type->AsHeapConstant()->Lub();
+ if (type->IsOtherNumberConstant())
+ return type->AsOtherNumberConstant()->Lub();
if (type->IsRange()) return type->AsRange()->Lub();
if (type->IsTuple()) return kOtherInternal;
UNREACHABLE();
@@ -205,6 +203,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@@ -215,6 +215,43 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
@@ -231,6 +268,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ALLOCATION_SITE_TYPE:
case ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
+ case FUNCTION_TEMPLATE_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
@@ -242,6 +280,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
+ case MODULE_INFO_ENTRY_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -257,7 +296,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
case CALL_HANDLER_INFO_TYPE:
- case FUNCTION_TEMPLATE_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case SIGNATURE_INFO_TYPE:
case TYPE_SWITCH_INFO_TYPE:
@@ -265,12 +303,14 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case BOX_TYPE:
- case PROMISE_CONTAINER_TYPE:
+ case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
+ case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
case BREAK_POINT_INFO_TYPE:
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
+ case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
UNREACHABLE();
return kNone;
@@ -390,14 +430,43 @@ double BitsetType::Max(bitset bits) {
return std::numeric_limits<double>::quiet_NaN();
}
+// static
+bool OtherNumberConstantType::IsOtherNumberConstant(double value) {
+ // Not an integer, not NaN, and not -0.
+ return !std::isnan(value) && !Type::IsInteger(value) &&
+ !i::IsMinusZero(value);
+}
+
+// static
+bool OtherNumberConstantType::IsOtherNumberConstant(Object* value) {
+ return value->IsHeapNumber() &&
+ IsOtherNumberConstant(HeapNumber::cast(value)->value());
+}
+
+HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
+ i::Handle<i::HeapObject> object)
+ : TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
+ DCHECK(!object->IsHeapNumber());
+ DCHECK(!object->IsString());
+}
+
// -----------------------------------------------------------------------------
// Predicates.
bool Type::SimplyEquals(Type* that) {
DisallowHeapAllocation no_allocation;
- if (this->IsConstant()) {
- return that->IsConstant() &&
- *this->AsConstant()->Value() == *that->AsConstant()->Value();
+ if (this->IsHeapConstant()) {
+ return that->IsHeapConstant() &&
+ this->AsHeapConstant()->Value().address() ==
+ that->AsHeapConstant()->Value().address();
+ }
+ if (this->IsOtherNumberConstant()) {
+ return that->IsOtherNumberConstant() &&
+ this->AsOtherNumberConstant()->Value() ==
+ that->AsOtherNumberConstant()->Value();
+ }
+ if (this->IsRange()) {
+ if (that->IsHeapConstant() || that->IsOtherNumberConstant()) return false;
}
if (this->IsTuple()) {
if (!that->IsTuple()) return false;
@@ -446,9 +515,7 @@ bool Type::SlowIs(Type* that) {
}
if (that->IsRange()) {
- return (this->IsRange() && Contains(that->AsRange(), this->AsRange())) ||
- (this->IsConstant() &&
- Contains(that->AsRange(), this->AsConstant()));
+ return (this->IsRange() && Contains(that->AsRange(), this->AsRange()));
}
if (this->IsRange()) return false;
@@ -481,9 +548,6 @@ bool Type::Maybe(Type* that) {
if (this->IsBitset() && that->IsBitset()) return true;
if (this->IsRange()) {
- if (that->IsConstant()) {
- return Contains(this->AsRange(), that->AsConstant());
- }
if (that->IsRange()) {
return Overlap(this->AsRange(), that->AsRange());
}
@@ -673,9 +737,6 @@ int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
}
return size;
}
- if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
- return AddToUnion(rhs, result, size, zone);
- }
if (rhs->IsRange()) {
RangeType::Limits lim = RangeType::Limits::Intersect(
RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
@@ -743,6 +804,40 @@ Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
return RangeType::New(range_min, range_max, zone);
}
+Type* Type::NewConstant(double value, Zone* zone) {
+ if (IsInteger(value)) {
+ return Range(value, value, zone);
+ } else if (i::IsMinusZero(value)) {
+ return Type::MinusZero();
+ } else if (std::isnan(value)) {
+ return Type::NaN();
+ }
+
+ DCHECK(OtherNumberConstantType::IsOtherNumberConstant(value));
+ return OtherNumberConstant(value, zone);
+}
+
+Type* Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
+ if (IsInteger(*value)) {
+ double v = value->Number();
+ return Range(v, v, zone);
+ } else if (value->IsHeapNumber()) {
+ return NewConstant(value->Number(), zone);
+ } else if (value->IsString()) {
+ bitset b = BitsetType::Lub(*value);
+ DCHECK(b == BitsetType::kInternalizedString ||
+ b == BitsetType::kOtherString);
+ if (b == BitsetType::kInternalizedString) {
+ return Type::InternalizedString();
+ } else if (b == BitsetType::kOtherString) {
+ return Type::OtherString();
+ } else {
+ UNREACHABLE();
+ }
+ }
+ return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
+}
+
Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
@@ -833,17 +928,14 @@ Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
return union_type;
}
-// -----------------------------------------------------------------------------
-// Iteration.
-
int Type::NumConstants() {
DisallowHeapAllocation no_allocation;
- if (this->IsConstant()) {
+ if (this->IsHeapConstant() || this->IsOtherNumberConstant()) {
return 1;
} else if (this->IsUnion()) {
int result = 0;
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- if (this->AsUnion()->Get(i)->IsConstant()) ++result;
+ if (this->AsUnion()->Get(i)->IsHeapConstant()) ++result;
}
return result;
} else {
@@ -905,8 +997,11 @@ void Type::PrintTo(std::ostream& os) {
DisallowHeapAllocation no_allocation;
if (this->IsBitset()) {
BitsetType::Print(os, this->AsBitset());
- } else if (this->IsConstant()) {
- os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
+ } else if (this->IsHeapConstant()) {
+ os << "HeapConstant(" << Brief(*this->AsHeapConstant()->Value()) << ")";
+ } else if (this->IsOtherNumberConstant()) {
+ os << "OtherNumberConstant(" << this->AsOtherNumberConstant()->Value()
+ << ")";
} else if (this->IsRange()) {
std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
std::streamsize saved_precision = os.precision(0);
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index ef5bec3f9d..e78357030e 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -5,7 +5,9 @@
#ifndef V8_COMPILER_TYPES_H_
#define V8_COMPILER_TYPES_H_
+#include "src/base/compiler-specific.h"
#include "src/conversions.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/objects.h"
#include "src/ostreams.h"
@@ -121,6 +123,7 @@ namespace compiler {
V(Function, 1u << 19) \
V(Hole, 1u << 20) \
V(OtherInternal, 1u << 21) \
+ V(ExternalPointer, 1u << 22) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
@@ -155,10 +158,11 @@ namespace compiler {
V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
V(Object, kFunction | kOtherObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy) \
+ V(ReceiverOrUndefined, kReceiver | kUndefined) \
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
kReceiver) \
- V(Internal, kHole | kOtherInternal) \
+ V(Internal, kHole | kExternalPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
V(NonNumber, kUnique | kString | kInternal) \
V(Any, 0xfffffffeu)
@@ -190,7 +194,7 @@ class Type;
// -----------------------------------------------------------------------------
// Bitset types (internal).
-class BitsetType {
+class V8_EXPORT_PRIVATE BitsetType {
public:
typedef uint32_t bitset; // Internal
@@ -263,7 +267,7 @@ class TypeBase {
protected:
friend class Type;
- enum Kind { kConstant, kTuple, kUnion, kRange };
+ enum Kind { kHeapConstant, kOtherNumberConstant, kTuple, kUnion, kRange };
Kind kind() const { return kind_; }
explicit TypeBase(Kind kind) : kind_(kind) {}
@@ -287,34 +291,63 @@ class TypeBase {
// -----------------------------------------------------------------------------
// Constant types.
-class ConstantType : public TypeBase {
+class OtherNumberConstantType : public TypeBase {
public:
- i::Handle<i::Object> Value() { return object_; }
+ double Value() { return value_; }
+
+ static bool IsOtherNumberConstant(double value);
+ static bool IsOtherNumberConstant(Object* value);
+
+ private:
+ friend class Type;
+ friend class BitsetType;
+
+ static Type* New(double value, Zone* zone) {
+ return AsType(new (zone->New(sizeof(OtherNumberConstantType)))
+ OtherNumberConstantType(value)); // NOLINT
+ }
+
+ static OtherNumberConstantType* cast(Type* type) {
+ DCHECK(IsKind(type, kOtherNumberConstant));
+ return static_cast<OtherNumberConstantType*>(FromType(type));
+ }
+
+ explicit OtherNumberConstantType(double value)
+ : TypeBase(kOtherNumberConstant), value_(value) {
+ CHECK(IsOtherNumberConstant(value));
+ }
+
+ BitsetType::bitset Lub() { return BitsetType::kOtherNumber; }
+
+ double value_;
+};
+
+class V8_EXPORT_PRIVATE HeapConstantType : public NON_EXPORTED_BASE(TypeBase) {
+ public:
+ i::Handle<i::HeapObject> Value() { return object_; }
private:
friend class Type;
friend class BitsetType;
- static Type* New(i::Handle<i::Object> value, Zone* zone) {
+ static Type* New(i::Handle<i::HeapObject> value, Zone* zone) {
BitsetType::bitset bitset = BitsetType::Lub(*value);
- return AsType(new (zone->New(sizeof(ConstantType)))
- ConstantType(bitset, value));
+ return AsType(new (zone->New(sizeof(HeapConstantType)))
+ HeapConstantType(bitset, value));
}
- static ConstantType* cast(Type* type) {
- DCHECK(IsKind(type, kConstant));
- return static_cast<ConstantType*>(FromType(type));
+ static HeapConstantType* cast(Type* type) {
+ DCHECK(IsKind(type, kHeapConstant));
+ return static_cast<HeapConstantType*>(FromType(type));
}
- ConstantType(BitsetType::bitset bitset, i::Handle<i::Object> object)
- : TypeBase(kConstant), bitset_(bitset), object_(object) {}
+ HeapConstantType(BitsetType::bitset bitset, i::Handle<i::HeapObject> object);
BitsetType::bitset Lub() { return bitset_; }
BitsetType::bitset bitset_;
- Handle<i::Object> object_;
+ Handle<i::HeapObject> object_;
};
-// TODO(neis): Also cache value if numerical.
// -----------------------------------------------------------------------------
// Range types.
@@ -457,7 +490,7 @@ class UnionType : public StructuralType {
bool Wellformed();
};
-class Type {
+class V8_EXPORT_PRIVATE Type {
public:
typedef BitsetType::bitset bitset; // Internal
@@ -474,8 +507,11 @@ class Type {
return BitsetType::New(BitsetType::UnsignedSmall());
}
- static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
- return ConstantType::New(value, zone);
+ static Type* OtherNumberConstant(double value, Zone* zone) {
+ return OtherNumberConstantType::New(value, zone);
+ }
+ static Type* HeapConstant(i::Handle<i::HeapObject> value, Zone* zone) {
+ return HeapConstantType::New(value, zone);
}
static Type* Range(double min, double max, Zone* zone) {
return RangeType::New(min, max, zone);
@@ -488,6 +524,10 @@ class Type {
return tuple;
}
+ // NewConstant is a factory that returns Constant, Range or Number.
+ static Type* NewConstant(i::Handle<i::Object> value, Zone* zone);
+ static Type* NewConstant(double value, Zone* zone);
+
static Type* Union(Type* type1, Type* type2, Zone* zone);
static Type* Intersect(Type* type1, Type* type2, Zone* zone);
@@ -515,10 +555,16 @@ class Type {
// Inspection.
bool IsRange() { return IsKind(TypeBase::kRange); }
- bool IsConstant() { return IsKind(TypeBase::kConstant); }
+ bool IsHeapConstant() { return IsKind(TypeBase::kHeapConstant); }
+ bool IsOtherNumberConstant() {
+ return IsKind(TypeBase::kOtherNumberConstant);
+ }
bool IsTuple() { return IsKind(TypeBase::kTuple); }
- ConstantType* AsConstant() { return ConstantType::cast(this); }
+ HeapConstantType* AsHeapConstant() { return HeapConstantType::cast(this); }
+ OtherNumberConstantType* AsOtherNumberConstant() {
+ return OtherNumberConstantType::cast(this);
+ }
RangeType* AsRange() { return RangeType::cast(this); }
TupleType* AsTuple() { return TupleType::cast(this); }
@@ -582,7 +628,6 @@ class Type {
static bool Overlap(RangeType* lhs, RangeType* rhs);
static bool Contains(RangeType* lhs, RangeType* rhs);
- static bool Contains(RangeType* range, ConstantType* constant);
static bool Contains(RangeType* range, i::Object* val);
static int UpdateRange(Type* type, UnionType* result, int size, Zone* zone);
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 4769cb0c8b..30473f2798 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -69,7 +69,7 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
}
DCHECK(size_ < capacity_);
- DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+ DCHECK(size_ + size_ / 4 < capacity_);
const size_t mask = capacity_ - 1;
size_t dead = capacity_;
@@ -85,10 +85,10 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
entries_[i] = node;
size_++;
- // Resize to keep load factor below 1/kCapacityToSizeRatio.
- if (size_ * kCapacityToSizeRatio >= capacity_) Grow();
+ // Resize to keep load factor below 80%
+ if (size_ + size_ / 4 >= capacity_) Grow();
}
- DCHECK(size_ * kCapacityToSizeRatio < capacity_);
+ DCHECK(size_ + size_ / 4 < capacity_);
return NoChange();
}
@@ -112,10 +112,31 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
if (entry->IsDead()) {
continue;
}
+ if (entry == node) {
+ // Collision with ourselves, doesn't count as a real collision.
+ // Opportunistically clean-up the duplicate entry if we're at the end
+ // of a bucket.
+ if (!entries_[(j + 1) & mask]) {
+ entries_[j] = nullptr;
+ size_--;
+ return NoChange();
+ }
+ // Otherwise, keep searching for another collision.
+ continue;
+ }
if (Equals(entry, node)) {
- // Overwrite the colliding entry with the actual entry.
- entries_[i] = entry;
- return Replace(entry);
+ Reduction reduction = ReplaceIfTypesMatch(node, entry);
+ if (reduction.Changed()) {
+ // Overwrite the colliding entry with the actual entry.
+ entries_[i] = entry;
+ // Opportunistically clean-up the duplicate entry if we're at the
+ // end of a bucket.
+ if (!entries_[(j + 1) & mask]) {
+ entries_[j] = nullptr;
+ size_--;
+ }
+ }
+ return reduction;
}
}
}
@@ -126,37 +147,40 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
continue;
}
if (Equals(entry, node)) {
- // Make sure the replacement has at least as good type as the original
- // node.
- if (NodeProperties::IsTyped(entry) && NodeProperties::IsTyped(node)) {
- Type* entry_type = NodeProperties::GetType(entry);
- Type* node_type = NodeProperties::GetType(node);
- if (!entry_type->Is(node_type)) {
- // Ideally, we would set an intersection of {entry_type} and
- // {node_type} here. However, typing of NumberConstants assigns
- // different types to constants with the same value (it creates
- // a fresh heap number), which would make the intersection empty.
- // To be safe, we use the smaller type if the types are comparable.
- if (node_type->Is(entry_type)) {
- NodeProperties::SetType(entry, node_type);
- } else {
- // Types are not comparable => do not replace.
- return NoChange();
- }
- }
+ return ReplaceIfTypesMatch(node, entry);
+ }
+ }
+}
+
+Reduction ValueNumberingReducer::ReplaceIfTypesMatch(Node* node,
+ Node* replacement) {
+ // Make sure the replacement has at least as good type as the original node.
+ if (NodeProperties::IsTyped(replacement) && NodeProperties::IsTyped(node)) {
+ Type* replacement_type = NodeProperties::GetType(replacement);
+ Type* node_type = NodeProperties::GetType(node);
+ if (!replacement_type->Is(node_type)) {
+ // Ideally, we would set an intersection of {replacement_type} and
+ // {node_type} here. However, typing of NumberConstants assigns different
+ // types to constants with the same value (it creates a fresh heap
+ // number), which would make the intersection empty. To be safe, we use
+ // the smaller type if the types are comparable.
+ if (node_type->Is(replacement_type)) {
+ NodeProperties::SetType(replacement, node_type);
+ } else {
+ // Types are not comparable => do not replace.
+ return NoChange();
}
- return Replace(entry);
}
}
+ return Replace(replacement);
}
void ValueNumberingReducer::Grow() {
- // Allocate a new block of entries kCapacityToSizeRatio times the previous
- // capacity.
+ // Allocate a new block of entries double the previous capacity.
Node** const old_entries = entries_;
size_t const old_capacity = capacity_;
- capacity_ *= kCapacityToSizeRatio;
+ capacity_ *= 2;
entries_ = temp_zone()->NewArray<Node*>(capacity_);
memset(entries_, 0, sizeof(*entries_) * capacity_);
size_ = 0;
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
index f700c85bc0..521ce59f20 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -5,13 +5,16 @@
#ifndef V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
#define V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
namespace compiler {
-class ValueNumberingReducer final : public Reducer {
+class V8_EXPORT_PRIVATE ValueNumberingReducer final
+ : public NON_EXPORTED_BASE(Reducer) {
public:
explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
~ValueNumberingReducer();
@@ -19,8 +22,9 @@ class ValueNumberingReducer final : public Reducer {
Reduction Reduce(Node* node) override;
private:
- enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
+ enum { kInitialCapacity = 256u };
+ Reduction ReplaceIfTypesMatch(Node* node, Node* replacement);
void Grow();
Zone* temp_zone() const { return temp_zone_; }
Zone* graph_zone() const { return graph_zone_; }
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index b9faeeedd4..872305b40a 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -333,40 +333,35 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Any());
break;
}
- case IrOpcode::kInt32Constant: // TODO(rossberg): rename Word32Constant?
- // Constants have no inputs.
- CHECK_EQ(0, input_count);
- // Type is a 32 bit integer, signed or unsigned.
- CheckTypeIs(node, Type::Integral32());
- break;
- case IrOpcode::kInt64Constant:
+ case IrOpcode::kInt32Constant: // TODO(turbofan): rename Word32Constant?
+ case IrOpcode::kInt64Constant: // TODO(turbofan): rename Word64Constant?
+ case IrOpcode::kFloat32Constant:
+ case IrOpcode::kFloat64Constant:
+ case IrOpcode::kRelocatableInt32Constant:
+ case IrOpcode::kRelocatableInt64Constant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
- // Type is internal.
- // TODO(rossberg): Introduce proper Int64 type.
- CheckTypeIs(node, Type::Internal());
+ // Type is empty.
+ CheckNotTyped(node);
break;
- case IrOpcode::kFloat32Constant:
- case IrOpcode::kFloat64Constant:
case IrOpcode::kNumberConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is a number.
CheckTypeIs(node, Type::Number());
break;
- case IrOpcode::kRelocatableInt32Constant:
- case IrOpcode::kRelocatableInt64Constant:
- CHECK_EQ(0, input_count);
- break;
case IrOpcode::kHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
+ // Type is anything.
+ CheckTypeIs(node, Type::Any());
break;
case IrOpcode::kExternalConstant:
+ case IrOpcode::kPointerConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
- // Type is considered internal.
- CheckTypeIs(node, Type::Internal());
+ // Type is an external pointer.
+ CheckTypeIs(node, Type::ExternalPointer());
break;
case IrOpcode::kOsrValue:
// OSR values have a value and a control input.
@@ -375,6 +370,23 @@ void Verifier::Visitor::Check(Node* node) {
// Type is merged from other values in the graph and could be any.
CheckTypeIs(node, Type::Any());
break;
+ case IrOpcode::kOsrGuard:
+ // OSR values have a value and a control input.
+ CHECK_EQ(1, value_count);
+ CHECK_EQ(1, effect_count);
+ CHECK_EQ(1, control_count);
+ switch (OsrGuardTypeOf(node->op())) {
+ case OsrGuardType::kUninitialized:
+ CheckTypeIs(node, Type::None());
+ break;
+ case OsrGuardType::kSignedSmall:
+ CheckTypeIs(node, Type::SignedSmall());
+ break;
+ case OsrGuardType::kAny:
+ CheckTypeIs(node, Type::Any());
+ break;
+ }
+ break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
int index = static_cast<int>(ProjectionIndexOf(node->op()));
@@ -471,8 +483,9 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
case IrOpcode::kStateValues:
- case IrOpcode::kObjectState:
case IrOpcode::kTypedStateValues:
+ case IrOpcode::kObjectState:
+ case IrOpcode::kTypedObjectState:
// TODO(jarin): what are the constraints on these?
break;
case IrOpcode::kCall:
@@ -566,6 +579,10 @@ void Verifier::Visitor::Check(Node* node) {
// Type is OtherObject.
CheckTypeIs(node, Type::OtherObject());
break;
+ case IrOpcode::kJSCreateKeyValueArray:
+ // Type is OtherObject.
+ CheckTypeIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
@@ -643,6 +660,13 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSStoreMessage:
break;
+ case IrOpcode::kJSLoadModule:
+ CheckTypeIs(node, Type::Any());
+ break;
+ case IrOpcode::kJSStoreModule:
+ CheckNotTyped(node);
+ break;
+
case IrOpcode::kJSGeneratorStore:
CheckNotTyped(node);
break;
@@ -809,6 +833,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Signed32());
break;
case IrOpcode::kNumberToUint32:
+ case IrOpcode::kNumberToUint8Clamped:
// Number -> Unsigned32
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Unsigned32());
@@ -972,6 +997,8 @@ void Verifier::Visitor::Check(Node* node) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeFloat64ToTaggedPointer:
+ break;
case IrOpcode::kChangeTaggedToBit: {
// Boolean /\ TaggedPtr -> Boolean /\ UntaggedInt1
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1049,6 +1076,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedTaggedToFloat64:
case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
case IrOpcode::kCheckedTruncateTaggedToWord32:
break;
@@ -1288,7 +1316,7 @@ void Verifier::Visitor::Check(Node* node) {
void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
CHECK_NOT_NULL(graph->start());
CHECK_NOT_NULL(graph->end());
- Zone zone(graph->zone()->allocator());
+ Zone zone(graph->zone()->allocator(), ZONE_NAME);
Visitor visitor(&zone, typing, check_inputs);
AllNodes all(&zone, graph);
for (Node* node : all.reachable) visitor.Check(node);
@@ -1378,7 +1406,7 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
void ScheduleVerifier::Run(Schedule* schedule) {
const size_t count = schedule->BasicBlockCount();
- Zone tmp_zone(schedule->zone()->allocator());
+ Zone tmp_zone(schedule->zone()->allocator(), ZONE_NAME);
Zone* zone = &tmp_zone;
BasicBlock* start = schedule->start();
BasicBlockVector* rpo_order = schedule->rpo_order();
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 60849e0238..db0f4538b8 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_VERIFIER_H_
#include "src/base/macros.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -54,7 +55,7 @@ class Verifier {
};
// Verifies properties of a schedule, such as dominance, phi placement, etc.
-class ScheduleVerifier {
+class V8_EXPORT_PRIVATE ScheduleVerifier {
public:
static void Run(Schedule* schedule);
};
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index b003e9968a..1b61c1504e 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -13,6 +13,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/diamond.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/graph.h"
@@ -24,8 +25,8 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/source-position.h"
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/simd-scalar-lowering.h"
+#include "src/compiler/zone-stats.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -206,6 +207,9 @@ class WasmTrapHelper : public ZoneObject {
case wasm::kAstF64:
return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
break;
+ case wasm::kAstS128:
+ return builder_->CreateS128Value(0xdeadbeef);
+ break;
default:
UNREACHABLE();
return nullptr;
@@ -277,7 +281,8 @@ class WasmTrapHelper : public ZoneObject {
} else {
// End the control flow with returning 0xdeadbeef
Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
- end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
+ end = graph()->NewNode(jsgraph()->common()->Return(),
+ jsgraph()->Int32Constant(0), ret_value,
*effect_ptr, *control_ptr);
}
@@ -294,6 +299,7 @@ WasmGraphBuilder::WasmGraphBuilder(
mem_buffer_(nullptr),
mem_size_(nullptr),
function_tables_(zone),
+ function_table_sizes_(zone),
control_(nullptr),
effect_(nullptr),
cur_buffer_(def_buffer_),
@@ -404,37 +410,44 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
return jsgraph()->Int64Constant(value);
}
-void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
+void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
+ Node** effect, Node** control) {
+ if (effect == nullptr) {
+ effect = effect_;
+ }
+ if (control == nullptr) {
+ control = control_;
+ }
// We do not generate stack checks for cctests.
if (module_ && !module_->instance->context.is_null()) {
Node* limit = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::Pointer()),
jsgraph()->ExternalConstant(
ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
+ jsgraph()->IntPtrConstant(0), *effect, *control);
Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
Node* check =
graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
-
- Node* effect_true = *effect_;
+ stack_check.Chain(*control);
+ Node* effect_true = *effect;
Node* effect_false;
// Generate a call to the runtime if there is a stack check failure.
{
Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
module_->instance->context, nullptr, 0,
- effect_, stack_check.if_false);
+ effect, stack_check.if_false);
effect_false = node;
}
Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
effect_true, effect_false, stack_check.merge);
- *control_ = stack_check.merge;
- *effect_ = ephi;
+ *control = stack_check.merge;
+ *effect = ephi;
}
}
@@ -980,16 +993,36 @@ Node* WasmGraphBuilder::HeapConstant(Handle<HeapObject> value) {
return jsgraph()->HeapConstant(value);
}
-Node* WasmGraphBuilder::Branch(Node* cond, Node** true_node,
- Node** false_node) {
+namespace {
+Node* Branch(JSGraph* jsgraph, Node* cond, Node** true_node, Node** false_node,
+ Node* control, BranchHint hint) {
DCHECK_NOT_NULL(cond);
- DCHECK_NOT_NULL(*control_);
+ DCHECK_NOT_NULL(control);
Node* branch =
- graph()->NewNode(jsgraph()->common()->Branch(), cond, *control_);
- *true_node = graph()->NewNode(jsgraph()->common()->IfTrue(), branch);
- *false_node = graph()->NewNode(jsgraph()->common()->IfFalse(), branch);
+ jsgraph->graph()->NewNode(jsgraph->common()->Branch(hint), cond, control);
+ *true_node = jsgraph->graph()->NewNode(jsgraph->common()->IfTrue(), branch);
+ *false_node = jsgraph->graph()->NewNode(jsgraph->common()->IfFalse(), branch);
return branch;
}
+} // namespace
+
+Node* WasmGraphBuilder::BranchNoHint(Node* cond, Node** true_node,
+ Node** false_node) {
+ return Branch(jsgraph(), cond, true_node, false_node, *control_,
+ BranchHint::kNone);
+}
+
+Node* WasmGraphBuilder::BranchExpectTrue(Node* cond, Node** true_node,
+ Node** false_node) {
+ return Branch(jsgraph(), cond, true_node, false_node, *control_,
+ BranchHint::kTrue);
+}
+
+Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
+ Node** false_node) {
+ return Branch(jsgraph(), cond, true_node, false_node, *control_,
+ BranchHint::kFalse);
+}
Node* WasmGraphBuilder::Switch(unsigned count, Node* key) {
return graph()->NewNode(jsgraph()->common()->Switch(count), key, *control_);
@@ -1009,11 +1042,13 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
- Node** buf = Realloc(vals, count, count + 2);
- buf[count] = *effect_;
- buf[count + 1] = *control_;
+ Node** buf = Realloc(vals, count, count + 3);
+ memmove(buf + 1, buf, sizeof(void*) * count);
+ buf[0] = jsgraph()->Int32Constant(0);
+ buf[count + 1] = *effect_;
+ buf[count + 2] = *control_;
Node* ret =
- graph()->NewNode(jsgraph()->common()->Return(count), count + 2, vals);
+ graph()->NewNode(jsgraph()->common()->Return(count), count + 3, buf);
MergeControlToEnd(jsgraph(), ret);
return ret;
@@ -1681,7 +1716,7 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
graph(), jsgraph()->common(),
graph()->NewNode(
jsgraph()->machine()->Uint32LessThanOrEqual(), input,
- jsgraph()->Uint32Constant(wasm::WasmModule::kMaxMemPages)),
+ jsgraph()->Uint32Constant(wasm::WasmModule::kV8MaxPages)),
BranchHint::kTrue);
check_input_range.Chain(*control_);
@@ -1748,7 +1783,7 @@ Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
Node* is_smi;
Node* is_heap;
- Branch(BuildTestNotSmi(value), &is_heap, &is_smi);
+ BranchExpectFalse(BuildTestNotSmi(value), &is_heap, &is_smi);
// is_smi
Node* smi_i32 = BuildChangeSmiToInt32(value);
@@ -1788,7 +1823,7 @@ Node* WasmGraphBuilder::BuildI32DivS(Node* left, Node* right,
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
- Branch(
+ BranchExpectFalse(
graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
&denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
@@ -1836,6 +1871,18 @@ Node* WasmGraphBuilder::BuildI32RemU(Node* left, Node* right,
Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Int32Matcher mr(right);
+ if (mr.HasValue()) {
+ if (mr.Value() == 0) {
+ return jsgraph()->Int32Constant(0);
+ } else if (mr.Value() == -1) {
+ // The result is the negation of the left input.
+ return graph()->NewNode(m->Int32Sub(), jsgraph()->Int32Constant(0), left);
+ }
+ return graph()->NewNode(m->Int32Div(), left, right, *control_);
+ }
+
// asm.js semantics return 0 on divide or mod by zero.
if (m->Int32DivIsSafe()) {
// The hardware instruction does the right thing (e.g. arm).
@@ -1865,6 +1912,17 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
MachineOperatorBuilder* m = jsgraph()->machine();
+
+ Int32Matcher mr(right);
+ if (mr.HasValue()) {
+ if (mr.Value() == 0) {
+ return jsgraph()->Int32Constant(0);
+ } else if (mr.Value() == -1) {
+ return jsgraph()->Int32Constant(0);
+ }
+ return graph()->NewNode(m->Int32Mod(), left, right, *control_);
+ }
+
// asm.js semantics return 0 on divide or mod by zero.
// Explicit check for x % 0.
Diamond z(
@@ -1930,9 +1988,9 @@ Node* WasmGraphBuilder::BuildI64DivS(Node* left, Node* right,
Node* before = *control_;
Node* denom_is_m1;
Node* denom_is_not_m1;
- Branch(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
- jsgraph()->Int64Constant(-1)),
- &denom_is_m1, &denom_is_not_m1);
+ BranchExpectFalse(graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
+ jsgraph()->Int64Constant(-1)),
+ &denom_is_m1, &denom_is_not_m1);
*control_ = denom_is_m1;
trap_->TrapIfEq64(wasm::kTrapDivUnrepresentable, left,
std::numeric_limits<int64_t>::min(), position);
@@ -2100,37 +2158,27 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
return BuildWasmCall(sig, args, rets, position);
}
-Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args, Node*** rets,
+Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
+ Node*** rets,
wasm::WasmCodePosition position) {
DCHECK_NOT_NULL(args[0]);
DCHECK(module_ && module_->instance);
- MachineOperatorBuilder* machine = jsgraph()->machine();
+ // Assume only one table for now.
+ uint32_t table_index = 0;
+ wasm::FunctionSig* sig = module_->GetSignature(sig_index);
+
+ DCHECK(module_->IsValidTable(table_index));
- // Compute the code object by loading it from the function table.
+ EnsureFunctionTableNodes();
+ MachineOperatorBuilder* machine = jsgraph()->machine();
Node* key = args[0];
- // Assume only one table for now.
- DCHECK_LE(module_->instance->function_tables.size(), 1u);
- // Bounds check the index.
- uint32_t table_size =
- module_->IsValidTable(0) ? module_->GetTable(0)->max_size : 0;
- wasm::FunctionSig* sig = module_->GetSignature(index);
- if (table_size > 0) {
- // Bounds check against the table size.
- Node* size = Uint32Constant(table_size);
- Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
- } else {
- // No function table. Generate a trap and return a constant.
- trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, Int32Constant(0), position);
- (*rets) = Buffer(sig->return_count());
- for (size_t i = 0; i < sig->return_count(); i++) {
- (*rets)[i] = trap_->GetTrapValue(sig->GetReturn(i));
- }
- return trap_->GetTrapValue(sig);
- }
- Node* table = FunctionTable(0);
+ // Bounds check against the table size.
+ Node* size = function_table_sizes_[table_index];
+ Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
+ trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
+ Node* table = function_tables_[table_index];
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
@@ -2145,13 +2193,16 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args, Node*** rets,
Int32Constant(kPointerSizeLog2)),
Int32Constant(fixed_offset)),
*effect_, *control_);
- Node* sig_match =
- graph()->NewNode(machine->Word32Equal(),
- BuildChangeSmiToInt32(load_sig), Int32Constant(index));
+ auto map = const_cast<wasm::SignatureMap&>(
+ module_->module->function_tables[0].map);
+ Node* sig_match = graph()->NewNode(
+ machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(static_cast<int>(map.FindOrInsert(sig))));
trap_->AddTrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
// Load code object from the table.
+ uint32_t table_size = module_->module->function_tables[table_index].min_size;
uint32_t offset = fixed_offset + kPointerSize * table_size;
Node* load_code = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
@@ -2295,6 +2346,7 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
switch (type) {
case wasm::kAstI32:
return BuildChangeInt32ToTagged(node);
+ case wasm::kAstS128:
case wasm::kAstI64:
// Throw a TypeError. The native context is good enough here because we
// only throw a TypeError.
@@ -2457,6 +2509,7 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
num);
break;
}
+ case wasm::kAstS128:
case wasm::kAstI64:
// Throw a TypeError. The native context is good enough here because we
// only throw a TypeError.
@@ -2617,8 +2670,8 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
}
Node* jsval = ToJS(
retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* ret =
- graph()->NewNode(jsgraph()->common()->Return(), jsval, call, start);
+ Node* ret = graph()->NewNode(jsgraph()->common()->Return(),
+ jsgraph()->Int32Constant(0), jsval, call, start);
MergeControlToEnd(jsgraph(), ret);
}
@@ -2729,14 +2782,16 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* val =
FromJS(call, HeapConstant(isolate->native_context()),
sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
+ Node* pop_size = jsgraph()->Int32Constant(0);
if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
sig->GetReturn() == wasm::kAstI64) {
- ret = graph()->NewNode(jsgraph()->common()->Return(), val,
+ ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val,
graph()->NewNode(jsgraph()->machine()->Word32Sar(),
val, jsgraph()->Int32Constant(31)),
call, start);
} else {
- ret = graph()->NewNode(jsgraph()->common()->Return(), val, call, start);
+ ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val, call,
+ start);
}
MergeControlToEnd(jsgraph(), ret);
@@ -2796,17 +2851,15 @@ Node* WasmGraphBuilder::MemSize(uint32_t offset) {
}
}
-Node* WasmGraphBuilder::FunctionTable(uint32_t index) {
- DCHECK(module_ && module_->instance &&
- index < module_->instance->function_tables.size());
- if (!function_tables_.size()) {
- for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
- DCHECK(!module_->instance->function_tables[i].is_null());
- function_tables_.push_back(
- HeapConstant(module_->instance->function_tables[i]));
- }
+void WasmGraphBuilder::EnsureFunctionTableNodes() {
+ if (function_tables_.size() > 0) return;
+ for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
+ auto handle = module_->instance->function_tables[i];
+ DCHECK(!handle.is_null());
+ function_tables_.push_back(HeapConstant(handle));
+ uint32_t table_size = module_->module->function_tables[i].min_size;
+ function_table_sizes_.push_back(Uint32Constant(table_size));
}
- return function_tables_[index];
}
Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
@@ -2845,25 +2898,32 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
uint32_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
- // Check against the effective size.
size_t effective_size;
- if (size == 0) {
- effective_size = 0;
- } else if (offset >= size ||
- (static_cast<uint64_t>(offset) + memsize) > size) {
+ if (size <= offset || size < (static_cast<uint64_t>(offset) + memsize)) {
// Two checks are needed in the case where the offset is statically
// out of bounds; one check for the offset being in bounds, and the next for
// the offset + index being out of bounds for code to be patched correctly
// on relocation.
- effective_size = size - memsize + 1;
+
+ // Check for overflows.
+ if ((std::numeric_limits<uint32_t>::max() - memsize) + 1 < offset) {
+ // Always trap. Do not use TrapAlways because it does not create a valid
+ // graph here.
+ trap_->TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0),
+ 0, position);
+ return;
+ }
+ size_t effective_offset = (offset - 1) + memsize;
+
Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
- jsgraph()->IntPtrConstant(offset),
+ jsgraph()->IntPtrConstant(effective_offset),
jsgraph()->RelocatableInt32Constant(
- static_cast<uint32_t>(effective_size),
+ static_cast<uint32_t>(size),
RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
- DCHECK(offset >= effective_size);
- effective_size = offset - effective_size;
+ // For offset > effective size, this relies on check above to fail and
+ // effective size can be negative, relies on wrap around.
+ effective_size = size - offset - memsize + 1;
} else {
effective_size = size - offset - memsize + 1;
CHECK(effective_size <= kMaxUInt32);
@@ -3016,20 +3076,26 @@ void WasmGraphBuilder::Int64LoweringForTesting() {
}
}
+void WasmGraphBuilder::SimdScalarLoweringForTesting() {
+ SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
+ jsgraph()->common(), jsgraph()->zone(),
+ function_signature_)
+ .LowerGraph();
+}
+
void WasmGraphBuilder::SetSourcePosition(Node* node,
wasm::WasmCodePosition position) {
DCHECK_NE(position, wasm::kNoCodePosition);
- compiler::SourcePosition pos(position);
if (source_position_table_)
- source_position_table_->SetSourcePosition(node, pos);
+ source_position_table_->SetSourcePosition(node, SourcePosition(position));
}
-Node* WasmGraphBuilder::DefaultS128Value() {
+Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
// TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
// instead of creating a SIMD Value.
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
- Int32Constant(0), Int32Constant(0), Int32Constant(0),
- Int32Constant(0));
+ Int32Constant(value), Int32Constant(value),
+ Int32Constant(value), Int32Constant(value));
}
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
@@ -3038,6 +3104,18 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprI32x4Add:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
+ inputs[0], inputs[1]);
+ case wasm::kExprF32x4Splat:
+ return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
+ inputs[0], inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprF32x4Add:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+ inputs[1]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
@@ -3049,6 +3127,9 @@ Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
case wasm::kExprI32x4ExtractLane:
return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
Int32Constant(lane));
+ case wasm::kExprF32x4ExtractLane:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
+ input, Int32Constant(lane));
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
@@ -3082,7 +3163,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
MachineOperatorBuilder machine(&zone);
@@ -3151,12 +3232,12 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
- Handle<String> import_module,
- MaybeHandle<String> import_function) {
+ Handle<String> module_name,
+ MaybeHandle<String> import_name) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
MachineOperatorBuilder machine(&zone);
@@ -3215,14 +3296,14 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
const char* function_name = nullptr;
int function_name_size = 0;
- if (!import_function.is_null()) {
- Handle<String> handle = import_function.ToHandleChecked();
+ if (!import_name.is_null()) {
+ Handle<String> handle = import_name.ToHandleChecked();
function_name = handle->ToCString().get();
function_name_size = handle->length();
}
RecordFunctionCompilation(
CodeEventListener::FUNCTION_TAG, isolate, code, "wasm-to-js", index,
- {import_module->ToCString().get(), import_module->length()},
+ {module_name->ToCString().get(), module_name->length()},
{function_name, function_name_size});
}
@@ -3264,6 +3345,9 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
r.LowerGraph();
}
+ SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+ .LowerGraph();
+
int index = static_cast<int>(function_->func_index);
if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
@@ -3285,7 +3369,7 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
isolate_(isolate),
module_env_(module_env),
function_(function),
- graph_zone_(new Zone(isolate->allocator())),
+ graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
jsgraph_(new (graph_zone()) JSGraph(
isolate, new (graph_zone()) Graph(graph_zone()),
new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
@@ -3293,7 +3377,7 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
graph_zone(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()))),
- compilation_zone_(isolate->allocator()),
+ compilation_zone_(isolate->allocator(), ZONE_NAME),
info_(function->name_length != 0
? module_env->module->GetNameOrNull(function->name_offset,
function->name_length)
@@ -3371,7 +3455,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
function_->name_offset, function_->name_length);
SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
function_->func_index, name.length(), name.start());
- thrower_->Failed(buffer.start(), graph_construction_result_);
+ thrower_->CompileFailed(buffer.start(), graph_construction_result_);
}
return Handle<Code>::null();
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index c980a87fcb..b4bc350297 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -84,8 +84,8 @@ class WasmCompilationUnit final {
// Wraps a JS function, producing a code object that can be called from WASM.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
- Handle<String> import_module,
- MaybeHandle<String> import_function);
+ Handle<String> module_name,
+ MaybeHandle<String> import_name);
// Wraps a given wasm code object, producing a code object.
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
@@ -142,12 +142,16 @@ class WasmGraphBuilder {
void AppendToMerge(Node* merge, Node* from);
void AppendToPhi(Node* phi, Node* from);
- void StackCheck(wasm::WasmCodePosition position);
+ void StackCheck(wasm::WasmCodePosition position, Node** effect = nullptr,
+ Node** control = nullptr);
//-----------------------------------------------------------------------
// Operations that read and/or write {control} and {effect}.
//-----------------------------------------------------------------------
- Node* Branch(Node* cond, Node** true_node, Node** false_node);
+ Node* BranchNoHint(Node* cond, Node** true_node, Node** false_node);
+ Node* BranchExpectTrue(Node* cond, Node** true_node, Node** false_node);
+ Node* BranchExpectFalse(Node* cond, Node** true_node, Node** false_node);
+
Node* Switch(unsigned count, Node* key);
Node* IfValue(int32_t value, Node* sw);
Node* IfDefault(Node* sw);
@@ -166,7 +170,7 @@ class WasmGraphBuilder {
Node* ToJS(Node* node, wasm::LocalType type);
Node* FromJS(Node* node, Node* context, wasm::LocalType type);
Node* Invert(Node* node);
- Node* FunctionTable(uint32_t index);
+ void EnsureFunctionTableNodes();
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -196,9 +200,11 @@ class WasmGraphBuilder {
void Int64LoweringForTesting();
+ void SimdScalarLoweringForTesting();
+
void SetSourcePosition(Node* node, wasm::WasmCodePosition position);
- Node* DefaultS128Value();
+ Node* CreateS128Value(int32_t value);
Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
@@ -213,6 +219,7 @@ class WasmGraphBuilder {
Node* mem_buffer_;
Node* mem_size_;
NodeVector function_tables_;
+ NodeVector function_table_sizes_;
Node** control_;
Node** effect_;
Node** cur_buffer_;
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index 574db1cfef..a41c93ca35 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -178,6 +178,17 @@ struct Allocator {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
DoubleRegister reg = fp_regs[fp_offset++];
+#if V8_TARGET_ARCH_ARM
+ // Allocate floats using a double register, but modify the code to
+ // reflect how ARM FP registers alias.
+ // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
+ if (type == kAstF32) {
+ int float_reg_code = reg.code() * 2;
+ DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
+ return regloc(DoubleRegister::from_code(float_reg_code),
+ MachineTypeFor(type));
+ }
+#endif
return regloc(reg, MachineTypeFor(type));
} else {
int offset = -1 - stack_offset;
@@ -307,26 +318,23 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
"wasm-call");
}
-CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
- Zone* zone, CallDescriptor* descriptor) {
+CallDescriptor* ReplaceTypeInCallDescriptorWith(
+ Zone* zone, CallDescriptor* descriptor, size_t num_replacements,
+ MachineType input_type, MachineRepresentation output_type) {
size_t parameter_count = descriptor->ParameterCount();
size_t return_count = descriptor->ReturnCount();
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
- if (descriptor->GetParameterType(i) == MachineType::Int64()) {
- // For each int64 input we get two int32 inputs.
- parameter_count++;
+ if (descriptor->GetParameterType(i) == input_type) {
+ parameter_count += num_replacements - 1;
}
}
for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == MachineType::Int64()) {
- // For each int64 return we get two int32 returns.
- return_count++;
+ if (descriptor->GetReturnType(i) == input_type) {
+ return_count += num_replacements - 1;
}
}
if (parameter_count == descriptor->ParameterCount() &&
return_count == descriptor->ReturnCount()) {
- // If there is no int64 parameter or return value, we can just return the
- // original descriptor.
return descriptor;
}
@@ -335,10 +343,10 @@ CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
Allocator rets = return_registers.Get();
for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == MachineType::Int64()) {
- // For each int64 return we get two int32 returns.
- locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
- locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+ if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t j = 0; j < num_replacements; j++) {
+ locations.AddReturn(rets.Next(output_type));
+ }
} else {
locations.AddReturn(
rets.Next(descriptor->GetReturnType(i).representation()));
@@ -348,10 +356,10 @@ CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
Allocator params = parameter_registers.Get();
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
- if (descriptor->GetParameterType(i) == MachineType::Int64()) {
- // For each int64 input we get two int32 inputs.
- locations.AddParam(params.Next(MachineRepresentation::kWord32));
- locations.AddParam(params.Next(MachineRepresentation::kWord32));
+ if (descriptor->GetParameterType(i) == input_type) {
+ for (size_t j = 0; j < num_replacements; j++) {
+ locations.AddParam(params.Next(output_type));
+ }
} else {
locations.AddParam(
params.Next(descriptor->GetParameterType(i).representation()));
@@ -369,8 +377,20 @@ CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
descriptor->flags(), // flags
descriptor->debug_name());
+}
- return descriptor;
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
+ Zone* zone, CallDescriptor* descriptor) {
+ return ReplaceTypeInCallDescriptorWith(zone, descriptor, 2,
+ MachineType::Int64(),
+ MachineRepresentation::kWord32);
+}
+
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptorForSimd(
+ Zone* zone, CallDescriptor* descriptor) {
+ return ReplaceTypeInCallDescriptorWith(zone, descriptor, 4,
+ MachineType::Simd128(),
+ MachineRepresentation::kWord32);
}
} // namespace wasm
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 4d63e9ad83..745ac50841 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -4,6 +4,8 @@
#include "src/compiler/code-generator.h"
+#include <limits>
+
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
@@ -445,7 +447,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
OutOfLineCode* ool; \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
- DCHECK_EQ(0, index2); \
+ DCHECK_EQ(0u, index2); \
__ cmpl(index1, length); \
ool = new (zone()) OutOfLineLoadNaN(this, result); \
} else { \
@@ -500,7 +502,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
OutOfLineCode* ool; \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
- DCHECK_EQ(0, index2); \
+ DCHECK_EQ(0u, index2); \
__ cmpl(index1, length); \
ool = new (zone()) OutOfLineLoadZero(this, result); \
} else { \
@@ -557,7 +559,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
auto value = i.InputDoubleRegister(4); \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
- DCHECK_EQ(0, index2); \
+ DCHECK_EQ(0u, index2); \
Label done; \
__ cmpl(index1, length); \
__ j(above_equal, &done, Label::kNear); \
@@ -612,7 +614,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
auto index2 = i.InputUint32(2); \
if (instr->InputAt(3)->IsRegister()) { \
auto length = i.InputRegister(3); \
- DCHECK_EQ(0, index2); \
+ DCHECK_EQ(0u, index2); \
Label done; \
__ cmpl(index1, length); \
__ j(above_equal, &done, Label::kNear); \
@@ -848,19 +850,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
RecordCallPosition(instr);
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, kWrongFunctionContext);
}
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- i.TempRegister(0), i.TempRegister(1),
- i.TempRegister(2));
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
+ i.TempRegister(0), i.TempRegister(1),
+ i.TempRegister(2));
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -921,7 +920,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ movq(i.OutputRegister(), rsp);
@@ -1997,7 +1996,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shll(i.OutputRegister(), Immediate(1));
} else {
- __ leal(i.OutputRegister(), i.MemoryOperand());
+ __ addl(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
__ shll(i.OutputRegister(), Immediate(1));
@@ -2008,15 +2007,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ leal(i.OutputRegister(), i.MemoryOperand());
}
+ } else if (mode == kMode_MR1 &&
+ i.InputRegister(1).is(i.OutputRegister())) {
+ __ addl(i.OutputRegister(), i.InputRegister(0));
} else {
__ leal(i.OutputRegister(), i.MemoryOperand());
}
__ AssertZeroExtended(i.OutputRegister());
break;
}
- case kX64Lea:
- __ leaq(i.OutputRegister(), i.MemoryOperand());
+ case kX64Lea: {
+ AddressingMode mode = AddressingModeField::decode(instr->opcode());
+ // Shorten "leaq" to "addq", "subq" or "shlq" if the register allocation
+ // and addressing mode just happens to work out. The "addq"/"subq" forms
+ // in these cases are faster based on measurements.
+ if (i.InputRegister(0).is(i.OutputRegister())) {
+ if (mode == kMode_MRI) {
+ int32_t constant_summand = i.InputInt32(1);
+ if (constant_summand > 0) {
+ __ addq(i.OutputRegister(), Immediate(constant_summand));
+ } else if (constant_summand < 0) {
+ __ subq(i.OutputRegister(), Immediate(-constant_summand));
+ }
+ } else if (mode == kMode_MR1) {
+ if (i.InputRegister(1).is(i.OutputRegister())) {
+ __ shlq(i.OutputRegister(), Immediate(1));
+ } else {
+ __ addq(i.OutputRegister(), i.InputRegister(1));
+ }
+ } else if (mode == kMode_M2) {
+ __ shlq(i.OutputRegister(), Immediate(1));
+ } else if (mode == kMode_M4) {
+ __ shlq(i.OutputRegister(), Immediate(2));
+ } else if (mode == kMode_M8) {
+ __ shlq(i.OutputRegister(), Immediate(3));
+ } else {
+ __ leaq(i.OutputRegister(), i.MemoryOperand());
+ }
+ } else if (mode == kMode_MR1 &&
+ i.InputRegister(1).is(i.OutputRegister())) {
+ __ addq(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ leaq(i.OutputRegister(), i.MemoryOperand());
+ }
break;
+ }
case kX64Dec32:
__ decl(i.OutputRegister());
break;
@@ -2316,7 +2351,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2362,6 +2397,9 @@ void CodeGenerator::AssembleConstructFrame() {
__ movq(rbp, rsp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
+ if (descriptor->PushArgumentCount()) {
+ __ pushq(kJavaScriptCallArgCountRegister);
+ }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
@@ -2370,7 +2408,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2414,8 +2453,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
// Restore registers.
@@ -2444,22 +2482,41 @@ void CodeGenerator::AssembleReturn() {
unwinding_info_writer_.MarkBlockWillExit();
+ // Might need rcx for scratch if pop_size is too big or if there is a variable
+ // pop count.
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rdx.bit());
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ X64OperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- return;
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ // Canonicalize JSFunction return sites for now.
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
- // Might need rcx for scratch if pop_size is too big.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & rcx.bit());
- __ Ret(static_cast<int>(pop_size), rcx);
+
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+ CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
+ __ Ret(static_cast<int>(pop_size), rcx);
+ } else {
+ Register pop_reg = g.ToRegister(pop);
+ Register scratch_reg = pop_reg.is(rcx) ? rdx : rcx;
+ __ popq(scratch_reg);
+ __ leaq(rsp, Operand(rsp, pop_reg, times_8, static_cast<int>(pop_size)));
+ __ jmp(scratch_reg);
+ }
}
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 4208d8a594..ef0c3ad92c 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -194,8 +194,77 @@ int InstructionScheduler::GetTargetInstructionFlags(
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
- // TODO(all): Add instruction cost modeling.
- return 1;
+ // Basic latency modeling for x64 instructions. They have been determined
+ // in an empirical way.
+ switch (instr->arch_opcode()) {
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ case kSSEFloat64Mul:
+ return 5;
+ case kX64Imul:
+ case kX64Imul32:
+ case kX64ImulHigh32:
+ case kX64UmulHigh32:
+ case kSSEFloat32Cmp:
+ case kSSEFloat32Add:
+ case kSSEFloat32Sub:
+ case kSSEFloat32Abs:
+ case kSSEFloat32Neg:
+ case kSSEFloat64Cmp:
+ case kSSEFloat64Add:
+ case kSSEFloat64Sub:
+ case kSSEFloat64Max:
+ case kSSEFloat64Min:
+ case kSSEFloat64Abs:
+ case kSSEFloat64Neg:
+ return 3;
+ case kSSEFloat32Mul:
+ case kSSEFloat32ToFloat64:
+ case kSSEFloat64ToFloat32:
+ case kSSEFloat32Round:
+ case kSSEFloat64Round:
+ case kSSEFloat32ToInt32:
+ case kSSEFloat32ToUint32:
+ case kSSEFloat64ToInt32:
+ case kSSEFloat64ToUint32:
+ return 4;
+ case kX64Idiv:
+ return 49;
+ case kX64Idiv32:
+ return 35;
+ case kX64Udiv:
+ return 38;
+ case kX64Udiv32:
+ return 26;
+ case kSSEFloat32Div:
+ case kSSEFloat64Div:
+ case kSSEFloat32Sqrt:
+ case kSSEFloat64Sqrt:
+ return 13;
+ case kSSEFloat32ToInt64:
+ case kSSEFloat64ToInt64:
+ case kSSEFloat32ToUint64:
+ case kSSEFloat64ToUint64:
+ return 10;
+ case kSSEFloat64Mod:
+ return 50;
+ case kArchTruncateDoubleToI:
+ return 6;
+ default:
+ return 1;
+ }
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 9a7657ef32..878e778da0 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -1250,6 +1250,19 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
return false;
}
}
+ case IrOpcode::kLoad: {
+ // The movzxbl/movsxbl/movzxwl/movsxwl operations implicitly zero-extend
+ // to 64-bit on x64,
+ // so the zero-extension is a no-op.
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ return true;
+ default:
+ return false;
+ }
+ }
default:
return false;
}
@@ -1775,6 +1788,29 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWord64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
X64OperandGenerator g(selector);
+ if (selector->CanUseRootsRegister()) {
+ Heap* const heap = selector->isolate()->heap();
+ Heap::RootListIndex root_index;
+ HeapObjectBinopMatcher m(node);
+ if (m.right().HasValue() &&
+ heap->IsRootHandle(m.right().Value(), &root_index)) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode =
+ kX64Cmp | AddressingModeField::encode(kMode_Root);
+ return VisitCompare(
+ selector, opcode,
+ g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
+ g.UseRegister(m.left().node()), cont);
+ } else if (m.left().HasValue() &&
+ heap->IsRootHandle(m.left().Value(), &root_index)) {
+ InstructionCode opcode =
+ kX64Cmp | AddressingModeField::encode(kMode_Root);
+ return VisitCompare(
+ selector, opcode,
+ g.TempImmediate((root_index * kPointerSize) - kRootRegisterBias),
+ g.UseRegister(m.right().node()), cont);
+ }
+ }
Int64BinopMatcher m(node);
if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
@@ -1833,21 +1869,22 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
// Shared routine for word comparison against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWordCompare(selector, value, kX64Cmp32, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, kX64Cmp32, cont);
@@ -1905,9 +1942,26 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kFloat64LessThan:
+ case IrOpcode::kFloat64LessThan: {
+ Float64BinopMatcher m(value);
+ if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
+ // This matches the pattern
+ //
+ // Float64LessThan(#0.0, Float64Abs(x))
+ //
+ // which TurboFan generates for NumberToBoolean in the general case,
+ // and which evaluates to false if x is 0, -0 or NaN. We can compile
+ // this to a simple (v)ucomisd using not_equal flags condition, which
+ // avoids the costly Float64Abs.
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ InstructionCode const opcode =
+ selector->IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+ return VisitCompare(selector, opcode, m.left().node(),
+ m.right().InputAt(0), cont, false);
+ }
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
return VisitFloat64Compare(selector, value, cont);
+ }
case IrOpcode::kFloat64LessThanOrEqual:
cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
@@ -1956,7 +2010,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Branch could not be combined with a compare, emit compare against 0.
@@ -2169,14 +2222,28 @@ void InstructionSelector::VisitFloat64Equal(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ Float64BinopMatcher m(node);
+ if (m.left().Is(0.0) && m.right().IsFloat64Abs()) {
+ // This matches the pattern
+ //
+ // Float64LessThan(#0.0, Float64Abs(x))
+ //
+ // which TurboFan generates for NumberToBoolean in the general case,
+ // and which evaluates to false if x is 0, -0 or NaN. We can compile
+ // this to a simple (v)ucomisd using not_equal flags condition, which
+ // avoids the costly Float64Abs.
+ FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, node);
+ InstructionCode const opcode =
+ IsSupported(AVX) ? kAVXFloat64Cmp : kSSEFloat64Cmp;
+ return VisitCompare(this, opcode, m.left().node(), m.right().InputAt(0),
+ &cont, false);
+ }
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
VisitFloat64Compare(this, node, &cont);
}
-
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
diff --git a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
index 4efba3254f..31338bdaff 100644
--- a/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
+++ b/deps/v8/src/compiler/x64/unwinding-info-writer-x64.cc
@@ -15,7 +15,8 @@ void UnwindingInfoWriter::BeginInstructionBlock(int pc_offset,
block_will_exit_ = false;
- DCHECK_LT(block->rpo_number().ToInt(), block_initial_states_.size());
+ DCHECK_LT(block->rpo_number().ToInt(),
+ static_cast<int>(block_initial_states_.size()));
const BlockInitialState* initial_state =
block_initial_states_[block->rpo_number().ToInt()];
if (initial_state) {
@@ -47,7 +48,7 @@ void UnwindingInfoWriter::EndInstructionBlock(const InstructionBlock* block) {
for (const RpoNumber& successor : block->successors()) {
int successor_index = successor.ToInt();
- DCHECK_LT(successor_index, block_initial_states_.size());
+ DCHECK_LT(successor_index, static_cast<int>(block_initial_states_.size()));
const BlockInitialState* existing_state =
block_initial_states_[successor_index];
// If we already had an entry for this BB, check that the values are the
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index f5e6634561..d2f64e8cf1 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -637,8 +637,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->ClearSPDelta();
break;
}
- case kArchTailCallJSFunctionFromJSFunction:
- case kArchTailCallJSFunction: {
+ case kArchTailCallJSFunctionFromJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
@@ -649,10 +648,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ VerifyX87StackDepth(1);
}
__ fstp(0);
- if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- no_reg, no_reg, no_reg);
- }
+ AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
+ no_reg, no_reg);
__ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -749,7 +746,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchRet:
- AssembleReturn();
+ AssembleReturn(instr->InputAt(0));
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), ebp);
@@ -1870,7 +1867,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (i.InputRegister(1).is(i.OutputRegister())) {
__ shl(i.OutputRegister(), 1);
} else {
- __ lea(i.OutputRegister(), i.MemoryOperand());
+ __ add(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
@@ -1881,6 +1878,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
+ } else if (mode == kMode_MR1 &&
+ i.InputRegister(1).is(i.OutputRegister())) {
+ __ add(i.OutputRegister(), i.InputRegister(0));
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
@@ -2245,7 +2245,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
DeoptimizeReason deoptimization_reason =
GetDeoptimizationReason(deoptimization_id);
- __ RecordDeoptReason(deoptimization_reason, pos.raw(), deoptimization_id);
+ __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
return kSuccess;
}
@@ -2404,12 +2404,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ mov(ebp, esp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
+ if (descriptor->PushArgumentCount()) {
+ __ push(kJavaScriptCallArgCountRegister);
+ }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
}
}
- int shrink_slots = frame()->GetSpillSlotCount();
+ int shrink_slots =
+ frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -2444,8 +2448,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-
-void CodeGenerator::AssembleReturn() {
+void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
// Clear the FPU stack only if there is no return value in the stack.
@@ -2453,7 +2456,7 @@ void CodeGenerator::AssembleReturn() {
__ VerifyX87StackDepth(1);
}
bool clear_stack = true;
- for (int i = 0; i < descriptor->ReturnCount(); i++) {
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
MachineRepresentation rep = descriptor->GetReturnType(i).representation();
LinkageLocation loc = descriptor->GetReturnLocation(i);
if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
@@ -2463,7 +2466,6 @@ void CodeGenerator::AssembleReturn() {
}
if (clear_stack) __ fstp(0);
- int pop_count = static_cast<int>(descriptor->StackParameterCount());
const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers.
if (saves != 0) {
@@ -2473,22 +2475,40 @@ void CodeGenerator::AssembleReturn() {
}
}
+ // Might need ecx for scratch if pop_size is too big or if there is a variable
+ // pop count.
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+ size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
+ X87OperandConverter g(this, nullptr);
if (descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now.
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- return;
+ // Canonicalize JSFunction return sites for now if they always have the same
+ // number of return args.
+ if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ jmp(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ AssembleDeconstructFrame();
+ }
} else {
- __ bind(&return_label_);
AssembleDeconstructFrame();
}
}
- if (pop_count == 0) {
- __ ret(0);
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
+ DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
+ if (pop->IsImmediate()) {
+ DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
+ pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
+ __ Ret(static_cast<int>(pop_size), ecx);
} else {
- __ Ret(pop_count * kPointerSize, ebx);
+ Register pop_reg = g.ToRegister(pop);
+ Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
+ __ pop(scratch_reg);
+ __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
+ __ jmp(scratch_reg);
}
}
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index 757eee961e..a737d1e9e8 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -607,55 +607,78 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
void InstructionSelector::VisitInt32PairAdd(Node* node) {
X87OperandGenerator g(this);
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineSameAsFirst(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+ g.DefineAsRegister(projection1)};
- InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand temps[] = {g.TempRegister()};
- Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+ Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kX87Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.Use(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
X87OperandGenerator g(this);
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // We use UseUniqueRegister here to avoid register sharing with the temp
+ // register.
+ InstructionOperand inputs[] = {
+ g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
- InstructionOperand outputs[] = {
- g.DefineSameAsFirst(node),
- g.DefineAsRegister(NodeProperties::FindProjection(node, 1))};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
+ g.DefineAsRegister(projection1)};
- InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand temps[] = {g.TempRegister()};
- Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+ Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kX87Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.Use(node->InputAt(2)));
+ }
}
void InstructionSelector::VisitInt32PairMul(Node* node) {
X87OperandGenerator g(this);
- // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
- // register and one mov instruction.
- InstructionOperand inputs[] = {
- g.UseUnique(node->InputAt(0)), g.UseUnique(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)), g.UseFixed(node->InputAt(3), ecx)};
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
+ // register and one mov instruction.
+ InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
+ g.UseUnique(node->InputAt(1)),
+ g.UseUniqueRegister(node->InputAt(2)),
+ g.UseFixed(node->InputAt(3), ecx)};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(node, eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
+ InstructionOperand outputs[] = {
+ g.DefineAsFixed(node, eax),
+ g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
- InstructionOperand temps[] = {g.TempRegister(edx)};
+ InstructionOperand temps[] = {g.TempRegister(edx)};
- Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+ Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
+ } else {
+ // The high word of the result is not used, so we emit the standard 32 bit
+ // instruction.
+ Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.Use(node->InputAt(2)));
+ }
}
void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
@@ -673,11 +696,19 @@ void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
g.UseFixed(node->InputAt(1), edx),
shift_operand};
- InstructionOperand outputs[] = {
- g.DefineAsFixed(node, eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), edx)};
+ InstructionOperand outputs[2];
+ InstructionOperand temps[1];
+ int32_t output_count = 0;
+ int32_t temp_count = 0;
+ outputs[output_count++] = g.DefineAsFixed(node, eax);
+ Node* projection1 = NodeProperties::FindProjection(node, 1);
+ if (projection1) {
+ outputs[output_count++] = g.DefineAsFixed(projection1, edx);
+ } else {
+ temps[temp_count++] = g.TempRegister(edx);
+ }
- selector->Emit(opcode, 2, outputs, 3, inputs);
+ selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
}
void InstructionSelector::VisitWord32PairShl(Node* node) {
@@ -1402,22 +1433,22 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Shared routine for word comparison with zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- // Try to combine the branch with a comparison.
- while (selector->CanCover(user, value)) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (value->opcode() == IrOpcode::kWord32Equal &&
+ selector->CanCover(user, value)) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+
+ user = value;
+ value = m.left().node();
+ cont->Negate();
+ }
+
+ if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal: {
- // Try to combine with comparisons against 0 by simply inverting the
- // continuation.
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord32Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWordCompare(selector, value, cont);
- }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWordCompare(selector, value, cont);
@@ -1483,7 +1514,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
default:
break;
}
- break;
}
// Continuation could not be combined with a compare, emit compare against 0.
diff --git a/deps/v8/src/compiler/zone-pool.cc b/deps/v8/src/compiler/zone-stats.cc
index 7681eeb5d1..8942df5555 100644
--- a/deps/v8/src/compiler/zone-pool.cc
+++ b/deps/v8/src/compiler/zone-stats.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/zone-pool.h"
+#include "src/compiler/zone-stats.h"
namespace v8 {
namespace internal {
namespace compiler {
-ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)
- : zone_pool_(zone_pool),
- total_allocated_bytes_at_start_(zone_pool->GetTotalAllocatedBytes()),
+ZoneStats::StatsScope::StatsScope(ZoneStats* zone_stats)
+ : zone_stats_(zone_stats),
+ total_allocated_bytes_at_start_(zone_stats->GetTotalAllocatedBytes()),
max_allocated_bytes_(0) {
- zone_pool_->stats_.push_back(this);
- for (Zone* zone : zone_pool_->used_) {
+ zone_stats_->stats_.push_back(this);
+ for (Zone* zone : zone_stats_->zones_) {
size_t size = static_cast<size_t>(zone->allocation_size());
std::pair<InitialValues::iterator, bool> res =
initial_values_.insert(std::make_pair(zone, size));
@@ -22,21 +22,18 @@ ZonePool::StatsScope::StatsScope(ZonePool* zone_pool)
}
}
-
-ZonePool::StatsScope::~StatsScope() {
- DCHECK_EQ(zone_pool_->stats_.back(), this);
- zone_pool_->stats_.pop_back();
+ZoneStats::StatsScope::~StatsScope() {
+ DCHECK_EQ(zone_stats_->stats_.back(), this);
+ zone_stats_->stats_.pop_back();
}
-
-size_t ZonePool::StatsScope::GetMaxAllocatedBytes() {
+size_t ZoneStats::StatsScope::GetMaxAllocatedBytes() {
return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
}
-
-size_t ZonePool::StatsScope::GetCurrentAllocatedBytes() {
+size_t ZoneStats::StatsScope::GetCurrentAllocatedBytes() {
size_t total = 0;
- for (Zone* zone : zone_pool_->used_) {
+ for (Zone* zone : zone_stats_->zones_) {
total += static_cast<size_t>(zone->allocation_size());
// Adjust for initial values.
InitialValues::iterator it = initial_values_.find(zone);
@@ -47,13 +44,12 @@ size_t ZonePool::StatsScope::GetCurrentAllocatedBytes() {
return total;
}
-
-size_t ZonePool::StatsScope::GetTotalAllocatedBytes() {
- return zone_pool_->GetTotalAllocatedBytes() - total_allocated_bytes_at_start_;
+size_t ZoneStats::StatsScope::GetTotalAllocatedBytes() {
+ return zone_stats_->GetTotalAllocatedBytes() -
+ total_allocated_bytes_at_start_;
}
-
-void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
+void ZoneStats::StatsScope::ZoneReturned(Zone* zone) {
size_t current_total = GetCurrentAllocatedBytes();
// Update max.
max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
@@ -64,53 +60,37 @@ void ZonePool::StatsScope::ZoneReturned(Zone* zone) {
}
}
-ZonePool::ZonePool(AccountingAllocator* allocator)
+ZoneStats::ZoneStats(AccountingAllocator* allocator)
: max_allocated_bytes_(0), total_deleted_bytes_(0), allocator_(allocator) {}
-ZonePool::~ZonePool() {
- DCHECK(used_.empty());
+ZoneStats::~ZoneStats() {
+ DCHECK(zones_.empty());
DCHECK(stats_.empty());
- for (Zone* zone : unused_) {
- delete zone;
- }
}
-
-size_t ZonePool::GetMaxAllocatedBytes() {
+size_t ZoneStats::GetMaxAllocatedBytes() {
return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
}
-
-size_t ZonePool::GetCurrentAllocatedBytes() {
+size_t ZoneStats::GetCurrentAllocatedBytes() {
size_t total = 0;
- for (Zone* zone : used_) {
+ for (Zone* zone : zones_) {
total += static_cast<size_t>(zone->allocation_size());
}
return total;
}
-
-size_t ZonePool::GetTotalAllocatedBytes() {
+size_t ZoneStats::GetTotalAllocatedBytes() {
return total_deleted_bytes_ + GetCurrentAllocatedBytes();
}
-
-Zone* ZonePool::NewEmptyZone() {
- Zone* zone;
- // Grab a zone from pool if possible.
- if (!unused_.empty()) {
- zone = unused_.back();
- unused_.pop_back();
- } else {
- zone = new Zone(allocator_);
- }
- used_.push_back(zone);
- DCHECK_EQ(0u, zone->allocation_size());
+Zone* ZoneStats::NewEmptyZone(const char* zone_name) {
+ Zone* zone = new Zone(allocator_, zone_name);
+ zones_.push_back(zone);
return zone;
}
-
-void ZonePool::ReturnZone(Zone* zone) {
+void ZoneStats::ReturnZone(Zone* zone) {
size_t current_total = GetCurrentAllocatedBytes();
// Update max.
max_allocated_bytes_ = std::max(max_allocated_bytes_, current_total);
@@ -119,18 +99,11 @@ void ZonePool::ReturnZone(Zone* zone) {
stat_scope->ZoneReturned(zone);
}
// Remove from used.
- Used::iterator it = std::find(used_.begin(), used_.end(), zone);
- DCHECK(it != used_.end());
- used_.erase(it);
+ Zones::iterator it = std::find(zones_.begin(), zones_.end(), zone);
+ DCHECK(it != zones_.end());
+ zones_.erase(it);
total_deleted_bytes_ += static_cast<size_t>(zone->allocation_size());
- // Delete zone or clear and stash on unused_.
- if (unused_.size() >= kMaxUnusedSize) {
- delete zone;
- } else {
- zone->DeleteAll();
- DCHECK_EQ(0u, zone->allocation_size());
- unused_.push_back(zone);
- }
+ delete zone;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/zone-pool.h b/deps/v8/src/compiler/zone-stats.h
index 7a3fe75468..39adca3693 100644
--- a/deps/v8/src/compiler/zone-pool.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -2,45 +2,47 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_COMPILER_ZONE_POOL_H_
-#define V8_COMPILER_ZONE_POOL_H_
+#ifndef V8_COMPILER_ZONE_STATS_H_
+#define V8_COMPILER_ZONE_STATS_H_
#include <map>
#include <set>
#include <vector>
+#include "src/globals.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
-class ZonePool final {
+class V8_EXPORT_PRIVATE ZoneStats final {
public:
class Scope final {
public:
- explicit Scope(ZonePool* zone_pool)
- : zone_pool_(zone_pool), zone_(nullptr) {}
+ explicit Scope(ZoneStats* zone_stats, const char* zone_name)
+ : zone_name_(zone_name), zone_stats_(zone_stats), zone_(nullptr) {}
~Scope() { Destroy(); }
Zone* zone() {
- if (zone_ == nullptr) zone_ = zone_pool_->NewEmptyZone();
+ if (zone_ == nullptr) zone_ = zone_stats_->NewEmptyZone(zone_name_);
return zone_;
}
void Destroy() {
- if (zone_ != nullptr) zone_pool_->ReturnZone(zone_);
+ if (zone_ != nullptr) zone_stats_->ReturnZone(zone_);
zone_ = nullptr;
}
private:
- ZonePool* const zone_pool_;
+ const char* zone_name_;
+ ZoneStats* const zone_stats_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(Scope);
};
- class StatsScope final {
+ class V8_EXPORT_PRIVATE StatsScope final {
public:
- explicit StatsScope(ZonePool* zone_pool);
+ explicit StatsScope(ZoneStats* zone_stats);
~StatsScope();
size_t GetMaxAllocatedBytes();
@@ -48,12 +50,12 @@ class ZonePool final {
size_t GetTotalAllocatedBytes();
private:
- friend class ZonePool;
+ friend class ZoneStats;
void ZoneReturned(Zone* zone);
typedef std::map<Zone*, size_t> InitialValues;
- ZonePool* const zone_pool_;
+ ZoneStats* const zone_stats_;
InitialValues initial_values_;
size_t total_allocated_bytes_at_start_;
size_t max_allocated_bytes_;
@@ -61,34 +63,32 @@ class ZonePool final {
DISALLOW_COPY_AND_ASSIGN(StatsScope);
};
- explicit ZonePool(AccountingAllocator* allocator);
- ~ZonePool();
+ explicit ZoneStats(AccountingAllocator* allocator);
+ ~ZoneStats();
size_t GetMaxAllocatedBytes();
size_t GetTotalAllocatedBytes();
size_t GetCurrentAllocatedBytes();
private:
- Zone* NewEmptyZone();
+ Zone* NewEmptyZone(const char* zone_name);
void ReturnZone(Zone* zone);
static const size_t kMaxUnusedSize = 3;
- typedef std::vector<Zone*> Unused;
- typedef std::vector<Zone*> Used;
+ typedef std::vector<Zone*> Zones;
typedef std::vector<StatsScope*> Stats;
- Unused unused_;
- Used used_;
+ Zones zones_;
Stats stats_;
size_t max_allocated_bytes_;
size_t total_deleted_bytes_;
AccountingAllocator* allocator_;
- DISALLOW_COPY_AND_ASSIGN(ZonePool);
+ DISALLOW_COPY_AND_ASSIGN(ZoneStats);
};
} // namespace compiler
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_COMPILER_ZONE_STATS_H_
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 4fb3c833b7..012944e2c2 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -82,7 +82,7 @@ Context* Context::declaration_context() {
Context* Context::closure_context() {
Context* current = this;
while (!current->IsFunctionContext() && !current->IsScriptContext() &&
- !current->IsNativeContext()) {
+ !current->IsModuleContext() && !current->IsNativeContext()) {
current = current->previous();
DCHECK(current->closure() == closure());
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index b927d05fd6..b0b719585e 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -51,6 +51,7 @@ enum ContextLookupFlags {
V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
@@ -58,7 +59,8 @@ enum ContextLookupFlags {
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow)
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(CREATE_RESOLVING_FUNCTION_INDEX, JSFunction, create_resolving_functions)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
@@ -92,13 +94,15 @@ enum ContextLookupFlags {
V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
promise_has_user_defined_reject_handler) \
+ V(PROMISE_DEBUG_GET_INFO_INDEX, JSFunction, promise_debug_get_info) \
V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REGEXP_LAST_MATCH_INFO_INDEX, JSObject, regexp_last_match_info) \
V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction, \
reject_promise_no_debug_event) \
V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
@@ -107,7 +111,77 @@ enum ContextLookupFlags {
V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
+
+#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V) \
+ V(TYPED_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, typed_array_key_iterator_map) \
+ V(FAST_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, fast_array_key_iterator_map) \
+ V(GENERIC_ARRAY_KEY_ITERATOR_MAP_INDEX, Map, array_key_iterator_map) \
+ \
+ V(UINT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint8_array_key_value_iterator_map) \
+ V(INT8_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ int8_array_key_value_iterator_map) \
+ V(UINT16_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint16_array_key_value_iterator_map) \
+ V(INT16_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ int16_array_key_value_iterator_map) \
+ V(UINT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint32_array_key_value_iterator_map) \
+ V(INT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ int32_array_key_value_iterator_map) \
+ V(FLOAT32_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ float32_array_key_value_iterator_map) \
+ V(FLOAT64_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ float64_array_key_value_iterator_map) \
+ V(UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint8_clamped_array_key_value_iterator_map) \
+ \
+ V(FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_smi_array_key_value_iterator_map) \
+ V(FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_holey_smi_array_key_value_iterator_map) \
+ V(FAST_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_array_key_value_iterator_map) \
+ V(FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_holey_array_key_value_iterator_map) \
+ V(FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_double_array_key_value_iterator_map) \
+ V(FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_holey_double_array_key_value_iterator_map) \
+ V(GENERIC_ARRAY_KEY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ array_key_value_iterator_map) \
+ \
+ V(UINT8_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, uint8_array_value_iterator_map) \
+ V(INT8_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int8_array_value_iterator_map) \
+ V(UINT16_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint16_array_value_iterator_map) \
+ V(INT16_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int16_array_value_iterator_map) \
+ V(UINT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint32_array_value_iterator_map) \
+ V(INT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, int32_array_value_iterator_map) \
+ V(FLOAT32_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ float32_array_value_iterator_map) \
+ V(FLOAT64_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ float64_array_value_iterator_map) \
+ V(UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ uint8_clamped_array_value_iterator_map) \
+ \
+ V(FAST_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_smi_array_value_iterator_map) \
+ V(FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_holey_smi_array_value_iterator_map) \
+ V(FAST_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, fast_array_value_iterator_map) \
+ V(FAST_HOLEY_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_holey_array_value_iterator_map) \
+ V(FAST_DOUBLE_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_double_array_value_iterator_map) \
+ V(FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, \
+ fast_holey_double_array_value_iterator_map) \
+ V(GENERIC_ARRAY_VALUE_ITERATOR_MAP_INDEX, Map, array_value_iterator_map)
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
@@ -153,6 +227,10 @@ enum ContextLookupFlags {
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
generator_function_function) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
+ V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject, \
+ initial_array_iterator_prototype) \
+ V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
+ initial_array_iterator_prototype_map) \
V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
V(INITIAL_GENERATOR_PROTOTYPE_INDEX, JSObject, initial_generator_prototype) \
V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype) \
@@ -178,6 +256,7 @@ enum ContextLookupFlags {
js_array_fast_holey_double_elements_map_index) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \
+ V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \
V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
V(JS_SET_MAP_INDEX, Map, js_set_map) \
V(JS_WEAK_MAP_FUN_INDEX, JSFunction, js_weak_map_fun) \
@@ -185,12 +264,15 @@ enum ContextLookupFlags {
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
+ V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index) \
+ V(MATH_RANDOM_CACHE_INDEX, Object, math_random_cache) \
V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
- V(OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, object_with_null_prototype_map) \
+ V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \
+ slow_object_with_null_prototype_map) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
@@ -198,13 +280,22 @@ enum ContextLookupFlags {
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
+ V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
+ promise_resolve_shared_fun) \
+ V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun) \
+ V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
+ V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
+ V(REGEXP_INTERNAL_MATCH_INFO_INDEX, RegExpMatchInfo, \
+ regexp_internal_match_info) \
+ V(REGEXP_PROTOTYPE_MAP_INDEX, Map, regexp_prototype_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
+ V(FIXED_ARRAY_ITERATOR_MAP_INDEX, Map, fixed_array_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
@@ -244,7 +335,8 @@ enum ContextLookupFlags {
V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function) \
V(CURRENT_MODULE_INDEX, Module, current_module) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- NATIVE_CONTEXT_IMPORTED_FIELDS(V)
+ NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
// A table of all script contexts. Every loaded top-level script with top-level
// lexical declarations contributes its ScriptContext into this table.
@@ -357,7 +449,7 @@ class Context: public FixedArray {
static inline Context* cast(Object* context);
// The default context slot layout; indices are FixedArray slot indices.
- enum {
+ enum Field {
// These slots are in all contexts.
CLOSURE_INDEX,
PREVIOUS_INDEX,
@@ -563,6 +655,8 @@ class Context: public FixedArray {
STATIC_ASSERT(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
+typedef Context::Field ContextField;
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index 303e5e3a81..7219ef778a 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -12,17 +12,20 @@ namespace internal {
RuntimeCallTimerScope::RuntimeCallTimerScope(
Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats)) {
- Initialize(isolate, counter_id);
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ Initialize(isolate->counters()->runtime_call_stats(), counter_id);
}
}
RuntimeCallTimerScope::RuntimeCallTimerScope(
HeapObject* heap_object, RuntimeCallStats::CounterId counter_id) {
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats)) {
- Initialize(heap_object->GetIsolate(), counter_id);
+ RuntimeCallTimerScope(heap_object->GetIsolate(), counter_id);
+}
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(
+ RuntimeCallStats* stats, RuntimeCallStats::CounterId counter_id) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ Initialize(stats, counter_id);
}
}
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index c4e86460aa..5089eb22e8 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -238,7 +238,7 @@ class RuntimeCallStatEntries {
return count_ < other.count_;
}
- void Print(std::ostream& os) {
+ V8_NOINLINE void Print(std::ostream& os) {
os.precision(2);
os << std::fixed << std::setprecision(2);
os << std::setw(50) << name_;
@@ -249,7 +249,8 @@ class RuntimeCallStatEntries {
os << std::endl;
}
- void SetTotal(base::TimeDelta total_time, uint64_t total_count) {
+ V8_NOINLINE void SetTotal(base::TimeDelta total_time,
+ uint64_t total_count) {
if (total_time.InMicroseconds() == 0) {
time_percent_ = 0;
} else {
@@ -276,125 +277,121 @@ void RuntimeCallCounter::Reset() {
time = base::TimeDelta();
}
-void RuntimeCallCounter::Dump(std::stringstream& out) {
- out << "\"" << name << "\":[" << count << "," << time.InMicroseconds()
- << "],";
+void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
+ value->BeginArray(name);
+ value->AppendLongInteger(count);
+ value->AppendLongInteger(time.InMicroseconds());
+ value->EndArray();
}
+void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
+ count += other->count;
+ time += other->time;
+}
+
+// static
+const RuntimeCallStats::CounterId RuntimeCallStats::counters[] = {
+#define CALL_RUNTIME_COUNTER(name) &RuntimeCallStats::name,
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) //
+#undef CALL_RUNTIME_COUNTER
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
+ &RuntimeCallStats::Runtime_##name, //
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) //
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Builtin_##name,
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::API_##name,
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) &RuntimeCallStats::Handler_##name,
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+};
+
// static
void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
CounterId counter_id) {
RuntimeCallCounter* counter = &(stats->*counter_id);
- timer->Start(counter, stats->current_timer_);
- stats->current_timer_ = timer;
+ DCHECK(counter->name != nullptr);
+ timer->Start(counter, stats->current_timer_.Value());
+ stats->current_timer_.SetValue(timer);
}
// static
void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
- if (stats->current_timer_ == timer) {
- stats->current_timer_ = timer->Stop();
+ if (stats->current_timer_.Value() == timer) {
+ stats->current_timer_.SetValue(timer->Stop());
} else {
// Must be a Threading cctest. Walk the chain of Timers to find the
// buried one that's leaving. We don't care about keeping nested timings
// accurate, just avoid crashing by keeping the chain intact.
- RuntimeCallTimer* next = stats->current_timer_;
- while (next->parent_ != timer) next = next->parent_;
- next->parent_ = timer->Stop();
+ RuntimeCallTimer* next = stats->current_timer_.Value();
+ while (next && next->parent() != timer) next = next->parent();
+ if (next == nullptr) return;
+ next->parent_.SetValue(timer->Stop());
+ }
+}
+
+void RuntimeCallStats::Add(RuntimeCallStats* other) {
+ for (const RuntimeCallStats::CounterId counter_id :
+ RuntimeCallStats::counters) {
+ RuntimeCallCounter* counter = &(this->*counter_id);
+ RuntimeCallCounter* other_counter = &(other->*counter_id);
+ counter->Add(other_counter);
}
}
// static
void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
CounterId counter_id) {
- DCHECK_NOT_NULL(stats->current_timer_);
- RuntimeCallCounter* counter = &(stats->*counter_id);
- stats->current_timer_->counter_ = counter;
+ RuntimeCallTimer* timer = stats->current_timer_.Value();
+ // When RCS are enabled dynamically there might be no current timer set up.
+ if (timer == nullptr) return;
+ timer->counter_ = &(stats->*counter_id);
}
void RuntimeCallStats::Print(std::ostream& os) {
RuntimeCallStatEntries entries;
-
-#define PRINT_COUNTER(name) entries.Add(&this->name);
- FOR_EACH_MANUAL_COUNTER(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name, nargs, ressize) entries.Add(&this->Runtime_##name);
- FOR_EACH_INTRINSIC(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name) entries.Add(&this->Builtin_##name);
- BUILTIN_LIST_C(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name) entries.Add(&this->API_##name);
- FOR_EACH_API_COUNTER(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
-#define PRINT_COUNTER(name) entries.Add(&this->Handler_##name);
- FOR_EACH_HANDLER_COUNTER(PRINT_COUNTER)
-#undef PRINT_COUNTER
-
+ if (current_timer_.Value() != nullptr) {
+ current_timer_.Value()->Elapsed();
+ }
+ for (const RuntimeCallStats::CounterId counter_id :
+ RuntimeCallStats::counters) {
+ RuntimeCallCounter* counter = &(this->*counter_id);
+ entries.Add(counter);
+ }
entries.Print(os);
}
void RuntimeCallStats::Reset() {
- if (!FLAG_runtime_call_stats &&
- !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())
- return;
-#define RESET_COUNTER(name) this->name.Reset();
- FOR_EACH_MANUAL_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name, nargs, result_size) this->Runtime_##name.Reset();
- FOR_EACH_INTRINSIC(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->Builtin_##name.Reset();
- BUILTIN_LIST_C(RESET_COUNTER)
-#undef RESET_COUNTER
-
-#define RESET_COUNTER(name) this->API_##name.Reset();
- FOR_EACH_API_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
+ if (V8_LIKELY(FLAG_runtime_stats == 0)) return;
+
+ // In tracing, we only what to trace the time spent on top level trace events,
+ // if runtime counter stack is not empty, we should clear the whole runtime
+ // counter stack, and then reset counters so that we can dump counters into
+ // top level trace events accurately.
+ while (current_timer_.Value()) {
+ current_timer_.SetValue(current_timer_.Value()->Stop());
+ }
-#define RESET_COUNTER(name) this->Handler_##name.Reset();
- FOR_EACH_HANDLER_COUNTER(RESET_COUNTER)
-#undef RESET_COUNTER
+ for (const RuntimeCallStats::CounterId counter_id :
+ RuntimeCallStats::counters) {
+ RuntimeCallCounter* counter = &(this->*counter_id);
+ counter->Reset();
+ }
in_use_ = true;
}
-std::string RuntimeCallStats::Dump() {
- buffer_.str(std::string());
- buffer_.clear();
- buffer_ << "{";
-#define DUMP_COUNTER(name) \
- if (this->name.count > 0) this->name.Dump(buffer_);
- FOR_EACH_MANUAL_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name, nargs, result_size) \
- if (this->Runtime_##name.count > 0) this->Runtime_##name.Dump(buffer_);
- FOR_EACH_INTRINSIC(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
- if (this->Builtin_##name.count > 0) this->Builtin_##name.Dump(buffer_);
- BUILTIN_LIST_C(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
- if (this->API_##name.count > 0) this->API_##name.Dump(buffer_);
- FOR_EACH_API_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
-
-#define DUMP_COUNTER(name) \
- if (this->Handler_##name.count > 0) this->Handler_##name.Dump(buffer_);
- FOR_EACH_HANDLER_COUNTER(DUMP_COUNTER)
-#undef DUMP_COUNTER
- buffer_ << "\"END\":[]}";
+void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
+ for (const RuntimeCallStats::CounterId counter_id :
+ RuntimeCallStats::counters) {
+ RuntimeCallCounter* counter = &(this->*counter_id);
+ if (counter->count > 0) counter->Dump(value);
+ }
+
in_use_ = false;
- return buffer_.str();
}
} // namespace internal
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 707ae9f738..4415250b24 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -7,6 +7,7 @@
#include "include/v8.h"
#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins.h"
@@ -15,6 +16,8 @@
#include "src/objects.h"
#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
@@ -483,8 +486,9 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
struct RuntimeCallCounter {
explicit RuntimeCallCounter(const char* name) : name(name) {}
- void Reset();
- V8_NOINLINE void Dump(std::stringstream& out);
+ V8_NOINLINE void Reset();
+ V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
+ void Add(RuntimeCallCounter* other);
const char* name;
int64_t count = 0;
@@ -495,33 +499,49 @@ struct RuntimeCallCounter {
// timers used for properly measuring the own time of a RuntimeCallCounter.
class RuntimeCallTimer {
public:
- RuntimeCallTimer() {}
RuntimeCallCounter* counter() { return counter_; }
base::ElapsedTimer timer() { return timer_; }
+ RuntimeCallTimer* parent() const { return parent_.Value(); }
private:
friend class RuntimeCallStats;
inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
counter_ = counter;
- parent_ = parent;
- timer_.Start();
+ parent_.SetValue(parent);
+ if (FLAG_runtime_stats !=
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
+ timer_.Start();
+ }
}
inline RuntimeCallTimer* Stop() {
+ if (!timer_.IsStarted()) return parent();
base::TimeDelta delta = timer_.Elapsed();
timer_.Stop();
counter_->count++;
counter_->time += delta;
- if (parent_ != NULL) {
+ if (parent()) {
// Adjust parent timer so that it does not include sub timer's time.
- parent_->counter_->time -= delta;
+ parent()->counter_->time -= delta;
}
- return parent_;
+ return parent();
}
+ inline void Elapsed() {
+ base::TimeDelta delta = timer_.Elapsed();
+ counter_->time += delta;
+ if (parent()) {
+ parent()->counter_->time -= delta;
+ parent()->Elapsed();
+ }
+ timer_.Restart();
+ }
+
+ const char* name() { return counter_->name; }
+
RuntimeCallCounter* counter_ = nullptr;
- RuntimeCallTimer* parent_ = nullptr;
+ base::AtomicValue<RuntimeCallTimer*> parent_;
base::ElapsedTimer timer_;
};
@@ -670,6 +690,11 @@ class RuntimeCallTimer {
#define FOR_EACH_MANUAL_COUNTER(V) \
V(AccessorGetterCallback) \
V(AccessorNameGetterCallback) \
+ V(AccessorNameGetterCallback_ArrayLength) \
+ V(AccessorNameGetterCallback_BoundFunctionLength) \
+ V(AccessorNameGetterCallback_BoundFunctionName) \
+ V(AccessorNameGetterCallback_FunctionPrototype) \
+ V(AccessorNameGetterCallback_StringLength) \
V(AccessorNameSetterCallback) \
V(Compile) \
V(CompileCode) \
@@ -678,6 +703,7 @@ class RuntimeCallTimer {
V(CompileEval) \
V(CompileFullCode) \
V(CompileIgnition) \
+ V(CompilerDispatcher) \
V(CompileSerialize) \
V(DeoptimizeCode) \
V(FunctionCallback) \
@@ -701,8 +727,14 @@ class RuntimeCallTimer {
V(Map_TransitionToDataProperty) \
V(Object_DeleteProperty) \
V(OptimizeCode) \
- V(Parse) \
- V(ParseLazy) \
+ V(ParseArrowFunctionLiteral) \
+ V(ParseEval) \
+ V(ParseFunction) \
+ V(ParseFunctionLiteral) \
+ V(ParseProgram) \
+ V(PreParseArrowFunctionLiteral) \
+ V(PreParseNoVariableResolution) \
+ V(PreParseWithVariableResolution) \
V(PropertyCallback) \
V(PrototypeMap_TransitionToAccessorProperty) \
V(PrototypeMap_TransitionToDataProperty) \
@@ -712,46 +744,75 @@ class RuntimeCallTimer {
/* Dummy counter for the unexpected stub miss. */ \
V(UnexpectedStubMiss)
-#define FOR_EACH_HANDLER_COUNTER(V) \
- V(IC_HandlerCacheHit) \
- V(KeyedLoadIC_LoadIndexedStringStub) \
- V(KeyedLoadIC_LoadIndexedInterceptorStub) \
- V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
- V(KeyedLoadIC_LoadFastElementStub) \
- V(KeyedLoadIC_LoadDictionaryElementStub) \
- V(KeyedLoadIC_SlowStub) \
- V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
- V(KeyedStoreIC_StoreFastElementStub) \
- V(KeyedStoreIC_StoreElementStub) \
- V(KeyedStoreIC_Polymorphic) \
- V(LoadIC_FunctionPrototypeStub) \
- V(LoadIC_LoadApiGetterStub) \
- V(LoadIC_LoadCallback) \
- V(LoadIC_LoadConstant) \
- V(LoadIC_LoadConstantStub) \
- V(LoadIC_LoadField) \
- V(LoadIC_LoadFieldStub) \
- V(LoadIC_LoadGlobal) \
- V(LoadIC_LoadInterceptor) \
- V(LoadIC_LoadNonexistent) \
- V(LoadIC_LoadNormal) \
- V(LoadIC_LoadScriptContextFieldStub) \
- V(LoadIC_LoadViaGetter) \
- V(LoadIC_SlowStub) \
- V(LoadIC_StringLengthStub) \
- V(StoreIC_SlowStub) \
- V(StoreIC_StoreCallback) \
- V(StoreIC_StoreField) \
- V(StoreIC_StoreFieldStub) \
- V(StoreIC_StoreGlobal) \
- V(StoreIC_StoreGlobalTransition) \
- V(StoreIC_StoreInterceptorStub) \
- V(StoreIC_StoreNormal) \
- V(StoreIC_StoreScriptContextFieldStub) \
- V(StoreIC_StoreTransition) \
+#define FOR_EACH_HANDLER_COUNTER(V) \
+ V(IC_HandlerCacheHit) \
+ V(KeyedLoadIC_LoadIndexedStringStub) \
+ V(KeyedLoadIC_LoadIndexedInterceptorStub) \
+ V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
+ V(KeyedLoadIC_LoadElementDH) \
+ V(KeyedLoadIC_LoadFastElementStub) \
+ V(KeyedLoadIC_LoadDictionaryElementStub) \
+ V(KeyedLoadIC_SlowStub) \
+ V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
+ V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
+ V(KeyedStoreIC_SlowStub) \
+ V(KeyedStoreIC_StoreFastElementStub) \
+ V(KeyedStoreIC_StoreElementStub) \
+ V(LoadIC_FunctionPrototypeStub) \
+ V(LoadIC_HandlerCacheHit_AccessCheck) \
+ V(LoadIC_HandlerCacheHit_Exotic) \
+ V(LoadIC_HandlerCacheHit_Interceptor) \
+ V(LoadIC_HandlerCacheHit_JSProxy) \
+ V(LoadIC_HandlerCacheHit_NonExistent) \
+ V(LoadIC_HandlerCacheHit_Accessor) \
+ V(LoadIC_HandlerCacheHit_Data) \
+ V(LoadIC_HandlerCacheHit_Transition) \
+ V(LoadIC_LoadApiGetterDH) \
+ V(LoadIC_LoadApiGetterFromPrototypeDH) \
+ V(LoadIC_LoadApiGetterStub) \
+ V(LoadIC_LoadCallback) \
+ V(LoadIC_LoadConstantDH) \
+ V(LoadIC_LoadConstantFromPrototypeDH) \
+ V(LoadIC_LoadConstant) \
+ V(LoadIC_LoadConstantStub) \
+ V(LoadIC_LoadFieldDH) \
+ V(LoadIC_LoadFieldFromPrototypeDH) \
+ V(LoadIC_LoadField) \
+ V(LoadIC_LoadFieldStub) \
+ V(LoadIC_LoadGlobal) \
+ V(LoadIC_LoadInterceptor) \
+ V(LoadIC_LoadNonexistentDH) \
+ V(LoadIC_LoadNonexistent) \
+ V(LoadIC_LoadNormal) \
+ V(LoadIC_LoadScriptContextFieldStub) \
+ V(LoadIC_LoadViaGetter) \
+ V(LoadIC_Premonomorphic) \
+ V(LoadIC_SlowStub) \
+ V(LoadIC_StringLengthStub) \
+ V(StoreIC_HandlerCacheHit_AccessCheck) \
+ V(StoreIC_HandlerCacheHit_Exotic) \
+ V(StoreIC_HandlerCacheHit_Interceptor) \
+ V(StoreIC_HandlerCacheHit_JSProxy) \
+ V(StoreIC_HandlerCacheHit_NonExistent) \
+ V(StoreIC_HandlerCacheHit_Accessor) \
+ V(StoreIC_HandlerCacheHit_Data) \
+ V(StoreIC_HandlerCacheHit_Transition) \
+ V(StoreIC_Premonomorphic) \
+ V(StoreIC_SlowStub) \
+ V(StoreIC_StoreCallback) \
+ V(StoreIC_StoreField) \
+ V(StoreIC_StoreFieldDH) \
+ V(StoreIC_StoreFieldStub) \
+ V(StoreIC_StoreGlobal) \
+ V(StoreIC_StoreGlobalTransition) \
+ V(StoreIC_StoreInterceptorStub) \
+ V(StoreIC_StoreNormal) \
+ V(StoreIC_StoreScriptContextFieldStub) \
+ V(StoreIC_StoreTransition) \
+ V(StoreIC_StoreTransitionDH) \
V(StoreIC_StoreViaSetter)
-class RuntimeCallStats {
+class RuntimeCallStats : public ZoneObject {
public:
typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
@@ -776,6 +837,8 @@ class RuntimeCallStats {
FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
+ static const CounterId counters[];
+
// Starting measuring the time for a function. This will establish the
// connection to the parent counter for properly calculating the own times.
static void Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
@@ -792,37 +855,37 @@ class RuntimeCallStats {
CounterId counter_id);
void Reset();
- V8_NOINLINE void Print(std::ostream& os);
- V8_NOINLINE std::string Dump();
+ // Add all entries from another stats object.
+ void Add(RuntimeCallStats* other);
+ void Print(std::ostream& os);
+ V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
RuntimeCallStats() {
Reset();
in_use_ = false;
}
- RuntimeCallTimer* current_timer() { return current_timer_; }
+ RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
bool InUse() { return in_use_; }
private:
- std::stringstream buffer_;
// Counter to track recursive time events.
- RuntimeCallTimer* current_timer_ = NULL;
+ base::AtomicValue<RuntimeCallTimer*> current_timer_;
// Used to track nested tracing scopes.
bool in_use_;
};
-#define TRACE_RUNTIME_CALL_STATS(isolate, counter_name) \
- do { \
- if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() || \
- FLAG_runtime_call_stats)) { \
- RuntimeCallStats::CorrectCurrentCounterId( \
- isolate->counters()->runtime_call_stats(), \
- &RuntimeCallStats::counter_name); \
- } \
+#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_name) \
+ do { \
+ if (V8_UNLIKELY(FLAG_runtime_stats)) { \
+ RuntimeCallStats::CorrectCurrentCounterId( \
+ runtime_call_stats, &RuntimeCallStats::counter_name); \
+ } \
} while (false)
-#define TRACE_HANDLER_STATS(isolate, counter_name) \
- TRACE_RUNTIME_CALL_STATS(isolate, Handler_##counter_name)
+#define TRACE_HANDLER_STATS(isolate, counter_name) \
+ CHANGE_CURRENT_RUNTIME_COUNTER(isolate->counters()->runtime_call_stats(), \
+ Handler_##counter_name)
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms */ \
@@ -857,10 +920,6 @@ class RuntimeCallStats {
MILLISECOND) \
HT(gc_low_memory_notification, V8.GCLowMemoryNotification, 10000, \
MILLISECOND) \
- /* Parsing timers. */ \
- HT(parse, V8.ParseMicroSeconds, 1000000, MICROSECOND) \
- HT(parse_lazy, V8.ParseLazyMicroSeconds, 1000000, MICROSECOND) \
- HT(pre_parse, V8.PreParseMicroSeconds, 1000000, MICROSECOND) \
/* Compilation times. */ \
HT(compile, V8.CompileMicroSeconds, 1000000, MICROSECOND) \
HT(compile_eval, V8.CompileEvalMicroSeconds, 1000000, MICROSECOND) \
@@ -1249,23 +1308,23 @@ class RuntimeCallTimerScope {
// stats are disabled and the isolate is not directly available.
inline RuntimeCallTimerScope(HeapObject* heap_object,
RuntimeCallStats::CounterId counter_id);
+ inline RuntimeCallTimerScope(RuntimeCallStats* stats,
+ RuntimeCallStats::CounterId counter_id);
inline ~RuntimeCallTimerScope() {
- if (V8_UNLIKELY(isolate_ != nullptr)) {
- RuntimeCallStats::Leave(isolate_->counters()->runtime_call_stats(),
- &timer_);
+ if (V8_UNLIKELY(stats_ != nullptr)) {
+ RuntimeCallStats::Leave(stats_, &timer_);
}
}
private:
- V8_INLINE void Initialize(Isolate* isolate,
+ V8_INLINE void Initialize(RuntimeCallStats* stats,
RuntimeCallStats::CounterId counter_id) {
- isolate_ = isolate;
- RuntimeCallStats::Enter(isolate_->counters()->runtime_call_stats(), &timer_,
- counter_id);
+ stats_ = stats;
+ RuntimeCallStats::Enter(stats_, &timer_, counter_id);
}
- Isolate* isolate_ = nullptr;
+ RuntimeCallStats* stats_ = nullptr;
RuntimeCallTimer timer_;
};
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index 8c4b7356c9..823f5a9f89 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -205,14 +205,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -875,15 +867,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1725,24 +1717,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
return new(zone()) LHasInstanceTypeAndBranch(value);
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1999,15 +1973,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2043,18 +2008,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2114,20 +2067,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
- r0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2405,7 +2344,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index abdfbddf4d..0d066c97aa 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -71,9 +71,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -89,11 +87,8 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
V(MathCos) \
@@ -1071,35 +1066,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
};
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1484,25 +1450,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1551,43 +1498,6 @@ class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1960,6 +1870,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index f2cc4b447e..e092a9e040 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -253,8 +253,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -2058,45 +2057,44 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ b(eq, instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ b(eq, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ cmp(reg, Operand::Zero());
__ b(eq, instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
const Register map = scratch0();
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
__ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
@@ -2104,13 +2102,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ b(ge, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2122,19 +2120,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ b(eq, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2148,7 +2146,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2393,30 +2391,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(instr, eq);
-}
-
-
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2585,35 +2559,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(r0));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ mov(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2696,19 +2641,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(r0));
-
- // Name is always in r2.
- __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register scratch = scratch0();
Register function = ToRegister(instr->function());
@@ -2938,11 +2870,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ b(ne, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+ __ cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -2993,18 +2925,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -4539,8 +4459,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DwVfpRegister result_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Register scratch = scratch0();
@@ -4617,34 +4536,12 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label no_heap_number, check_bools, check_false;
- __ b(ne, &no_heap_number);
+ Label truncate;
+ __ b(eq, &truncate);
+ __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ bind(&truncate);
__ TruncateHeapNumberToI(input_reg, scratch2);
- __ b(&done);
-
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ bind(&no_heap_number);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(scratch2, Operand(ip));
- __ b(ne, &check_bools);
- __ mov(input_reg, Operand::Zero());
- __ b(&done);
-
- __ bind(&check_bools);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(scratch2, Operand(ip));
- __ b(ne, &check_false);
- __ mov(input_reg, Operand(1));
- __ b(&done);
-
- __ bind(&check_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
- __ mov(input_reg, Operand::Zero());
} else {
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
@@ -5052,7 +4949,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(Smi::FromInt(0)));
+ __ mov(result, Operand(Smi::kZero));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5386,7 +5283,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ cmp(result, Operand(Smi::FromInt(0)));
+ __ cmp(result, Operand(Smi::kZero));
__ b(ne, &load_cache);
__ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
__ jmp(&done);
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index 8a9ce4266d..e5227e301f 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -102,14 +102,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
bool LGoto::HasInterestingComment(LCodeGen* gen) const {
return !gen->IsNextEmittedBlock(block_id());
}
@@ -942,12 +934,13 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
}
- ToBooleanICStub::Types expected = instr->expected_input_types();
- bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+ ToBooleanHints expected = instr->expected_input_types();
+ bool needs_temps = (expected & ToBooleanHint::kNeedsMap) ||
+ expected == ToBooleanHint::kNone;
LOperand* temp1 = needs_temps ? TempRegister() : NULL;
LOperand* temp2 = needs_temps ? TempRegister() : NULL;
- if (expected.IsGeneric() || expected.IsEmpty()) {
+ if (expected == ToBooleanHint::kAny || expected == ToBooleanHint::kNone) {
// The generic case cannot deoptimize because it already supports every
// possible input type.
DCHECK(needs_temps);
@@ -1409,7 +1402,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
@@ -1428,28 +1420,10 @@ LInstruction* LChunkBuilder::DoForceRepresentation(
return NULL;
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
HHasInstanceTypeAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1551,15 +1525,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
DCHECK(instr->key()->representation().IsSmiOrInteger32());
ElementsKind elements_kind = instr->elements_kind();
@@ -1610,38 +1575,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
- x0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LOperand* object = UseRegisterAtStart(instr->object());
return DefineAsRegister(new(zone()) LLoadNamedField(object));
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
return DefineAsRegister(new(zone()) LLoadRoot);
}
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 9891f9ee49..a9d85e5a3e 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -74,9 +74,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -91,13 +89,10 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
V(LoadKeyedFixedDouble) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(LoadRoot) \
V(MathAbs) \
V(MathAbsTagged) \
@@ -1282,38 +1277,6 @@ class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 1> {
- public:
- LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
public:
LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -1537,24 +1500,6 @@ class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
};
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
template <int T>
class LLoadKeyed : public LTemplateInstruction<1, 3, T> {
public:
@@ -1637,45 +1582,6 @@ class LLoadKeyedFixedDouble: public LLoadKeyed<1> {
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
@@ -2046,6 +1952,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index a4aa275b15..4d8e6615e7 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -39,6 +39,29 @@ class SafepointGenerator final : public CallWrapper {
Safepoint::DeoptMode deopt_mode_;
};
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+ LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+ UseScratchRegisterScope temps(codegen_->masm_);
+ // Preserve the value of lr which must be saved on the stack (the call to
+ // the stub will clobber it).
+ Register to_be_pushed_lr =
+ temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
+ codegen_->masm_->Mov(to_be_pushed_lr, lr);
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
#define __ masm()->
@@ -681,8 +704,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -1438,7 +1460,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
+ __ Mov(ToRegister(instr->result()), Smi::kZero);
PushSafepointRegistersScope scope(this);
LoadContextFromDeferred(instr->context());
@@ -1748,7 +1770,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, eq);
} else if (type.IsSmi()) {
DCHECK(!info()->IsStub());
- EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
+ EmitCompareAndBranch(instr, ne, value, Smi::kZero);
} else if (type.IsJSArray()) {
DCHECK(!info()->IsStub());
EmitGoto(instr->TrueDestination(chunk()));
@@ -1764,18 +1786,17 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
EmitCompareAndBranch(instr, ne, temp, 0);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ JumpIfRoot(
value, Heap::kUndefinedValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// Boolean -> its value.
__ JumpIfRoot(
value, Heap::kTrueValueRootIndex, true_label);
@@ -1783,18 +1804,18 @@ void LCodeGen::DoBranch(LBranch* instr) {
value, Heap::kFalseValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ JumpIfRoot(
value, Heap::kNullValueRootIndex, false_label);
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
__ Cbz(value, false_label);
__ JumpIfSmi(value, true_label);
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a smi, deopt.
DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
}
@@ -1802,14 +1823,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register map = NoReg;
Register scratch = NoReg;
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
map = ToRegister(instr->temp1());
scratch = ToRegister(instr->temp2());
__ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ TestAndBranchIfAnySet(
@@ -1817,13 +1838,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
__ B(ge, true_label);
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
@@ -1834,19 +1855,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CompareInstanceType(map, scratch, SYMBOL_TYPE);
__ B(eq, true_label);
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
__ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
__ B(eq, true_label);
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
Label not_heap_number;
__ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -1860,7 +1881,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ Bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
@@ -2664,20 +2685,6 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ Bind(&use_cache);
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- // Assert that we can use a W register load to get the hash.
- DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
- __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
void LCodeGen::EmitGoto(int block) {
// Do not emit jump if we are emitting a goto to the next block.
if (!IsNextEmittedBlock(block)) {
@@ -2685,25 +2692,10 @@ void LCodeGen::EmitGoto(int block) {
}
}
-
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register temp = ToRegister32(instr->temp());
-
- // Assert that the cache status bits fit in a W register.
- DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
- __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
- __ Tst(temp, String::kContainsCachedArrayIndexMask);
- EmitBranch(instr, eq);
-}
-
-
// HHasInstanceTypeAndBranch instruction is built with an interval of type
// to test but is only used in very restricted ways. The only possible kinds
// of intervals are:
@@ -3013,35 +3005,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(x0));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Mov(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ Mov(slot_register, Smi::FromInt(index));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).Is(x0));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
Register key,
Register base,
@@ -3277,11 +3240,11 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
__ B(ne, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+ __ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3290,20 +3253,6 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- DCHECK(ToRegister(instr->result()).Is(x0));
-}
-
-
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
HObjectAccess access = instr->hydrogen()->access();
int offset = access.offset();
@@ -3345,19 +3294,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // LoadIC expects name and receiver in registers.
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- DCHECK(ToRegister(instr->result()).is(x0));
-}
-
-
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
__ LoadRoot(result, instr->index());
@@ -4289,8 +4225,7 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input = ToRegister(instr->value());
Register scratch = ToRegister(instr->temp());
DoubleRegister result = ToDoubleRegister(instr->result());
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
Label done, load_smi;
@@ -5267,30 +5202,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
Label done;
if (instr->truncating()) {
+ UseScratchRegisterScope temps(masm());
Register output = ToRegister(instr->result());
- Label check_bools;
-
- // If it's not a heap number, jump to undefined check.
- __ JumpIfNotHeapNumber(input, &check_bools);
-
- // A heap number: load value and convert to int32 using truncating function.
+ Register input_map = temps.AcquireX();
+ Register input_instance_type = input_map;
+ Label truncate;
+ __ CompareObjectType(input, input_map, input_instance_type,
+ HEAP_NUMBER_TYPE);
+ __ B(eq, &truncate);
+ __ Cmp(input_instance_type, ODDBALL_TYPE);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ Bind(&truncate);
__ TruncateHeapNumberToI(output, input);
- __ B(&done);
-
- __ Bind(&check_bools);
-
- Register true_root = output;
- Register false_root = scratch1;
- __ LoadTrueFalseRoots(true_root, false_root);
- __ Cmp(input, true_root);
- __ Cset(output, eq);
- __ Ccmp(input, false_root, ZFlag, ne);
- __ B(eq, &done);
-
- // Output contains zero, undefined is converted to zero for truncating
- // conversions.
- DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5650,7 +5573,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
__ Mov(index, Operand(index, ASR, 1));
- __ Cmp(index, Smi::FromInt(0));
+ __ Cmp(index, Smi::kZero);
__ B(lt, &out_of_object);
STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index ca04fa27c0..7f444738aa 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -368,28 +368,9 @@ class LCodeGen: public LCodeGenBase {
class PushSafepointRegistersScope BASE_EMBEDDED {
public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-
- UseScratchRegisterScope temps(codegen_->masm_);
- // Preserve the value of lr which must be saved on the stack (the call to
- // the stub will clobber it).
- Register to_be_pushed_lr =
- temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
- codegen_->masm_->Mov(to_be_pushed_lr, lr);
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
+ explicit PushSafepointRegistersScope(LCodeGen* codegen);
+
+ ~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
diff --git a/deps/v8/src/crankshaft/compilation-phase.cc b/deps/v8/src/crankshaft/compilation-phase.cc
index 9b40ccaec4..4be0b1a488 100644
--- a/deps/v8/src/crankshaft/compilation-phase.cc
+++ b/deps/v8/src/crankshaft/compilation-phase.cc
@@ -11,7 +11,7 @@ namespace v8 {
namespace internal {
CompilationPhase::CompilationPhase(const char* name, CompilationInfo* info)
- : name_(name), info_(info), zone_(info->isolate()->allocator()) {
+ : name_(name), info_(info), zone_(info->isolate()->allocator(), ZONE_NAME) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
timer_.Start();
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index 3a0aaa70e7..be1ac9a18c 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/ieee754.h"
#include "src/base/safe_math.h"
+#include "src/codegen.h"
#include "src/crankshaft/hydrogen-infer-representation.h"
#include "src/double.h"
#include "src/elements.h"
@@ -44,6 +45,21 @@ namespace internal {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
+Representation RepresentationFromMachineType(MachineType type) {
+ if (type == MachineType::Int32()) {
+ return Representation::Integer32();
+ }
+
+ if (type == MachineType::TaggedSigned()) {
+ return Representation::Smi();
+ }
+
+ if (type == MachineType::Pointer()) {
+ return Representation::External();
+ }
+
+ return Representation::Tagged();
+}
Isolate* HValue::isolate() const {
DCHECK(block() != NULL);
@@ -808,9 +824,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kEnterInlined:
case HValue::kEnvironmentMarker:
case HValue::kForceRepresentation:
- case HValue::kGetCachedArrayIndex:
case HValue::kGoto:
- case HValue::kHasCachedArrayIndexAndBranch:
case HValue::kHasInstanceTypeAndBranch:
case HValue::kInnerAllocatedObject:
case HValue::kIsSmiAndBranch:
@@ -818,9 +832,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kIsUndetectableAndBranch:
case HValue::kLeaveInlined:
case HValue::kLoadFieldByIndex:
- case HValue::kLoadGlobalGeneric:
case HValue::kLoadNamedField:
- case HValue::kLoadNamedGeneric:
case HValue::kLoadRoot:
case HValue::kMathMinMax:
case HValue::kParameter:
@@ -864,7 +876,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kLoadContextSlot:
case HValue::kLoadFunctionPrototype:
case HValue::kLoadKeyed:
- case HValue::kLoadKeyedGeneric:
case HValue::kMathFloorOfDiv:
case HValue::kMaybeGrowElements:
case HValue::kMod:
@@ -1061,23 +1072,21 @@ std::ostream& HReturn::PrintDataTo(std::ostream& os) const { // NOLINT
Representation HBranch::observed_input_representation(int index) {
- if (expected_input_types_.Contains(ToBooleanICStub::NULL_TYPE) ||
- expected_input_types_.Contains(ToBooleanICStub::SPEC_OBJECT) ||
- expected_input_types_.Contains(ToBooleanICStub::STRING) ||
- expected_input_types_.Contains(ToBooleanICStub::SYMBOL) ||
- expected_input_types_.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected_input_types_ & (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
+ ToBooleanHint::kString | ToBooleanHint::kSymbol |
+ ToBooleanHint::kSimdValue)) {
return Representation::Tagged();
}
- if (expected_input_types_.Contains(ToBooleanICStub::UNDEFINED)) {
- if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected_input_types_ & ToBooleanHint::kUndefined) {
+ if (expected_input_types_ & ToBooleanHint::kHeapNumber) {
return Representation::Double();
}
return Representation::Tagged();
}
- if (expected_input_types_.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected_input_types_ & ToBooleanHint::kHeapNumber) {
return Representation::Double();
}
- if (expected_input_types_.Contains(ToBooleanICStub::SMI)) {
+ if (expected_input_types_ & ToBooleanHint::kSmallInteger) {
return Representation::Smi();
}
return Representation::None();
@@ -1483,8 +1492,8 @@ std::ostream& HChange::PrintDataTo(std::ostream& os) const { // NOLINT
if (CanTruncateToSmi()) os << " truncating-smi";
if (CanTruncateToInt32()) os << " truncating-int32";
+ if (CanTruncateToNumber()) os << " truncating-number";
if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
- if (CheckFlag(kAllowUndefinedAsNaN)) os << " allow-undefined-as-nan";
return os;
}
@@ -1495,8 +1504,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
if (val->IsChange()) val = HChange::cast(val)->value();
if (val->representation().IsSmiOrInteger32()) {
if (val->representation().Equals(representation())) return val;
- return Prepend(new(block()->zone()) HChange(
- val, representation(), false, false));
+ return Prepend(new (block()->zone())
+ HChange(val, representation(), false, false, true));
}
}
if (op() == kMathFloor && representation().IsSmiOrInteger32() &&
@@ -1511,8 +1520,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
// A change from an integer32 can be replaced by the integer32 value.
left = HChange::cast(left)->value();
} else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
- left = Prepend(new(block()->zone()) HChange(
- left, Representation::Integer32(), false, false));
+ left = Prepend(new (block()->zone()) HChange(
+ left, Representation::Integer32(), false, false, true));
} else {
return this;
}
@@ -1530,8 +1539,8 @@ HValue* HUnaryMathOperation::Canonicalize() {
// A change from an integer32 can be replaced by the integer32 value.
right = HChange::cast(right)->value();
} else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
- right = Prepend(new(block()->zone()) HChange(
- right, Representation::Integer32(), false, false));
+ right = Prepend(new (block()->zone()) HChange(
+ right, Representation::Integer32(), false, false, true));
} else {
return this;
}
@@ -2871,7 +2880,7 @@ void HCompareNumericAndBranch::InferRepresentation(
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
if (Token::IsOrderedRelationalCompareOp(token_)) {
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
}
}
ChangeRepresentation(rep);
@@ -2899,13 +2908,6 @@ std::ostream& HLoadNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
}
-std::ostream& HLoadNamedGeneric::PrintDataTo(
- std::ostream& os) const { // NOLINT
- Handle<String> n = Handle<String>::cast(name());
- return os << NameOf(object()) << "." << n->ToCString().get();
-}
-
-
std::ostream& HLoadKeyed::PrintDataTo(std::ostream& os) const { // NOLINT
if (!is_fixed_typed_array()) {
os << NameOf(elements());
@@ -2977,7 +2979,7 @@ bool HLoadKeyed::UsesMustHandleHole() const {
bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
return IsFastDoubleElementsKind(elements_kind()) &&
- CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
+ CheckUsesForFlag(HValue::kTruncatingToNumber);
}
@@ -2997,46 +2999,40 @@ bool HLoadKeyed::RequiresHoleCheck() const {
return !UsesMustHandleHole();
}
+HValue* HCallWithDescriptor::Canonicalize() {
+ if (kind() != Code::KEYED_LOAD_IC) return this;
-std::ostream& HLoadKeyedGeneric::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << NameOf(object()) << "[" << NameOf(key()) << "]";
-}
-
-
-HValue* HLoadKeyedGeneric::Canonicalize() {
// Recognize generic keyed loads that use property name generated
// by for-in statement as a key and rewrite them into fast property load
// by index.
- if (key()->IsLoadKeyed()) {
- HLoadKeyed* key_load = HLoadKeyed::cast(key());
+ typedef LoadWithVectorDescriptor Descriptor;
+ HValue* key = parameter(Descriptor::kName);
+ if (key->IsLoadKeyed()) {
+ HLoadKeyed* key_load = HLoadKeyed::cast(key);
if (key_load->elements()->IsForInCacheArray()) {
HForInCacheArray* names_cache =
HForInCacheArray::cast(key_load->elements());
- if (names_cache->enumerable() == object()) {
+ HValue* object = parameter(Descriptor::kReceiver);
+ if (names_cache->enumerable() == object) {
HForInCacheArray* index_cache =
names_cache->index_cache();
HCheckMapValue* map_check = HCheckMapValue::New(
block()->graph()->isolate(), block()->graph()->zone(),
- block()->graph()->GetInvalidContext(), object(),
- names_cache->map());
+ block()->graph()->GetInvalidContext(), object, names_cache->map());
HInstruction* index = HLoadKeyed::New(
block()->graph()->isolate(), block()->graph()->zone(),
block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
key_load->key(), nullptr, key_load->elements_kind());
map_check->InsertBefore(this);
index->InsertBefore(this);
- return Prepend(new(block()->zone()) HLoadFieldByIndex(
- object(), index));
+ return Prepend(new (block()->zone()) HLoadFieldByIndex(object, index));
}
}
}
-
return this;
}
-
std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const { // NOLINT
os << NameOf(object()) << access_ << " = " << NameOf(value());
if (NeedsWriteBarrier()) os << " (write-barrier)";
@@ -3074,12 +3070,6 @@ std::ostream& HTransitionElementsKind::PrintDataTo(
}
-std::ostream& HLoadGlobalGeneric::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << name()->ToCString().get() << " ";
-}
-
-
std::ostream& HInnerAllocatedObject::PrintDataTo(
std::ostream& os) const { // NOLINT
os << NameOf(base_object()) << " offset ";
@@ -3596,16 +3586,21 @@ HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
- if (c_right->DoubleValue() != 0) {
+ if (std::isnan(c_left->DoubleValue()) ||
+ std::isnan(c_right->DoubleValue())) {
+ return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
+ } else if (c_right->DoubleValue() != 0) {
double double_res = c_left->DoubleValue() / c_right->DoubleValue();
if (IsInt32Double(double_res)) {
return H_CONSTANT_INT(double_res);
}
return H_CONSTANT_DOUBLE(double_res);
- } else {
+ } else if (c_left->DoubleValue() != 0) {
int sign = Double(c_left->DoubleValue()).Sign() *
Double(c_right->DoubleValue()).Sign(); // Right could be -0.
return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
+ } else {
+ return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
}
}
}
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index cfede98039..9b9e6742e4 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -12,12 +12,12 @@
#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/bit-vector.h"
-#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/crankshaft/hydrogen-types.h"
#include "src/crankshaft/unique.h"
#include "src/deoptimizer.h"
#include "src/globals.h"
+#include "src/interface-descriptors.h"
#include "src/small-pointer-list.h"
#include "src/utils.h"
#include "src/zone/zone.h"
@@ -91,9 +91,7 @@ class SmallMapList;
V(ForceRepresentation) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InvokeFunction) \
@@ -105,11 +103,8 @@ class SmallMapList;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(LoadRoot) \
V(MathFloorOfDiv) \
V(MathMinMax) \
@@ -191,6 +186,7 @@ class SmallMapList;
enum PropertyAccessType { LOAD, STORE };
+Representation RepresentationFromMachineType(MachineType type);
class Range final : public ZoneObject {
public:
@@ -416,7 +412,7 @@ class HValue : public ZoneObject {
kLeftCanBeMinInt,
kLeftCanBeNegative,
kLeftCanBePositive,
- kAllowUndefinedAsNaN,
+ kTruncatingToNumber,
kIsArguments,
kTruncatingToInt32,
kAllUsesTruncatingToInt32,
@@ -490,9 +486,6 @@ class HValue : public ZoneObject {
virtual ~HValue() {}
virtual SourcePosition position() const { return SourcePosition::Unknown(); }
- virtual SourcePosition operand_position(int index) const {
- return position();
- }
HBasicBlock* block() const { return block_; }
void SetBlock(HBasicBlock* block);
@@ -952,99 +945,6 @@ std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
return new (zone) I(context, p1, p2, p3, p4, p5, p6); \
}
-
-// A helper class to represent per-operand position information attached to
-// the HInstruction in the compact form. Uses tagging to distinguish between
-// case when only instruction's position is available and case when operands'
-// positions are also available.
-// In the first case it contains intruction's position as a tagged value.
-// In the second case it points to an array which contains instruction's
-// position and operands' positions.
-class HPositionInfo {
- public:
- explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
-
- SourcePosition position() const {
- if (has_operand_positions()) {
- return operand_positions()[kInstructionPosIndex];
- }
- return SourcePosition::FromRaw(static_cast<int>(UntagPosition(data_)));
- }
-
- void set_position(SourcePosition pos) {
- if (has_operand_positions()) {
- operand_positions()[kInstructionPosIndex] = pos;
- } else {
- data_ = TagPosition(pos.raw());
- }
- }
-
- void ensure_storage_for_operand_positions(Zone* zone, int operand_count) {
- if (has_operand_positions()) {
- return;
- }
-
- const int length = kFirstOperandPosIndex + operand_count;
- SourcePosition* positions = zone->NewArray<SourcePosition>(length);
- for (int i = 0; i < length; i++) {
- positions[i] = SourcePosition::Unknown();
- }
-
- const SourcePosition pos = position();
- data_ = reinterpret_cast<intptr_t>(positions);
- set_position(pos);
-
- DCHECK(has_operand_positions());
- }
-
- SourcePosition operand_position(int idx) const {
- if (!has_operand_positions()) {
- return position();
- }
- return *operand_position_slot(idx);
- }
-
- void set_operand_position(int idx, SourcePosition pos) {
- *operand_position_slot(idx) = pos;
- }
-
- private:
- static const intptr_t kInstructionPosIndex = 0;
- static const intptr_t kFirstOperandPosIndex = 1;
-
- SourcePosition* operand_position_slot(int idx) const {
- DCHECK(has_operand_positions());
- return &(operand_positions()[kFirstOperandPosIndex + idx]);
- }
-
- bool has_operand_positions() const {
- return !IsTaggedPosition(data_);
- }
-
- SourcePosition* operand_positions() const {
- DCHECK(has_operand_positions());
- return reinterpret_cast<SourcePosition*>(data_);
- }
-
- static const intptr_t kPositionTag = 1;
- static const intptr_t kPositionShift = 1;
- static bool IsTaggedPosition(intptr_t val) {
- return (val & kPositionTag) != 0;
- }
- static intptr_t UntagPosition(intptr_t val) {
- DCHECK(IsTaggedPosition(val));
- return val >> kPositionShift;
- }
- static intptr_t TagPosition(intptr_t val) {
- const intptr_t result = (val << kPositionShift) | kPositionTag;
- DCHECK(UntagPosition(result) == val);
- return result;
- }
-
- intptr_t data_;
-};
-
-
class HInstruction : public HValue {
public:
HInstruction* next() const { return next_; }
@@ -1071,31 +971,17 @@ class HInstruction : public HValue {
}
// The position is a write-once variable.
- SourcePosition position() const override {
- return SourcePosition(position_.position());
- }
- bool has_position() const {
- return !position().IsUnknown();
- }
+ SourcePosition position() const override { return position_; }
+ bool has_position() const { return position_.IsKnown(); }
void set_position(SourcePosition position) {
- DCHECK(!has_position());
- DCHECK(!position.IsUnknown());
- position_.set_position(position);
- }
-
- SourcePosition operand_position(int index) const override {
- const SourcePosition pos = position_.operand_position(index);
- return pos.IsUnknown() ? position() : pos;
- }
- void set_operand_position(Zone* zone, int index, SourcePosition pos) {
- DCHECK(0 <= index && index < OperandCount());
- position_.ensure_storage_for_operand_positions(zone, OperandCount());
- position_.set_operand_position(index, pos);
+ DCHECK(position.IsKnown());
+ position_ = position;
}
bool Dominates(HInstruction* other);
bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); }
bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
+ bool CanTruncateToNumber() const { return CheckFlag(kTruncatingToNumber); }
virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
@@ -1114,7 +1000,7 @@ class HInstruction : public HValue {
: HValue(type),
next_(NULL),
previous_(NULL),
- position_(kNoSourcePosition) {
+ position_(SourcePosition::Unknown()) {
SetDependsOnFlag(kOsrEntries);
}
@@ -1128,7 +1014,7 @@ class HInstruction : public HValue {
HInstruction* next_;
HInstruction* previous_;
- HPositionInfo position_;
+ SourcePosition position_;
friend class HBasicBlock;
};
@@ -1353,9 +1239,9 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
class HBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanICStub::Types);
- DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanICStub::Types,
- HBasicBlock*, HBasicBlock*);
+ DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, ToBooleanHints);
+ DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, ToBooleanHints, HBasicBlock*,
+ HBasicBlock*);
Representation RequiredInputRepresentation(int index) override {
return Representation::None();
@@ -1366,22 +1252,18 @@ class HBranch final : public HUnaryControlInstruction {
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- ToBooleanICStub::Types expected_input_types() const {
- return expected_input_types_;
- }
+ ToBooleanHints expected_input_types() const { return expected_input_types_; }
DECLARE_CONCRETE_INSTRUCTION(Branch)
private:
- HBranch(HValue* value, ToBooleanICStub::Types expected_input_types =
- ToBooleanICStub::Types(),
+ HBranch(HValue* value,
+ ToBooleanHints expected_input_types = ToBooleanHint::kNone,
HBasicBlock* true_target = NULL, HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target),
- expected_input_types_(expected_input_types) {
- SetFlag(kAllowUndefinedAsNaN);
- }
+ expected_input_types_(expected_input_types) {}
- ToBooleanICStub::Types expected_input_types_;
+ ToBooleanHints expected_input_types_;
};
@@ -1575,13 +1457,10 @@ class HForceRepresentation final : public HTemplateInstruction<1> {
}
};
-
class HChange final : public HUnaryOperation {
public:
- HChange(HValue* value,
- Representation to,
- bool is_truncating_to_smi,
- bool is_truncating_to_int32)
+ HChange(HValue* value, Representation to, bool is_truncating_to_smi,
+ bool is_truncating_to_int32, bool is_truncating_to_number)
: HUnaryOperation(value) {
DCHECK(!value->representation().IsNone());
DCHECK(!to.IsNone());
@@ -1592,8 +1471,13 @@ class HChange final : public HUnaryOperation {
if (is_truncating_to_smi && to.IsSmi()) {
SetFlag(kTruncatingToSmi);
SetFlag(kTruncatingToInt32);
+ SetFlag(kTruncatingToNumber);
+ } else if (is_truncating_to_int32) {
+ SetFlag(kTruncatingToInt32);
+ SetFlag(kTruncatingToNumber);
+ } else if (is_truncating_to_number) {
+ SetFlag(kTruncatingToNumber);
}
- if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
if (value->representation().IsSmi() || value->type().IsSmi()) {
set_type(HType::Smi());
} else {
@@ -1602,10 +1486,6 @@ class HChange final : public HUnaryOperation {
}
}
- bool can_convert_undefined_to_nan() {
- return CheckUsesForFlag(kAllowUndefinedAsNaN);
- }
-
HType CalculateInferredType() override;
HValue* Canonicalize() override;
@@ -1651,7 +1531,7 @@ class HClampToUint8 final : public HUnaryOperation {
explicit HClampToUint8(HValue* value)
: HUnaryOperation(value) {
set_representation(Representation::Integer32());
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
SetFlag(kUseGVN);
}
@@ -1929,7 +1809,7 @@ class HEnterInlined final : public HTemplateInstruction<0> {
function_(function),
inlining_kind_(inlining_kind),
syntactic_tail_call_mode_(syntactic_tail_call_mode),
- inlining_id_(0),
+ inlining_id_(-1),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
return_targets_(2, zone) {}
@@ -2160,9 +2040,21 @@ class HCallWithDescriptor final : public HInstruction {
const Vector<HValue*>& operands,
TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
TailCallMode tail_call_mode = TailCallMode::kDisallow) {
- HCallWithDescriptor* res = new (zone)
- HCallWithDescriptor(target, argument_count, descriptor, operands,
- syntactic_tail_call_mode, tail_call_mode, zone);
+ HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+ Code::STUB, context, target, argument_count, descriptor, operands,
+ syntactic_tail_call_mode, tail_call_mode, zone);
+ return res;
+ }
+
+ static HCallWithDescriptor* New(
+ Isolate* isolate, Zone* zone, HValue* context, Code::Kind kind,
+ HValue* target, int argument_count, CallInterfaceDescriptor descriptor,
+ const Vector<HValue*>& operands,
+ TailCallMode syntactic_tail_call_mode = TailCallMode::kDisallow,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow) {
+ HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+ kind, context, target, argument_count, descriptor, operands,
+ syntactic_tail_call_mode, tail_call_mode, zone);
return res;
}
@@ -2194,6 +2086,8 @@ class HCallWithDescriptor final : public HInstruction {
}
bool IsTailCall() const { return tail_call_mode() == TailCallMode::kAllow; }
+ Code::Kind kind() const { return KindField::decode(bit_field_); }
+
virtual int argument_count() const {
return argument_count_;
}
@@ -2202,29 +2096,36 @@ class HCallWithDescriptor final : public HInstruction {
CallInterfaceDescriptor descriptor() const { return descriptor_; }
- HValue* target() {
- return OperandAt(0);
+ HValue* target() { return OperandAt(0); }
+ HValue* context() { return OperandAt(1); }
+ HValue* parameter(int index) {
+ DCHECK_LT(index, GetParameterCount());
+ return OperandAt(index + 2);
}
+ HValue* Canonicalize() override;
+
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
private:
// The argument count includes the receiver.
- HCallWithDescriptor(HValue* target, int argument_count,
- CallInterfaceDescriptor descriptor,
+ HCallWithDescriptor(Code::Kind kind, HValue* context, HValue* target,
+ int argument_count, CallInterfaceDescriptor descriptor,
const Vector<HValue*>& operands,
TailCallMode syntactic_tail_call_mode,
TailCallMode tail_call_mode, Zone* zone)
: descriptor_(descriptor),
- values_(GetParameterCount() + 1, zone), // +1 here is for target.
+ values_(GetParameterCount() + 2, zone), // +2 for context and target.
argument_count_(argument_count),
bit_field_(
TailCallModeField::encode(tail_call_mode) |
- SyntacticTailCallModeField::encode(syntactic_tail_call_mode)) {
+ SyntacticTailCallModeField::encode(syntactic_tail_call_mode) |
+ KindField::encode(kind)) {
DCHECK_EQ(operands.length(), GetParameterCount());
// We can only tail call without any stack arguments.
DCHECK(tail_call_mode != TailCallMode::kAllow || argument_count == 0);
AddOperand(target, zone);
+ AddOperand(context, zone);
for (int i = 0; i < operands.length(); i++) {
AddOperand(operands[i], zone);
}
@@ -2237,9 +2138,7 @@ class HCallWithDescriptor final : public HInstruction {
SetOperandAt(values_.length() - 1, v);
}
- int GetParameterCount() const {
- return descriptor_.GetParameterCount() + 1; // +1 here is for context.
- }
+ int GetParameterCount() const { return descriptor_.GetParameterCount(); }
void InternalSetOperandAt(int index, HValue* value) final {
values_[index] = value;
@@ -2251,6 +2150,8 @@ class HCallWithDescriptor final : public HInstruction {
class TailCallModeField : public BitField<TailCallMode, 0, 1> {};
class SyntacticTailCallModeField
: public BitField<TailCallMode, TailCallModeField::kNext, 1> {};
+ class KindField
+ : public BitField<Code::Kind, SyntacticTailCallModeField::kNext, 5> {};
uint32_t bit_field_;
};
@@ -2484,7 +2385,7 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
UNREACHABLE();
}
SetFlag(kUseGVN);
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
}
bool IsDeletable() const override {
@@ -2898,7 +2799,6 @@ class HPhi final : public HValue {
: inputs_(2, zone), merged_index_(merged_index) {
DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex);
SetFlag(kFlexibleRepresentation);
- SetFlag(kAllowUndefinedAsNaN);
}
Representation RepresentationFromInputs() override;
@@ -3463,12 +3363,6 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return representation();
}
- void SetOperandPositions(Zone* zone, SourcePosition left_pos,
- SourcePosition right_pos) {
- set_operand_position(zone, 1, left_pos);
- set_operand_position(zone, 2, right_pos);
- }
-
bool RightIsPowerOf2() {
if (!right()->IsInteger32Constant()) return false;
int32_t value = right()->GetInteger32Constant();
@@ -3714,7 +3608,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
: HBinaryOperation(context, left, right, type) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
SetAllSideEffects();
}
@@ -3777,7 +3671,7 @@ class HMathFloorOfDiv final : public HBinaryOperation {
SetFlag(kLeftCanBeMinInt);
SetFlag(kLeftCanBeNegative);
SetFlag(kLeftCanBePositive);
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
}
Range* InferRange(Zone* zone) override;
@@ -3788,11 +3682,12 @@ class HMathFloorOfDiv final : public HBinaryOperation {
class HArithmeticBinaryOperation : public HBinaryOperation {
public:
- HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, HType::TaggedNumber()) {
+ HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
+ HType type = HType::TaggedNumber())
+ : HBinaryOperation(context, left, right, type) {
SetAllSideEffects();
SetFlag(kFlexibleRepresentation);
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
}
void RepresentationChanged(Representation to) override {
@@ -3880,12 +3775,6 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
- void SetOperandPositions(Zone* zone, SourcePosition left_pos,
- SourcePosition right_pos) {
- set_operand_position(zone, 0, left_pos);
- set_operand_position(zone, 1, right_pos);
- }
-
DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
private:
@@ -3925,7 +3814,6 @@ class HCompareHoleAndBranch final : public HUnaryControlInstruction {
HBasicBlock* false_target = NULL)
: HUnaryControlInstruction(value, true_target, false_target) {
SetFlag(kFlexibleRepresentation);
- SetFlag(kAllowUndefinedAsNaN);
}
};
@@ -4128,45 +4016,6 @@ class HHasInstanceTypeAndBranch final : public HUnaryControlInstruction {
InstanceType to_; // Inclusive range, not all combinations work.
};
-
-class HHasCachedArrayIndexAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
- private:
- explicit HHasCachedArrayIndexAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) { }
-};
-
-
-class HGetCachedArrayIndex final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
class HClassOfTestAndBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
@@ -4321,12 +4170,12 @@ class HAdd final : public HArithmeticBinaryOperation {
}
if (to.IsTagged()) {
SetChangesFlag(kNewSpacePromotion);
- ClearFlag(kAllowUndefinedAsNaN);
+ ClearFlag(kTruncatingToNumber);
}
if (!right()->type().IsTaggedNumber() &&
!right()->representation().IsDouble() &&
!right()->representation().IsSmiOrInteger32()) {
- ClearFlag(kAllowUndefinedAsNaN);
+ ClearFlag(kTruncatingToNumber);
}
}
@@ -4354,7 +4203,7 @@ class HAdd final : public HArithmeticBinaryOperation {
private:
HAdd(HValue* context, HValue* left, HValue* right,
ExternalAddType external_add_type = NoExternalAdd)
- : HArithmeticBinaryOperation(context, left, right),
+ : HArithmeticBinaryOperation(context, left, right, HType::Tagged()),
external_add_type_(external_add_type) {
SetFlag(kCanOverflow);
switch (external_add_type_) {
@@ -4826,48 +4675,6 @@ class HUnknownOSRValue final : public HTemplateInstruction<0> {
HPhi* incoming_value_;
};
-class HLoadGlobalGeneric final : public HTemplateInstruction<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadGlobalGeneric,
- Handle<String>, TypeofMode,
- Handle<TypeFeedbackVector>,
- FeedbackVectorSlot);
-
- HValue* context() { return OperandAt(0); }
- Handle<String> name() const { return name_; }
- TypeofMode typeof_mode() const { return typeof_mode_; }
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
-
- private:
- HLoadGlobalGeneric(HValue* context, Handle<String> name,
- TypeofMode typeof_mode, Handle<TypeFeedbackVector> vector,
- FeedbackVectorSlot slot)
- : name_(name),
- typeof_mode_(typeof_mode),
- feedback_vector_(vector),
- slot_(slot) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Handle<String> name_;
- TypeofMode typeof_mode_;
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
-};
-
class HAllocate final : public HTemplateInstruction<3> {
public:
static bool CompatibleInstanceTypes(InstanceType type1,
@@ -5408,11 +5215,6 @@ class HObjectAccess final {
SharedFunctionInfo::kOptimizedCodeMapOffset);
}
- static HObjectAccess ForOptimizedCodeMapSharedCode() {
- return HObjectAccess(kInobject, FixedArray::OffsetOfElementAt(
- SharedFunctionInfo::kSharedCodeIndex));
- }
-
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}
@@ -5852,46 +5654,6 @@ class HLoadNamedField final : public HTemplateInstruction<2> {
};
-class HLoadNamedGeneric final : public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
- Handle<Name>,
- Handle<TypeFeedbackVector>,
- FeedbackVectorSlot);
-
- HValue* context() const { return OperandAt(0); }
- HValue* object() const { return OperandAt(1); }
- Handle<Name> name() const { return name_; }
-
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
-
- private:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
- Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
- : name_(name), feedback_vector_(vector), slot_(slot) {
- SetOperandAt(0, context);
- SetOperandAt(1, object);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
- }
-
- Handle<Name> name_;
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
-};
-
-
class HLoadFunctionPrototype final : public HUnaryOperation {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
@@ -6128,47 +5890,6 @@ class HLoadKeyed final : public HTemplateInstruction<4>,
};
-class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
- HValue*,
- Handle<TypeFeedbackVector>,
- FeedbackVectorSlot);
- HValue* object() const { return OperandAt(0); }
- HValue* key() const { return OperandAt(1); }
- HValue* context() const { return OperandAt(2); }
- FeedbackVectorSlot slot() const { return slot_; }
- Handle<TypeFeedbackVector> feedback_vector() const {
- return feedback_vector_;
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- Representation RequiredInputRepresentation(int index) override {
- // tagged[tagged]
- return Representation::Tagged();
- }
-
- HValue* Canonicalize() override;
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
-
- private:
- HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
- Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
- : feedback_vector_(vector), slot_(slot) {
- set_representation(Representation::Tagged());
- SetOperandAt(0, obj);
- SetOperandAt(1, key);
- SetOperandAt(2, context);
- SetAllSideEffects();
- }
-
- Handle<TypeFeedbackVector> feedback_vector_;
- FeedbackVectorSlot slot_;
-};
-
-
// Indicates whether the store is a store to an entry that was previously
// initialized or not.
enum StoreFieldOrKeyedMode {
@@ -6488,7 +6209,7 @@ class HStoreKeyed final : public HTemplateInstruction<4>,
} else if (is_fixed_typed_array()) {
SetChangesFlag(kTypedArrayElements);
SetChangesFlag(kExternalMemory);
- SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kTruncatingToNumber);
} else {
SetChangesFlag(kArrayElements);
}
diff --git a/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc b/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc
deleted file mode 100644
index a706d91323..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.cc
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/crankshaft/hydrogen-mark-deoptimize.h"
-
-namespace v8 {
-namespace internal {
-
-void HMarkDeoptimizeOnUndefinedPhase::Run() {
- const ZoneList<HPhi*>* phi_list = graph()->phi_list();
- for (int i = 0; i < phi_list->length(); i++) {
- HPhi* phi = phi_list->at(i);
- if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN) &&
- !phi->CheckUsesForFlag(HValue::kAllowUndefinedAsNaN)) {
- ProcessPhi(phi);
- }
- }
-}
-
-
-void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
- DCHECK(phi->CheckFlag(HValue::kAllowUndefinedAsNaN));
- DCHECK(worklist_.is_empty());
-
- // Push the phi onto the worklist
- phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
- worklist_.Add(phi, zone());
-
- // Process all phis that can reach this phi
- while (!worklist_.is_empty()) {
- phi = worklist_.RemoveLast();
- for (int i = phi->OperandCount() - 1; i >= 0; --i) {
- HValue* input = phi->OperandAt(i);
- if (input->IsPhi() && input->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
- input->ClearFlag(HValue::kAllowUndefinedAsNaN);
- worklist_.Add(HPhi::cast(input), zone());
- }
- }
- }
-}
-
-
-void HComputeChangeUndefinedToNaN::Run() {
- const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
- for (int i = 0; i < blocks->length(); ++i) {
- const HBasicBlock* block(blocks->at(i));
- for (HInstruction* current = block->first(); current != NULL; ) {
- HInstruction* next = current->next();
- if (current->IsChange()) {
- if (HChange::cast(current)->can_convert_undefined_to_nan()) {
- current->SetFlag(HValue::kAllowUndefinedAsNaN);
- }
- }
- current = next;
- }
- }
-}
-
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h b/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h
deleted file mode 100644
index 45d40acd95..0000000000
--- a/deps/v8/src/crankshaft/hydrogen-mark-deoptimize.h
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
-#define V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
-
-#include "src/crankshaft/hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Compute DeoptimizeOnUndefined flag for phis. Any phi that can reach a use
-// with DeoptimizeOnUndefined set must have DeoptimizeOnUndefined set.
-// Currently only HCompareNumericAndBranch, with double input representation,
-// has this flag set. The flag is used by HChange tagged->double, which must
-// deoptimize if one of its uses has this flag set.
-class HMarkDeoptimizeOnUndefinedPhase : public HPhase {
- public:
- explicit HMarkDeoptimizeOnUndefinedPhase(HGraph* graph)
- : HPhase("H_Mark deoptimize on undefined", graph),
- worklist_(16, zone()) {}
-
- void Run();
-
- private:
- void ProcessPhi(HPhi* phi);
-
- // Preallocated worklist used as an optimization so we don't have
- // to allocate a new ZoneList for every ProcessPhi() invocation.
- ZoneList<HPhi*> worklist_;
-
- DISALLOW_COPY_AND_ASSIGN(HMarkDeoptimizeOnUndefinedPhase);
-};
-
-
-class HComputeChangeUndefinedToNaN : public HPhase {
- public:
- explicit HComputeChangeUndefinedToNaN(HGraph* graph)
- : HPhase("H_Compute change undefined to nan", graph) {}
-
- void Run();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HComputeChangeUndefinedToNaN);
-};
-
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
diff --git a/deps/v8/src/crankshaft/hydrogen-osr.cc b/deps/v8/src/crankshaft/hydrogen-osr.cc
index 8de3ac0705..607bfbd85d 100644
--- a/deps/v8/src/crankshaft/hydrogen-osr.cc
+++ b/deps/v8/src/crankshaft/hydrogen-osr.cc
@@ -30,7 +30,7 @@ HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
osr_entry_ = graph->CreateBasicBlock();
HValue* true_value = graph->GetConstantTrue();
- HBranch* test = builder_->New<HBranch>(true_value, ToBooleanICStub::Types(),
+ HBranch* test = builder_->New<HBranch>(true_value, ToBooleanHint::kNone,
non_osr_entry, osr_entry_);
builder_->FinishCurrentBlock(test);
diff --git a/deps/v8/src/crankshaft/hydrogen-representation-changes.cc b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
index 32b614c56c..4d74df4952 100644
--- a/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
+++ b/deps/v8/src/crankshaft/hydrogen-representation-changes.cc
@@ -24,6 +24,8 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
HInstruction* new_value = NULL;
bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
+ bool is_truncating_to_number =
+ use_value->CheckFlag(HValue::kTruncatingToNumber);
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
@@ -36,14 +38,9 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
}
if (new_value == NULL) {
- new_value = new(graph()->zone()) HChange(
- value, to, is_truncating_to_smi, is_truncating_to_int);
- if (!use_value->operand_position(use_index).IsUnknown()) {
- new_value->set_position(use_value->operand_position(use_index));
- } else {
- DCHECK(!FLAG_hydrogen_track_positions ||
- !graph()->info()->IsOptimizing());
- }
+ new_value = new (graph()->zone())
+ HChange(value, to, is_truncating_to_smi, is_truncating_to_int,
+ is_truncating_to_number);
}
new_value->InsertBefore(next);
@@ -116,10 +113,15 @@ void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
void HRepresentationChangesPhase::Run() {
- // Compute truncation flag for phis: Initially assume that all
- // int32-phis allow truncation and iteratively remove the ones that
- // are used in an operation that does not allow a truncating
- // conversion.
+ // Compute truncation flag for phis:
+ //
+ // - Initially assume that all phis allow truncation to number and iteratively
+ // remove the ones that are used in an operation that not do an implicit
+ // ToNumber conversion.
+ // - Also assume that all Integer32 phis allow ToInt32 truncation and all
+ // Smi phis allow truncation to Smi.
+ //
+ ZoneList<HPhi*> number_worklist(8, zone());
ZoneList<HPhi*> int_worklist(8, zone());
ZoneList<HPhi*> smi_worklist(8, zone());
@@ -132,23 +134,34 @@ void HRepresentationChangesPhase::Run() {
phi->SetFlag(HValue::kTruncatingToSmi);
phi->SetFlag(HValue::kTruncatingToInt32);
}
+ phi->SetFlag(HValue::kTruncatingToNumber);
}
for (int i = 0; i < phi_list->length(); i++) {
HPhi* phi = phi_list->at(i);
HValue* value = NULL;
- if (phi->representation().IsSmiOrInteger32() &&
- !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
+
+ if (phi->CheckFlag(HValue::kTruncatingToNumber) &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToNumber, &value)) {
+ number_worklist.Add(phi, zone());
+ phi->ClearFlag(HValue::kTruncatingToNumber);
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ phi->ClearFlag(HValue::kTruncatingToSmi);
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Number because of #%d %s\n",
+ phi->id(), value->id(), value->Mnemonic());
+ }
+ } else if (phi->representation().IsSmiOrInteger32() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
int_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToInt32);
+ phi->ClearFlag(HValue::kTruncatingToSmi);
if (FLAG_trace_representation) {
PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
phi->id(), value->id(), value->Mnemonic());
}
- }
-
- if (phi->representation().IsSmi() &&
- !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
+ } else if (phi->representation().IsSmi() &&
+ !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
smi_worklist.Add(phi, zone());
phi->ClearFlag(HValue::kTruncatingToSmi);
if (FLAG_trace_representation) {
@@ -158,6 +171,23 @@ void HRepresentationChangesPhase::Run() {
}
}
+ while (!number_worklist.is_empty()) {
+ HPhi* current = number_worklist.RemoveLast();
+ for (int i = current->OperandCount() - 1; i >= 0; --i) {
+ HValue* input = current->OperandAt(i);
+ if (input->IsPhi() && input->CheckFlag(HValue::kTruncatingToNumber)) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating Number because of #%d %s\n",
+ input->id(), current->id(), current->Mnemonic());
+ }
+ input->ClearFlag(HValue::kTruncatingToNumber);
+ input->ClearFlag(HValue::kTruncatingToInt32);
+ input->ClearFlag(HValue::kTruncatingToSmi);
+ number_worklist.Add(HPhi::cast(input), zone());
+ }
+ }
+ }
+
while (!int_worklist.is_empty()) {
HPhi* current = int_worklist.RemoveLast();
for (int i = 0; i < current->OperandCount(); ++i) {
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index 8d7b4797c5..754da77c94 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -23,7 +23,6 @@
#include "src/crankshaft/hydrogen-infer-representation.h"
#include "src/crankshaft/hydrogen-infer-types.h"
#include "src/crankshaft/hydrogen-load-elimination.h"
-#include "src/crankshaft/hydrogen-mark-deoptimize.h"
#include "src/crankshaft/hydrogen-mark-unreachable.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/hydrogen-range-analysis.h"
@@ -87,7 +86,7 @@ class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
- if (!old_position.IsUnknown()) { \
+ if (old_position.IsKnown()) { \
set_source_position(old_position); \
} \
}
@@ -102,7 +101,7 @@ class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
SetSourcePosition(node->position()); \
} \
HOptimizedGraphBuilder::Visit##type(node); \
- if (!old_position.IsUnknown()) { \
+ if (old_position.IsKnown()) { \
set_source_position(old_position); \
} \
}
@@ -313,7 +312,7 @@ void HBasicBlock::AddInstruction(HInstruction* instr, SourcePosition position) {
DCHECK(!instr->IsLinked());
DCHECK(!IsFinished());
- if (!position.IsUnknown()) {
+ if (position.IsKnown()) {
instr->set_position(position);
}
if (first_ == NULL) {
@@ -321,7 +320,7 @@ void HBasicBlock::AddInstruction(HInstruction* instr, SourcePosition position) {
DCHECK(!last_environment()->ast_id().IsNone());
HBlockEntry* entry = new(zone()) HBlockEntry();
entry->InitializeAsFirst(this);
- if (!position.IsUnknown()) {
+ if (position.IsKnown()) {
entry->set_position(position);
} else {
DCHECK(!FLAG_hydrogen_track_positions ||
@@ -1088,8 +1087,7 @@ void HGraphBuilder::IfBuilder::Then() {
// so that the graph builder visits it and sees any live range extending
// constructs within it.
HConstant* constant_false = builder()->graph()->GetConstantFalse();
- ToBooleanICStub::Types boolean_type = ToBooleanICStub::Types();
- boolean_type.Add(ToBooleanICStub::BOOLEAN);
+ ToBooleanHints boolean_type = ToBooleanHint::kBoolean;
HBranch* branch = builder()->New<HBranch>(
constant_false, boolean_type, first_true_block_, first_false_block_);
builder()->FinishCurrentBlock(branch);
@@ -1366,7 +1364,8 @@ HGraph* HGraphBuilder::CreateGraph() {
graph_ = new (zone()) HGraph(info_, descriptor_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
if (!info_->IsStub() && is_tracking_positions()) {
- TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown());
+ TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown(),
+ SourcePosition::kNotInlined);
}
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
@@ -1375,12 +1374,11 @@ HGraph* HGraphBuilder::CreateGraph() {
return graph_;
}
-int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position) {
+void HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ SourcePosition position,
+ int inlining_id) {
DCHECK(is_tracking_positions());
- int inline_id = static_cast<int>(graph()->inlined_function_infos().size());
- HInlinedFunctionInfo info(shared->start_position());
if (!shared->script()->IsUndefined(isolate())) {
Handle<Script> script(Script::cast(shared->script()), isolate());
@@ -1394,7 +1392,7 @@ int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
os << String::cast(source_name)->ToCString().get() << ":";
}
os << shared->DebugName()->ToCString().get() << ") id{";
- os << info_->optimization_id() << "," << inline_id << "} ---\n";
+ os << info_->optimization_id() << "," << inlining_id << "} ---\n";
{
DisallowHeapAllocation no_allocation;
int start = shared->start_position();
@@ -1410,23 +1408,19 @@ int HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
}
}
- graph()->inlined_function_infos().push_back(info);
-
- if (FLAG_hydrogen_track_positions && inline_id != 0) {
+ if (FLAG_hydrogen_track_positions &&
+ inlining_id != SourcePosition::kNotInlined) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
- << info_->optimization_id() << "," << inline_id << "} AS " << inline_id
- << " AT " << position << std::endl;
+ << info_->optimization_id() << "," << inlining_id << "} AS "
+ << inlining_id << " AT " << position.ScriptOffset() << std::endl;
}
-
- return inline_id;
}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
DCHECK(current_block() != NULL);
- DCHECK(!FLAG_hydrogen_track_positions ||
- !position_.IsUnknown() ||
+ DCHECK(!FLAG_hydrogen_track_positions || position_.IsKnown() ||
!info_->IsOptimizing());
current_block()->AddInstruction(instr, source_position());
if (graph()->IsInsideNoSideEffectsScope()) {
@@ -1437,9 +1431,8 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
- DCHECK(!FLAG_hydrogen_track_positions ||
- !info_->IsOptimizing() ||
- !position_.IsUnknown());
+ DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+ position_.IsKnown());
current_block()->Finish(last, source_position());
if (last->IsReturn() || last->IsAbnormalExit()) {
set_current_block(NULL);
@@ -1449,7 +1442,7 @@ void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
- !position_.IsUnknown());
+ position_.IsKnown());
current_block()->FinishExit(instruction, source_position());
if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
set_current_block(NULL);
@@ -1647,190 +1640,6 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
return environment()->Pop();
}
-
-void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
- int bit_field_mask) {
- // Check that the object isn't a smi.
- Add<HCheckHeapObject>(receiver);
-
- // Get the map of the receiver.
- HValue* map =
- Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
-
- // Check the instance type and if an access check is needed, this can be
- // done with a single load, since both bytes are adjacent in the map.
- HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
- HValue* instance_type_and_bit_field =
- Add<HLoadNamedField>(map, nullptr, access);
-
- HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
- HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
- instance_type_and_bit_field,
- mask);
- HValue* sub_result = AddUncasted<HSub>(and_result,
- Add<HConstant>(JS_OBJECT_TYPE));
- Add<HBoundsCheck>(sub_result,
- Add<HConstant>(LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE));
-}
-
-
-void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
- HIfContinuation* join_continuation) {
- // The sometimes unintuitively backward ordering of the ifs below is
- // convoluted, but necessary. All of the paths must guarantee that the
- // if-true of the continuation returns a smi element index and the if-false of
- // the continuation returns either a symbol or a unique string key. All other
- // object types cause a deopt to fall back to the runtime.
-
- IfBuilder key_smi_if(this);
- key_smi_if.If<HIsSmiAndBranch>(key);
- key_smi_if.Then();
- {
- Push(key); // Nothing to do, just continue to true of continuation.
- }
- key_smi_if.Else();
- {
- HValue* map = Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForMap());
- HValue* instance_type =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
-
- // Non-unique string, check for a string with a hash code that is actually
- // an index.
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- IfBuilder not_string_or_name_if(this);
- not_string_or_name_if.If<HCompareNumericAndBranch>(
- instance_type,
- Add<HConstant>(LAST_UNIQUE_NAME_TYPE),
- Token::GT);
-
- not_string_or_name_if.Then();
- {
- // Non-smi, non-Name, non-String: Try to convert to smi in case of
- // HeapNumber.
- // TODO(danno): This could call some variant of ToString
- Push(AddUncasted<HForceRepresentation>(key, Representation::Smi()));
- }
- not_string_or_name_if.Else();
- {
- // String or Name: check explicitly for Name, they can short-circuit
- // directly to unique non-index key path.
- IfBuilder not_symbol_if(this);
- not_symbol_if.If<HCompareNumericAndBranch>(
- instance_type,
- Add<HConstant>(SYMBOL_TYPE),
- Token::NE);
-
- not_symbol_if.Then();
- {
- // String: check whether the String is a String of an index. If it is,
- // extract the index value from the hash.
- HValue* hash = Add<HLoadNamedField>(key, nullptr,
- HObjectAccess::ForNameHashField());
- HValue* not_index_mask = Add<HConstant>(static_cast<int>(
- String::kContainsCachedArrayIndexMask));
-
- HValue* not_index_test = AddUncasted<HBitwise>(
- Token::BIT_AND, hash, not_index_mask);
-
- IfBuilder string_index_if(this);
- string_index_if.If<HCompareNumericAndBranch>(not_index_test,
- graph()->GetConstant0(),
- Token::EQ);
- string_index_if.Then();
- {
- // String with index in hash: extract string and merge to index path.
- Push(BuildDecodeField<String::ArrayIndexValueBits>(hash));
- }
- string_index_if.Else();
- {
- // Key is a non-index String, check for uniqueness/internalization.
- // If it's not internalized yet, internalize it now.
- HValue* not_internalized_bit = AddUncasted<HBitwise>(
- Token::BIT_AND,
- instance_type,
- Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
-
- IfBuilder internalized(this);
- internalized.If<HCompareNumericAndBranch>(not_internalized_bit,
- graph()->GetConstant0(),
- Token::EQ);
- internalized.Then();
- Push(key);
-
- internalized.Else();
- Add<HPushArguments>(key);
- HValue* intern_key = Add<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kInternalizeString), 1);
- Push(intern_key);
-
- internalized.End();
- // Key guaranteed to be a unique string
- }
- string_index_if.JoinContinuation(join_continuation);
- }
- not_symbol_if.Else();
- {
- Push(key); // Key is symbol
- }
- not_symbol_if.JoinContinuation(join_continuation);
- }
- not_string_or_name_if.JoinContinuation(join_continuation);
- }
- key_smi_if.JoinContinuation(join_continuation);
-}
-
-
-void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
- // Get the the instance type of the receiver, and make sure that it is
- // not one of the global object types.
- HValue* map =
- Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
- HValue* instance_type =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
- HValue* global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
-
- IfBuilder if_global_object(this);
- if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
- Token::EQ);
- if_global_object.ThenDeopt(DeoptimizeReason::kReceiverWasAGlobalObject);
- if_global_object.End();
-}
-
-
-void HGraphBuilder::BuildTestForDictionaryProperties(
- HValue* object,
- HIfContinuation* continuation) {
- HValue* properties = Add<HLoadNamedField>(
- object, nullptr, HObjectAccess::ForPropertiesPointer());
- HValue* properties_map =
- Add<HLoadNamedField>(properties, nullptr, HObjectAccess::ForMap());
- HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
- IfBuilder builder(this);
- builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
- builder.CaptureContinuation(continuation);
-}
-
-
-HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
- HValue* key) {
- // Load the map of the receiver, compute the keyed lookup cache hash
- // based on 32 bits of the map pointer and the string hash.
- HValue* object_map =
- Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMapAsInteger32());
- HValue* shifted_map = AddUncasted<HShr>(
- object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
- HValue* string_hash =
- Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForStringHashField());
- HValue* shifted_hash = AddUncasted<HShr>(
- string_hash, Add<HConstant>(String::kHashShift));
- HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
- shifted_hash);
- int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- return AddUncasted<HBitwise>(Token::BIT_AND, xor_result,
- Add<HConstant>(mask));
-}
-
-
HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
HValue* seed = Add<HConstant>(seed_value);
@@ -1997,7 +1806,6 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
return Pop();
}
-
HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
HValue* done) {
NoObservableSideEffectsScope scope(this);
@@ -2029,67 +1837,6 @@ HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
}
-HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
- HValue* index,
- HValue* input) {
- NoObservableSideEffectsScope scope(this);
- HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
- Add<HBoundsCheck>(length, max_length);
-
- // Generate size calculation code here in order to make it dominate
- // the JSRegExpResult allocation.
- ElementsKind elements_kind = FAST_ELEMENTS;
- HValue* size = BuildCalculateElementsSize(elements_kind, length);
-
- // Allocate the JSRegExpResult and the FixedArray in one step.
- HValue* result =
- Add<HAllocate>(Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
- NOT_TENURED, JS_ARRAY_TYPE, graph()->GetConstant0());
-
- // Initialize the JSRegExpResult header.
- HValue* native_context = Add<HLoadNamedField>(
- context(), nullptr,
- HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
- Add<HStoreNamedField>(
- result, HObjectAccess::ForMap(),
- Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
- HConstant* empty_fixed_array =
- Add<HConstant>(isolate()->factory()->empty_fixed_array());
- Add<HStoreNamedField>(
- result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
- empty_fixed_array);
- Add<HStoreNamedField>(
- result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
- empty_fixed_array);
- Add<HStoreNamedField>(
- result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length);
-
- // Initialize the additional fields.
- Add<HStoreNamedField>(
- result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kIndexOffset),
- index);
- Add<HStoreNamedField>(
- result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset),
- input);
-
- // Allocate and initialize the elements header.
- HAllocate* elements = BuildAllocateElements(elements_kind, size);
- BuildInitializeElementsHeader(elements, elements_kind, length);
-
- Add<HStoreNamedField>(
- result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
- elements);
-
- // Initialize the elements contents with undefined.
- BuildFillElementsWithValue(
- elements, elements_kind, graph()->GetConstant0(), length,
- graph()->GetConstantUndefined());
-
- return result;
-}
-
HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) {
NoObservableSideEffectsScope scope(this);
@@ -2232,12 +1979,13 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, AstType* type) {
}
HValue* HGraphBuilder::BuildToNumber(HValue* input) {
- if (input->type().IsTaggedNumber()) {
+ if (input->type().IsTaggedNumber() ||
+ input->representation().IsSpecialization()) {
return input;
}
Callable callable = CodeFactory::ToNumber(isolate());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), input};
+ HValue* values[] = {input};
HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
instr->set_type(HType::TaggedNumber());
@@ -3215,12 +2963,12 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
if_hole.Else();
HStoreKeyed* store =
Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
- store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ store->SetFlag(HValue::kTruncatingToNumber);
if_hole.End();
} else {
HStoreKeyed* store =
Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
- store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ store->SetFlag(HValue::kTruncatingToNumber);
}
builder.EndBody();
@@ -3373,7 +3121,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
bool track_positions)
: HGraphBuilder(info, CallInterfaceDescriptor(), track_positions),
function_state_(NULL),
- initial_function_state_(this, info, NORMAL_RETURN, 0,
+ initial_function_state_(this, info, NORMAL_RETURN, -1,
TailCallMode::kAllow),
ast_context_(NULL),
break_scope_(NULL),
@@ -3489,8 +3237,7 @@ HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
type_change_checksum_(0),
maximum_environment_size_(0),
no_side_effects_scope_count_(0),
- disallow_adding_new_values_(false),
- inlined_function_infos_(info->zone()) {
+ disallow_adding_new_values_(false) {
if (info->IsStub()) {
// For stubs, explicitly add the context to the environment.
start_environment_ =
@@ -3522,14 +3269,6 @@ void HGraph::FinalizeUniqueness() {
}
-int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
- return (FLAG_hydrogen_track_positions && !pos.IsUnknown())
- ? inlined_function_infos_.at(pos.inlining_id()).start_position +
- pos.position()
- : pos.raw();
-}
-
-
// Block ordering was implemented with two mutually recursive methods,
// HGraph::Postorder and HGraph::PostorderLoopBlocks.
// The recursion could lead to stack overflow so the algorithm has been
@@ -3954,9 +3693,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
if (owner->is_tracking_positions()) {
outer_source_position_ = owner->source_position();
- owner->EnterInlinedSource(
- info->shared_info()->start_position(),
- inlining_id);
+ owner->EnterInlinedSource(inlining_id);
owner->SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -3968,9 +3705,7 @@ FunctionState::~FunctionState() {
if (owner_->is_tracking_positions()) {
owner_->set_source_position(outer_source_position_);
- owner_->EnterInlinedSource(
- outer_->compilation_info()->shared_info()->start_position(),
- outer_->inlining_id());
+ owner_->EnterInlinedSource(outer_->inlining_id());
}
}
@@ -4181,7 +3916,7 @@ void TestContext::BuildBranch(HValue* value) {
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
- ToBooleanICStub::Types expected(condition()->to_boolean_types());
+ ToBooleanHints expected(condition()->to_boolean_types());
ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
}
@@ -4376,7 +4111,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
// This must happen after inferring representations.
Run<HMergeRemovableSimulatesPhase>();
- Run<HMarkDeoptimizeOnUndefinedPhase>();
Run<HRepresentationChangesPhase>();
Run<HInferTypesPhase>();
@@ -4396,8 +4130,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) {
Run<HRangeAnalysisPhase>();
- Run<HComputeChangeUndefinedToNaN>();
-
// Eliminate redundant stack checks on backwards branches.
Run<HStackCheckEliminationPhase>();
@@ -5261,11 +4993,10 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
}
set_current_block(if_slow);
{
- ForInFilterStub stub(isolate());
- HValue* values[] = {context(), key, enumerable};
- HConstant* stub_value = Add<HConstant>(stub.GetCode());
- Push(Add<HCallWithDescriptor>(stub_value, 0,
- stub.GetCallInterfaceDescriptor(),
+ Callable callable = CodeFactory::ForInFilter(isolate());
+ HValue* values[] = {key, enumerable};
+ HConstant* stub_value = Add<HConstant>(callable.code());
+ Push(Add<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
ArrayVector(values)));
Add<HSimulate>(stmt->FilterId());
FinishCurrentBlock(New<HCompareObjectEqAndBranch>(
@@ -5295,7 +5026,7 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
}
HBasicBlock* body_exit = JoinContinue(
- stmt, stmt->ContinueId(), current_block(), break_info.continue_block());
+ stmt, stmt->IncrementId(), current_block(), break_info.continue_block());
if (body_exit != NULL) {
set_current_block(body_exit);
@@ -5371,7 +5102,7 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
if (!expr->pretenure()) {
FastNewClosureStub stub(isolate());
FastNewClosureDescriptor descriptor(isolate());
- HValue* values[] = {context(), shared_info_value};
+ HValue* values[] = {shared_info_value};
HConstant* stub_value = Add<HConstant>(stub.GetCode());
instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
ArrayVector(values));
@@ -5449,12 +5180,16 @@ void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
}
}
+bool HOptimizedGraphBuilder::CanInlineGlobalPropertyAccess(
+ Variable* var, LookupIterator* it, PropertyAccessType access_type) {
+ if (var->is_this()) return false;
+ return CanInlineGlobalPropertyAccess(it, access_type);
+}
-HOptimizedGraphBuilder::GlobalPropertyAccess
-HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
- PropertyAccessType access_type) {
- if (var->is_this() || !current_info()->has_global_object()) {
- return kUseGeneric;
+bool HOptimizedGraphBuilder::CanInlineGlobalPropertyAccess(
+ LookupIterator* it, PropertyAccessType access_type) {
+ if (!current_info()->has_global_object()) {
+ return false;
}
switch (it->state()) {
@@ -5463,17 +5198,17 @@ HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
case LookupIterator::INTERCEPTOR:
case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::NOT_FOUND:
- return kUseGeneric;
+ return false;
case LookupIterator::DATA:
- if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
- if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return kUseGeneric;
- return kUseCell;
+ if (access_type == STORE && it->IsReadOnly()) return false;
+ if (!it->GetHolder<JSObject>()->IsJSGlobalObject()) return false;
+ return true;
case LookupIterator::JSPROXY:
case LookupIterator::TRANSITION:
UNREACHABLE();
}
UNREACHABLE();
- return kUseGeneric;
+ return false;
}
@@ -5489,6 +5224,55 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
return context;
}
+void HOptimizedGraphBuilder::InlineGlobalPropertyLoad(LookupIterator* it,
+ BailoutId ast_id) {
+ Handle<PropertyCell> cell = it->GetPropertyCell();
+ top_info()->dependencies()->AssumePropertyCell(cell);
+ auto cell_type = it->property_details().cell_type();
+ if (cell_type == PropertyCellType::kConstant ||
+ cell_type == PropertyCellType::kUndefined) {
+ Handle<Object> constant_object(cell->value(), isolate());
+ if (constant_object->IsConsString()) {
+ constant_object = String::Flatten(Handle<String>::cast(constant_object));
+ }
+ HConstant* constant = New<HConstant>(constant_object);
+ return ast_context()->ReturnInstruction(constant, ast_id);
+ } else {
+ auto access = HObjectAccess::ForPropertyCellValue();
+ UniqueSet<Map>* field_maps = nullptr;
+ if (cell_type == PropertyCellType::kConstantType) {
+ switch (cell->GetConstantType()) {
+ case PropertyCellConstantType::kSmi:
+ access = access.WithRepresentation(Representation::Smi());
+ break;
+ case PropertyCellConstantType::kStableMap: {
+ // Check that the map really is stable. The heap object could
+ // have mutated without the cell updating state. In that case,
+ // make no promises about the loaded value except that it's a
+ // heap object.
+ access = access.WithRepresentation(Representation::HeapObject());
+ Handle<Map> map(HeapObject::cast(cell->value())->map());
+ if (map->is_stable()) {
+ field_maps = new (zone())
+ UniqueSet<Map>(Unique<Map>::CreateImmovable(map), zone());
+ }
+ break;
+ }
+ }
+ }
+ HConstant* cell_constant = Add<HConstant>(cell);
+ HLoadNamedField* instr;
+ if (field_maps == nullptr) {
+ instr = New<HLoadNamedField>(cell_constant, nullptr, access);
+ } else {
+ instr = New<HLoadNamedField>(cell_constant, nullptr, access, field_maps,
+ HType::HeapObject());
+ }
+ instr->ClearDependsOnFlag(kInobjectFields);
+ instr->SetDependsOnFlag(kGlobalVars);
+ return ast_context()->ReturnInstruction(instr, ast_id);
+ }
+}
void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
DCHECK(!HasStackOverflow());
@@ -5537,62 +5321,23 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
LookupIterator it(global, variable->name(), LookupIterator::OWN);
- GlobalPropertyAccess type = LookupGlobalProperty(variable, &it, LOAD);
-
- if (type == kUseCell) {
- Handle<PropertyCell> cell = it.GetPropertyCell();
- top_info()->dependencies()->AssumePropertyCell(cell);
- auto cell_type = it.property_details().cell_type();
- if (cell_type == PropertyCellType::kConstant ||
- cell_type == PropertyCellType::kUndefined) {
- Handle<Object> constant_object(cell->value(), isolate());
- if (constant_object->IsConsString()) {
- constant_object =
- String::Flatten(Handle<String>::cast(constant_object));
- }
- HConstant* constant = New<HConstant>(constant_object);
- return ast_context()->ReturnInstruction(constant, expr->id());
- } else {
- auto access = HObjectAccess::ForPropertyCellValue();
- UniqueSet<Map>* field_maps = nullptr;
- if (cell_type == PropertyCellType::kConstantType) {
- switch (cell->GetConstantType()) {
- case PropertyCellConstantType::kSmi:
- access = access.WithRepresentation(Representation::Smi());
- break;
- case PropertyCellConstantType::kStableMap: {
- // Check that the map really is stable. The heap object could
- // have mutated without the cell updating state. In that case,
- // make no promises about the loaded value except that it's a
- // heap object.
- access =
- access.WithRepresentation(Representation::HeapObject());
- Handle<Map> map(HeapObject::cast(cell->value())->map());
- if (map->is_stable()) {
- field_maps = new (zone())
- UniqueSet<Map>(Unique<Map>::CreateImmovable(map), zone());
- }
- break;
- }
- }
- }
- HConstant* cell_constant = Add<HConstant>(cell);
- HLoadNamedField* instr;
- if (field_maps == nullptr) {
- instr = New<HLoadNamedField>(cell_constant, nullptr, access);
- } else {
- instr = New<HLoadNamedField>(cell_constant, nullptr, access,
- field_maps, HType::HeapObject());
- }
- instr->ClearDependsOnFlag(kInobjectFields);
- instr->SetDependsOnFlag(kGlobalVars);
- return ast_context()->ReturnInstruction(instr, expr->id());
- }
+ it.TryLookupCachedProperty();
+ if (CanInlineGlobalPropertyAccess(variable, &it, LOAD)) {
+ InlineGlobalPropertyLoad(&it, expr->id());
+ return;
} else {
Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
- HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
- variable->name(), ast_context()->typeof_mode(), vector,
- expr->VariableFeedbackSlot());
+
+ HValue* vector_value = Add<HConstant>(vector);
+ HValue* slot_value =
+ Add<HConstant>(vector->GetIndex(expr->VariableFeedbackSlot()));
+ Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(
+ isolate(), ast_context()->typeof_mode());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {slot_value, vector_value};
+ HCallWithDescriptor* instr = New<HCallWithDescriptor>(
+ Code::LOAD_GLOBAL_IC, stub, 0, callable.descriptor(),
+ ArrayVector(values));
return ast_context()->ReturnInstruction(instr, expr->id());
}
}
@@ -5648,9 +5393,9 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
Callable callable = CodeFactory::FastCloneRegExp(isolate());
- HValue* values[] = {
- context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
- Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
+ HValue* values[] = {AddThisFunction(), Add<HConstant>(expr->literal_index()),
+ Add<HConstant>(expr->pattern()),
+ Add<HConstant>(expr->flags())};
HConstant* stub_value = Add<HConstant>(callable.code());
HInstruction* instr = New<HCallWithDescriptor>(
stub_value, 0, callable.descriptor(), ArrayVector(values));
@@ -6216,7 +5961,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
DCHECK(field_type_.IsHeapObject());
// Add dependency on the map that introduced the field.
- top_info()->dependencies()->AssumeFieldType(GetFieldOwnerFromMap(map));
+ top_info()->dependencies()->AssumeFieldOwner(GetFieldOwnerFromMap(map));
return true;
}
@@ -6388,6 +6133,18 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
}
if (info->IsAccessorConstant()) {
+ MaybeHandle<Name> maybe_name =
+ FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
+ info->accessor());
+ if (!maybe_name.is_null()) {
+ Handle<Name> name = maybe_name.ToHandleChecked();
+ PropertyAccessInfo cache_info(this, LOAD, info->map(), name);
+ // Load new target.
+ if (cache_info.CanAccessMonomorphic()) {
+ return BuildLoadNamedField(&cache_info, checked_object);
+ }
+ }
+
Push(checked_object);
int argument_count = 1;
if (!info->IsLoad()) {
@@ -6683,6 +6440,67 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
expr->AssignmentId(), expr->IsUninitialized());
}
+HInstruction* HOptimizedGraphBuilder::InlineGlobalPropertyStore(
+ LookupIterator* it, HValue* value, BailoutId ast_id) {
+ Handle<PropertyCell> cell = it->GetPropertyCell();
+ top_info()->dependencies()->AssumePropertyCell(cell);
+ auto cell_type = it->property_details().cell_type();
+ if (cell_type == PropertyCellType::kConstant ||
+ cell_type == PropertyCellType::kUndefined) {
+ Handle<Object> constant(cell->value(), isolate());
+ if (value->IsConstant()) {
+ HConstant* c_value = HConstant::cast(value);
+ if (!constant.is_identical_to(c_value->handle(isolate()))) {
+ Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
+ Deoptimizer::EAGER);
+ }
+ } else {
+ HValue* c_constant = Add<HConstant>(constant);
+ IfBuilder builder(this);
+ if (constant->IsNumber()) {
+ builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
+ } else {
+ builder.If<HCompareObjectEqAndBranch>(value, c_constant);
+ }
+ builder.Then();
+ builder.Else();
+ Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
+ Deoptimizer::EAGER);
+ builder.End();
+ }
+ }
+ HConstant* cell_constant = Add<HConstant>(cell);
+ auto access = HObjectAccess::ForPropertyCellValue();
+ if (cell_type == PropertyCellType::kConstantType) {
+ switch (cell->GetConstantType()) {
+ case PropertyCellConstantType::kSmi:
+ access = access.WithRepresentation(Representation::Smi());
+ break;
+ case PropertyCellConstantType::kStableMap: {
+ // First check that the previous value of the {cell} still has the
+ // map that we are about to check the new {value} for. If not, then
+ // the stable map assumption was invalidated and we cannot continue
+ // with the optimized code.
+ Handle<HeapObject> cell_value(HeapObject::cast(cell->value()));
+ Handle<Map> cell_value_map(cell_value->map());
+ if (!cell_value_map->is_stable()) {
+ Bailout(kUnstableConstantTypeHeapObject);
+ return nullptr;
+ }
+ top_info()->dependencies()->AssumeMapStable(cell_value_map);
+ // Now check that the new {value} is a HeapObject with the same map
+ Add<HCheckHeapObject>(value);
+ value = Add<HCheckMaps>(value, cell_value_map);
+ access = access.WithRepresentation(Representation::HeapObject());
+ break;
+ }
+ }
+ }
+ HInstruction* instr = New<HStoreNamedField>(cell_constant, access, value);
+ instr->ClearChangesFlag(kInobjectFields);
+ instr->SetChangesFlag(kGlobalVars);
+ return instr;
+}
// Because not every expression has a position and there is not common
// superclass of Assignment and CountOperation, we cannot just pass the
@@ -6723,64 +6541,10 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
LookupIterator it(global, var->name(), LookupIterator::OWN);
- GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
- if (type == kUseCell) {
- Handle<PropertyCell> cell = it.GetPropertyCell();
- top_info()->dependencies()->AssumePropertyCell(cell);
- auto cell_type = it.property_details().cell_type();
- if (cell_type == PropertyCellType::kConstant ||
- cell_type == PropertyCellType::kUndefined) {
- Handle<Object> constant(cell->value(), isolate());
- if (value->IsConstant()) {
- HConstant* c_value = HConstant::cast(value);
- if (!constant.is_identical_to(c_value->handle(isolate()))) {
- Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
- Deoptimizer::EAGER);
- }
- } else {
- HValue* c_constant = Add<HConstant>(constant);
- IfBuilder builder(this);
- if (constant->IsNumber()) {
- builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
- } else {
- builder.If<HCompareObjectEqAndBranch>(value, c_constant);
- }
- builder.Then();
- builder.Else();
- Add<HDeoptimize>(DeoptimizeReason::kConstantGlobalVariableAssignment,
- Deoptimizer::EAGER);
- builder.End();
- }
- }
- HConstant* cell_constant = Add<HConstant>(cell);
- auto access = HObjectAccess::ForPropertyCellValue();
- if (cell_type == PropertyCellType::kConstantType) {
- switch (cell->GetConstantType()) {
- case PropertyCellConstantType::kSmi:
- access = access.WithRepresentation(Representation::Smi());
- break;
- case PropertyCellConstantType::kStableMap: {
- // First check that the previous value of the {cell} still has the
- // map that we are about to check the new {value} for. If not, then
- // the stable map assumption was invalidated and we cannot continue
- // with the optimized code.
- Handle<HeapObject> cell_value(HeapObject::cast(cell->value()));
- Handle<Map> cell_value_map(cell_value->map());
- if (!cell_value_map->is_stable()) {
- return Bailout(kUnstableConstantTypeHeapObject);
- }
- top_info()->dependencies()->AssumeMapStable(cell_value_map);
- // Now check that the new {value} is a HeapObject with the same map.
- Add<HCheckHeapObject>(value);
- value = Add<HCheckMaps>(value, cell_value_map);
- access = access.WithRepresentation(Representation::HeapObject());
- break;
- }
- }
- }
- HInstruction* instr = Add<HStoreNamedField>(cell_constant, access, value);
- instr->ClearChangesFlag(kInobjectFields);
- instr->SetChangesFlag(kGlobalVars);
+ if (CanInlineGlobalPropertyAccess(var, &it, STORE)) {
+ HInstruction* instr = InlineGlobalPropertyStore(&it, value, ast_id);
+ if (!instr) return;
+ AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
@@ -6796,10 +6560,9 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), function_language_mode());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), global_object, name,
- value, slot_value, vector_value};
+ HValue* values[] = {global_object, name, value, slot_value, vector_value};
HCallWithDescriptor* instr = Add<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
+ Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7098,36 +6861,35 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess,
Deoptimizer::SOFT);
}
- if (access_type == LOAD) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+
+ HValue* key = Add<HConstant>(name);
+ HValue* vector_value = Add<HConstant>(vector);
+ HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+ if (access_type == LOAD) {
+ HValue* values[] = {object, key, slot_value, vector_value};
if (!expr->AsProperty()->key()->IsPropertyName()) {
// It's possible that a keyed load of a constant string was converted
// to a named load. Here, at the last minute, we need to make sure to
// use a generic Keyed Load if we are using the type vector, because
// it has to share information with full code.
- HConstant* key = Add<HConstant>(name);
- HLoadKeyedGeneric* result =
- New<HLoadKeyedGeneric>(object, key, vector, slot);
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HCallWithDescriptor* result =
+ New<HCallWithDescriptor>(Code::KEYED_LOAD_IC, stub, 0,
+ callable.descriptor(), ArrayVector(values));
return result;
}
-
- HLoadNamedGeneric* result =
- New<HLoadNamedGeneric>(object, name, vector, slot);
+ Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HCallWithDescriptor* result = New<HCallWithDescriptor>(
+ Code::LOAD_IC, stub, 0, callable.descriptor(), ArrayVector(values));
return result;
- } else {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
-
- HValue* key = Add<HConstant>(name);
- HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
- HValue* values[] = {context(), object, key,
- value, slot_value, vector_value};
- if (current_feedback_vector()->GetKind(slot) ==
- FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ } else {
+ HValue* values[] = {object, key, value, slot_value, vector_value};
+ if (vector->GetKind(slot) == FeedbackVectorSlotKind::KEYED_STORE_IC) {
// It's possible that a keyed store of a constant string was converted
// to a named store. Here, at the last minute, we need to make sure to
// use a generic Keyed Store if we are using the type vector, because
@@ -7135,15 +6897,16 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), function_language_mode());
HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
+ HCallWithDescriptor* result =
+ New<HCallWithDescriptor>(Code::KEYED_STORE_IC, stub, 0,
+ callable.descriptor(), ArrayVector(values));
return result;
}
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), function_language_mode());
HValue* stub = Add<HConstant>(callable.code());
HCallWithDescriptor* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
+ Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
return result;
}
}
@@ -7152,23 +6915,28 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value) {
- Handle<TypeFeedbackVector> vector =
- handle(current_feedback_vector(), isolate());
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+ HValue* vector_value = Add<HConstant>(vector);
+ HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+
if (access_type == LOAD) {
- HLoadKeyedGeneric* result =
- New<HLoadKeyedGeneric>(object, key, vector, slot);
+ HValue* values[] = {object, key, slot_value, vector_value};
+
+ Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HCallWithDescriptor* result =
+ New<HCallWithDescriptor>(Code::KEYED_LOAD_IC, stub, 0,
+ callable.descriptor(), ArrayVector(values));
return result;
} else {
- HValue* vector_value = Add<HConstant>(vector);
- HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
- HValue* values[] = {context(), object, key,
- value, slot_value, vector_value};
+ HValue* values[] = {object, key, value, slot_value, vector_value};
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), function_language_mode());
HValue* stub = Add<HConstant>(callable.code());
- HCallWithDescriptor* result = New<HCallWithDescriptor>(
- stub, 0, callable.descriptor(), ArrayVector(values));
+ HCallWithDescriptor* result =
+ New<HCallWithDescriptor>(Code::KEYED_STORE_IC, stub, 0,
+ callable.descriptor(), ArrayVector(values));
return result;
}
}
@@ -7634,6 +7402,16 @@ void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
function_state()->set_arguments_elements(arguments_elements);
}
+bool HOptimizedGraphBuilder::IsAnyParameterContextAllocated() {
+ int count = current_info()->scope()->num_parameters();
+ for (int i = 0; i < count; ++i) {
+ if (current_info()->scope()->parameter(i)->location() ==
+ VariableLocation::CONTEXT) {
+ return true;
+ }
+ }
+ return false;
+}
bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
@@ -7665,6 +7443,10 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
result = New<HConstant>(argument_count);
}
} else {
+ // We need to take into account the KEYED_LOAD_IC feedback to guard the
+ // HBoundsCheck instructions below.
+ if (!expr->IsMonomorphic() && !expr->IsUninitialized()) return false;
+ if (IsAnyParameterContextAllocated()) return false;
CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
HValue* key = Pop();
@@ -7698,7 +7480,35 @@ HValue* HOptimizedGraphBuilder::BuildNamedAccess(
ComputeReceiverTypes(expr, object, &maps, this);
DCHECK(maps != NULL);
+ // Check for special case: Access via a single map to the global proxy
+ // can also be handled monomorphically.
if (maps->length() > 0) {
+ Handle<Object> map_constructor =
+ handle(maps->first()->GetConstructor(), isolate());
+ if (map_constructor->IsJSFunction()) {
+ Handle<Context> map_context =
+ handle(Handle<JSFunction>::cast(map_constructor)->context());
+ Handle<Context> current_context(current_info()->context());
+ bool is_same_context_global_proxy_access =
+ maps->length() == 1 && // >1 map => fallback to polymorphic
+ maps->first()->IsJSGlobalProxyMap() &&
+ (*map_context == *current_context);
+ if (is_same_context_global_proxy_access) {
+ Handle<JSGlobalObject> global_object(current_info()->global_object());
+ LookupIterator it(global_object, name, LookupIterator::OWN);
+ if (CanInlineGlobalPropertyAccess(&it, access)) {
+ BuildCheckHeapObject(object);
+ Add<HCheckMaps>(object, maps);
+ if (access == LOAD) {
+ InlineGlobalPropertyLoad(&it, expr->id());
+ return nullptr;
+ } else {
+ return InlineGlobalPropertyStore(&it, value, expr->id());
+ }
+ }
+ }
+ }
+
PropertyAccessInfo info(this, access, maps->first(), name);
if (!info.CanAccessAsMonomorphic(maps)) {
HandlePolymorphicNamedFieldAccess(access, expr, slot, ast_id, return_id,
@@ -7861,7 +7671,7 @@ HInstruction* HOptimizedGraphBuilder::NewCallFunction(
}
HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = {context(), function, arity};
+ HValue* op_vals[] = {function, arity};
Callable callable =
CodeFactory::Call(isolate(), convert_mode, tail_call_mode);
@@ -7883,13 +7693,13 @@ HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
}
int arity = argument_count - 1;
Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+ HValue* arity_val = Add<HConstant>(arity);
HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
HValue* vector_val = Add<HConstant>(vector);
- HValue* op_vals[] = {context(), function, index_val, vector_val};
-
+ HValue* op_vals[] = {function, arity_val, index_val, vector_val};
Callable callable = CodeFactory::CallICInOptimizedCode(
- isolate(), arity, convert_mode, tail_call_mode);
+ isolate(), convert_mode, tail_call_mode);
HConstant* stub = Add<HConstant>(callable.code());
return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
@@ -8241,13 +8051,13 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Parse and allocate variables.
// Use the same AstValueFactory for creating strings in the sub-compilation
// step, but don't transfer ownership to target_info.
- ParseInfo parse_info(zone(), target);
+ Handle<SharedFunctionInfo> target_shared(target->shared());
+ ParseInfo parse_info(zone(), target_shared);
parse_info.set_ast_value_factory(
top_info()->parse_info()->ast_value_factory());
parse_info.set_ast_value_factory_owned(false);
CompilationInfo target_info(&parse_info, target);
- Handle<SharedFunctionInfo> target_shared(target->shared());
if (inlining_kind != CONSTRUCT_CALL_RETURN &&
IsClassConstructor(target_shared->kind())) {
@@ -8315,11 +8125,10 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
// All declarations must be inlineable.
- ZoneList<Declaration*>* decls = target_info.scope()->declarations();
- int decl_count = decls->length();
- for (int i = 0; i < decl_count; ++i) {
- if (decls->at(i)->IsFunctionDeclaration() ||
- !decls->at(i)->proxy()->var()->IsStackAllocated()) {
+ Declaration::List* decls = target_info.scope()->declarations();
+ for (Declaration* decl : *decls) {
+ if (decl->IsFunctionDeclaration() ||
+ !decl->proxy()->var()->IsStackAllocated()) {
TraceInline(target, caller, "target has non-trivial declaration");
return false;
}
@@ -8335,7 +8144,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Remember that we inlined this function. This needs to be called right
// after the EnsureDeoptimizationSupport call so that the code flusher
// does not remove the code with the deoptimization support.
- top_info()->AddInlinedFunction(target_info.shared_info());
+ int inlining_id = top_info()->AddInlinedFunction(target_info.shared_info(),
+ source_position());
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function (so
@@ -8351,9 +8161,8 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
&bounds_)
.Run();
- int inlining_id = 0;
if (is_tracking_positions()) {
- inlining_id = TraceInlinedFunction(target_shared, source_position());
+ TraceInlinedFunction(target_shared, source_position(), inlining_id);
}
// Save the pending call context. Set up new one for the inlined function.
@@ -8404,6 +8213,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
if (is_tracking_positions()) {
enter_inlined->set_inlining_id(inlining_id);
}
+
function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
@@ -9011,7 +8821,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
copy_kind, ALLOW_RETURN_HOLE);
HStoreKeyed* store = Add<HStoreKeyed>(elements, new_key, element,
nullptr, copy_kind);
- store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ store->SetFlag(HValue::kTruncatingToNumber);
}
loop.EndBody();
@@ -9092,6 +8902,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
HValue* receiver) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
SmallMapList receiver_maps;
@@ -9104,6 +8915,7 @@ bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
Call* expr,
HValue* receiver,
SmallMapList* receiver_maps) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
Handle<JSFunction> function = expr->target();
int argc = expr->arguments()->length();
return TryInlineApiCall(function, receiver, receiver_maps, argc, expr->id(),
@@ -9113,6 +8925,7 @@ bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
Handle<Map> receiver_map,
BailoutId ast_id) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
SmallMapList receiver_maps(1, zone());
receiver_maps.Add(receiver_map, zone());
return TryInlineApiCall(function,
@@ -9136,6 +8949,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
Handle<Object> function, HValue* receiver, SmallMapList* receiver_maps,
int argc, BailoutId ast_id, ApiCallType call_type,
TailCallMode syntactic_tail_call_mode) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
if (function->IsJSFunction() &&
Handle<JSFunction>::cast(function)->context()->native_context() !=
top_info()->closure()->context()->native_context()) {
@@ -9236,7 +9050,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
isolate());
HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
- HValue* op_vals[] = {context(), Add<HConstant>(function), call_data, holder,
+ HValue* op_vals[] = {Add<HConstant>(function), call_data, holder,
api_function_address};
HInstruction* call = nullptr;
@@ -9745,7 +9559,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
syntactic_tail_call_mode, tail_call_mode);
} else {
PushArgumentsFromEnvironment(argument_count);
- if (expr->is_uninitialized() && expr->IsUsingCallFeedbackICSlot()) {
+ if (expr->is_uninitialized()) {
// We've never seen this call before, so let's have Crankshaft learn
// through the type vector.
call = NewCallFunctionViaIC(function, argument_count,
@@ -9975,7 +9789,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
}
HValue* arity = Add<HConstant>(argument_count - 1);
- HValue* op_vals[] = {context(), function, function, arity};
+ HValue* op_vals[] = {function, function, arity};
Callable callable = CodeFactory::Construct(isolate());
HConstant* stub = Add<HConstant>(callable.code());
PushArgumentsFromEnvironment(argument_count);
@@ -10232,9 +10046,9 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
HValue* byte_offset;
bool is_zero_byte_offset;
- if (arguments->at(kByteOffsetArg)->IsLiteral()
- && Smi::FromInt(0) ==
- *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
+ if (arguments->at(kByteOffsetArg)->IsLiteral() &&
+ Smi::kZero ==
+ *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
byte_offset = Add<HConstant>(static_cast<int32_t>(0));
is_zero_byte_offset = true;
} else {
@@ -10554,28 +10368,23 @@ static Representation RepresentationFor(AstType* type) {
return Representation::Tagged();
}
-
-HInstruction* HOptimizedGraphBuilder::BuildIncrement(
- bool returns_original_input,
- CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
Representation rep = RepresentationFor(expr->type());
if (rep.IsNone() || rep.IsTagged()) {
rep = Representation::Smi();
}
- if (returns_original_input) {
- // We need an explicit HValue representing ToNumber(input). The
- // actual HChange instruction we need is (sometimes) added in a later
- // phase, so it is not available now to be used as an input to HAdd and
- // as the return value.
- HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
- if (!rep.IsDouble()) {
- number_input->SetFlag(HInstruction::kFlexibleRepresentation);
- number_input->SetFlag(HInstruction::kCannotBeTagged);
- }
- Push(number_input);
+ // We need an explicit HValue representing ToNumber(input). The
+ // actual HChange instruction we need is (sometimes) added in a later
+ // phase, so it is not available now to be used as an input to HAdd and
+ // as the return value.
+ HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
+ if (!rep.IsDouble()) {
+ number_input->SetFlag(HInstruction::kFlexibleRepresentation);
+ number_input->SetFlag(HInstruction::kCannotBeTagged);
}
+ Push(number_input);
// The addition has no side effects, so we do not need
// to simulate the expression stack after this instruction.
@@ -10634,7 +10443,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
DCHECK(prop == NULL);
CHECK_ALIVE(VisitForValue(target));
- after = BuildIncrement(returns_original_input, expr);
+ after = BuildIncrement(expr);
input = returns_original_input ? Top() : Pop();
Push(after);
@@ -10650,21 +10459,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
break;
case VariableLocation::CONTEXT: {
- // Bail out if we try to mutate a parameter value in a function
- // using the arguments object. We do not (yet) correctly handle the
- // arguments property of the function.
- if (current_info()->scope()->arguments() != NULL) {
- // Parameters will rewrite to context slots. We have no direct
- // way to detect that the variable is a parameter so we use a
- // linear search of the parameter list.
- int count = current_info()->scope()->num_parameters();
- for (int i = 0; i < count; ++i) {
- if (var == current_info()->scope()->parameter(i)) {
- return Bailout(kAssignmentToParameterInArgumentsObject);
- }
- }
- }
-
HValue* context = BuildContextChainWalk(var);
HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
@@ -10702,7 +10496,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
CHECK_ALIVE(PushLoad(prop, object, key));
- after = BuildIncrement(returns_original_input, expr);
+ after = BuildIncrement(expr);
if (returns_original_input) {
input = Pop();
@@ -11041,7 +10835,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive, than a stub call.
if (graph()->info()->IsStub() && is_non_primitive) {
- HValue* values[] = {context(), left, right};
+ HValue* values[] = {left, right};
#define GET_STUB(Name) \
do { \
Callable callable = CodeFactory::Name(isolate()); \
@@ -11255,7 +11049,7 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
HBasicBlock* eval_right = graph()->CreateBasicBlock();
- ToBooleanICStub::Types expected(expr->left()->to_boolean_types());
+ ToBooleanHints expected(expr->left()->to_boolean_types());
HBranch* test = is_logical_and
? New<HBranch>(left_value, expected, eval_right, empty_block)
: New<HBranch>(left_value, expected, empty_block, eval_right);
@@ -11325,12 +11119,6 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
- if (is_tracking_positions() && result->IsBinaryOperation()) {
- HBinaryOperation::cast(result)->SetOperandPositions(
- zone(),
- ScriptPositionToSourcePosition(expr->left()->position()),
- ScriptPositionToSourcePosition(expr->right()->position()));
- }
return ast_context()->ReturnValue(result);
}
@@ -11454,7 +11242,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Callable callable = CodeFactory::InstanceOf(isolate());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), left, right};
+ HValue* values[] = {left, right};
HCallWithDescriptor* result = New<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
result->set_type(HType::Boolean());
@@ -11463,7 +11251,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
} else if (op == Token::IN) {
Callable callable = CodeFactory::HasProperty(isolate());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), left, right};
+ HValue* values[] = {left, right};
HInstruction* result =
New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
Vector<HValue*>(values, arraysize(values)));
@@ -11515,27 +11303,35 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
// The caller expects a branch instruction, so make it happy.
return New<HBranch>(graph()->GetConstantTrue());
}
- // Can we get away with map check and not instance type check?
- HValue* operand_to_check =
- left->block()->block_id() < right->block()->block_id() ? left : right;
- if (combined_type->IsClass()) {
- Handle<Map> map = combined_type->AsClass()->Map();
- AddCheckMap(operand_to_check, map);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- if (is_tracking_positions()) {
- result->set_operand_position(zone(), 0, left_position);
- result->set_operand_position(zone(), 1, right_position);
+ if (op == Token::EQ) {
+ // For abstract equality we need to check both sides are receivers.
+ if (combined_type->IsClass()) {
+ Handle<Map> map = combined_type->AsClass()->Map();
+ AddCheckMap(left, map);
+ AddCheckMap(right, map);
+ } else {
+ BuildCheckHeapObject(left);
+ Add<HCheckInstanceType>(left, HCheckInstanceType::IS_JS_RECEIVER);
+ BuildCheckHeapObject(right);
+ Add<HCheckInstanceType>(right, HCheckInstanceType::IS_JS_RECEIVER);
}
- return result;
} else {
- BuildCheckHeapObject(operand_to_check);
- Add<HCheckInstanceType>(operand_to_check,
- HCheckInstanceType::IS_JS_RECEIVER);
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return result;
+ // For strict equality we only need to check one side.
+ HValue* operand_to_check =
+ left->block()->block_id() < right->block()->block_id() ? left
+ : right;
+ if (combined_type->IsClass()) {
+ Handle<Map> map = combined_type->AsClass()->Map();
+ AddCheckMap(operand_to_check, map);
+ } else {
+ BuildCheckHeapObject(operand_to_check);
+ Add<HCheckInstanceType>(operand_to_check,
+ HCheckInstanceType::IS_JS_RECEIVER);
+ }
}
+ HCompareObjectEqAndBranch* result =
+ New<HCompareObjectEqAndBranch>(left, right);
+ return result;
} else {
if (combined_type->IsClass()) {
// TODO(bmeurer): This is an optimized version of an x < y, x > y,
@@ -11573,8 +11369,11 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
// We depend on the prototype chain to stay the same, because we
// also need to deoptimize when someone installs @@toPrimitive
// or @@toStringTag somewhere in the prototype chain.
- BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
- Handle<JSObject>::null());
+ Handle<Object> prototype(map->prototype(), isolate());
+ if (prototype->IsJSObject()) {
+ BuildCheckPrototypeMaps(Handle<JSObject>::cast(prototype),
+ Handle<JSObject>::null());
+ }
AddCheckMap(left, map);
AddCheckMap(right, map);
// The caller expects a branch instruction, so make it happy.
@@ -11666,9 +11465,6 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (is_tracking_positions()) {
- result->SetOperandPositions(zone(), left_position, right_position);
- }
return result;
}
}
@@ -11967,7 +11763,7 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
kind, ALLOW_RETURN_HOLE);
HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
value_instruction, nullptr, kind);
- store->SetFlag(HValue::kAllowUndefinedAsNaN);
+ store->SetFlag(HValue::kTruncatingToNumber);
}
}
@@ -12028,9 +11824,8 @@ void HOptimizedGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
return Bailout(kSuperReference);
}
-
void HOptimizedGraphBuilder::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
+ Declaration::List* declarations) {
DCHECK(globals_.is_empty());
AstVisitor<HOptimizedGraphBuilder>::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
@@ -12154,17 +11949,6 @@ void HOptimizedGraphBuilder::GenerateIsJSReceiver(CallRuntime* call) {
return ast_context()->ReturnControl(result, call->id());
}
-
-void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasCachedArrayIndexAndBranch* result =
- New<HHasCachedArrayIndexAndBranch>(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12204,7 +11988,7 @@ void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
} else {
Callable callable = CodeFactory::ToInteger(isolate());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), input};
+ HValue* values[] = {input};
HInstruction* result = New<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
@@ -12230,7 +12014,7 @@ void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
} else {
Callable callable = CodeFactory::ToString(isolate());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), input};
+ HValue* values[] = {input};
HInstruction* result = New<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
@@ -12244,7 +12028,7 @@ void HOptimizedGraphBuilder::GenerateToLength(CallRuntime* call) {
Callable callable = CodeFactory::ToLength(isolate());
HValue* input = Pop();
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), input};
+ HValue* values[] = {input};
HInstruction* result = New<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
@@ -12336,7 +12120,7 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
HValue* to = Pop();
HValue* from = Pop();
HValue* string = Pop();
- HValue* values[] = {context(), string, from, to};
+ HValue* values[] = {string, from, to};
HInstruction* result = New<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
result->set_type(HType::String());
@@ -12349,7 +12133,7 @@ void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
CHECK_ALIVE(VisitExpressions(call->arguments()));
FastNewObjectStub stub(isolate());
FastNewObjectDescriptor descriptor(isolate());
- HValue* values[] = {context(), Pop(), Pop()};
+ HValue* values[] = {Pop(), Pop()};
HConstant* stub_value = Add<HConstant>(stub.GetCode());
HInstruction* result =
New<HCallWithDescriptor>(stub_value, 0, descriptor, ArrayVector(values));
@@ -12366,48 +12150,13 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
HValue* subject = Pop();
HValue* regexp_object = Pop();
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), regexp_object, subject, index,
- last_match_info};
+ HValue* values[] = {regexp_object, subject, index, last_match_info};
HInstruction* result = New<HCallWithDescriptor>(
stub, 0, callable.descriptor(), ArrayVector(values));
return ast_context()->ReturnInstruction(result, call->id());
}
-void HOptimizedGraphBuilder::GenerateRegExpFlags(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- HValue* regexp = Pop();
- HInstruction* result =
- New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpFlags());
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateRegExpSource(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- HValue* regexp = Pop();
- HInstruction* result =
- New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpSource());
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Construct a RegExp exec result with two in-object properties.
-void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
- DCHECK_EQ(3, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* input = Pop();
- HValue* index = Pop();
- HValue* length = Pop();
- HValue* result = BuildRegExpConstructResult(length, index, input);
- return ast_context()->ReturnValue(result);
-}
-
-
// Fast support for number to string.
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
@@ -12426,8 +12175,7 @@ void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
PushArgumentsFromEnvironment(call->arguments()->length() - 1);
HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
HValue* target = Pop();
- HValue* values[] = {context(), target,
- Add<HConstant>(call->arguments()->length() - 2)};
+ HValue* values[] = {target, Add<HConstant>(call->arguments()->length() - 2)};
HInstruction* result =
New<HCallWithDescriptor>(trampoline, call->arguments()->length() - 1,
descriptor, ArrayVector(values));
@@ -12615,16 +12363,6 @@ void HOptimizedGraphBuilder::GenerateMapClear(CallRuntime* call) {
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
-
-void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
CallRuntime* call) {
Add<HDebugBreak>();
@@ -13095,11 +12833,11 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
PrintIndent();
std::ostringstream os;
os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
- if (instruction->has_position() && instruction->position().raw() != 0) {
+ if (instruction->has_position()) {
const SourcePosition pos = instruction->position();
os << " pos:";
- if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
- os << pos.position();
+ if (pos.isInlined()) os << "inlining(" << pos.InliningId() << "),";
+ os << pos.ScriptOffset();
}
os << " <|@\n";
trace_.Add(os.str().c_str());
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index d2f1637d11..9f2508a7bf 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -37,8 +37,8 @@ class HCompilationJob final : public CompilationJob {
public:
explicit HCompilationJob(Handle<JSFunction> function)
: CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
- zone_(function->GetIsolate()->allocator()),
- parse_info_(&zone_, function),
+ zone_(function->GetIsolate()->allocator(), ZONE_NAME),
+ parse_info_(&zone_, handle(function->shared())),
info_(&parse_info_, function),
graph_(nullptr),
chunk_(nullptr) {}
@@ -318,12 +318,6 @@ class HLoopInformation final : public ZoneObject {
HStackCheck* stack_check_;
};
-struct HInlinedFunctionInfo {
- explicit HInlinedFunctionInfo(int start_position)
- : start_position(start_position) {}
- int start_position;
-};
-
class HGraph final : public ZoneObject {
public:
explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
@@ -475,14 +469,6 @@ class HGraph final : public ZoneObject {
int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position);
- // Converts given SourcePosition to the absolute offset from the start of
- // the corresponding script.
- int SourcePositionToScriptPosition(SourcePosition position);
-
- ZoneVector<HInlinedFunctionInfo>& inlined_function_infos() {
- return inlined_function_infos_;
- }
-
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -528,8 +514,6 @@ class HGraph final : public ZoneObject {
int no_side_effects_scope_count_;
bool disallow_adding_new_values_;
- ZoneVector<HInlinedFunctionInfo> inlined_function_infos_;
-
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
@@ -1073,7 +1057,6 @@ class HGraphBuilder {
current_block_(NULL),
scope_(info->scope()),
position_(SourcePosition::Unknown()),
- start_position_(0),
track_positions_(track_positions) {}
virtual ~HGraphBuilder() {}
@@ -1410,28 +1393,6 @@ class HGraphBuilder {
HValue* BuildToNumber(HValue* input);
HValue* BuildToObject(HValue* receiver);
- void BuildJSObjectCheck(HValue* receiver,
- int bit_field_mask);
-
- // Checks a key value that's being used for a keyed element access context. If
- // the key is a index, i.e. a smi or a number in a unique string with a cached
- // numeric value, the "true" of the continuation is joined. Otherwise,
- // if the key is a name or a unique string, the "false" of the continuation is
- // joined. Otherwise, a deoptimization is triggered. In both paths of the
- // continuation, the key is pushed on the top of the environment.
- void BuildKeyedIndexCheck(HValue* key,
- HIfContinuation* join_continuation);
-
- // Checks the properties of an object if they are in dictionary case, in which
- // case "true" of continuation is taken, otherwise the "false"
- void BuildTestForDictionaryProperties(HValue* object,
- HIfContinuation* continuation);
-
- void BuildNonGlobalObjectCheck(HValue* receiver);
-
- HValue* BuildKeyedLookupCacheHash(HValue* object,
- HValue* key);
-
HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
HValue* elements, HValue* key,
HValue* hash);
@@ -1439,10 +1400,6 @@ class HGraphBuilder {
// ES6 section 7.4.7 CreateIterResultObject ( value, done )
HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
- HValue* BuildRegExpConstructResult(HValue* length,
- HValue* index,
- HValue* input);
-
// Allocates a new object according with the given allocation properties.
HAllocate* BuildAllocate(HValue* object_size,
HType type,
@@ -1870,28 +1827,25 @@ class HGraphBuilder {
protected:
void SetSourcePosition(int position) {
if (position != kNoSourcePosition) {
- position_.set_position(position - start_position_);
+ position_.SetScriptOffset(position);
}
// Otherwise position remains unknown.
}
- void EnterInlinedSource(int start_position, int id) {
+ void EnterInlinedSource(int inlining_id) {
if (is_tracking_positions()) {
- start_position_ = start_position;
- position_.set_inlining_id(id);
+ position_.SetInliningId(inlining_id);
}
}
// Convert the given absolute offset from the start of the script to
// the SourcePosition assuming that this position corresponds to the
- // same function as current position_.
+ // same function as position_.
SourcePosition ScriptPositionToSourcePosition(int position) {
if (position == kNoSourcePosition) {
return SourcePosition::Unknown();
}
- SourcePosition pos = position_;
- pos.set_position(position - start_position_);
- return pos;
+ return SourcePosition(position, position_.InliningId());
}
SourcePosition source_position() { return position_; }
@@ -1899,8 +1853,8 @@ class HGraphBuilder {
bool is_tracking_positions() { return track_positions_; }
- int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position);
+ void TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+ SourcePosition position, int inlining_id);
HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
template <typename ViewClass>
@@ -1923,7 +1877,6 @@ class HGraphBuilder {
HBasicBlock* current_block_;
Scope* scope_;
SourcePosition position_;
- int start_position_;
bool track_positions_;
};
@@ -2142,7 +2095,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
FunctionState* function_state() const { return function_state_; }
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
AstTypeBounds* bounds() { return &bounds_; }
@@ -2220,15 +2173,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
F(ToLength) \
F(ToNumber) \
F(IsJSReceiver) \
- F(HasCachedArrayIndex) \
- F(GetCachedArrayIndex) \
F(DebugBreakInOptimizedCode) \
F(StringCharCodeAt) \
F(SubString) \
F(RegExpExec) \
- F(RegExpConstructResult) \
- F(RegExpFlags) \
- F(RegExpSource) \
F(NumberToString) \
F(DebugIsActive) \
/* Typed Arrays */ \
@@ -2364,13 +2312,15 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
#undef DECLARE_VISIT
private:
- // Helpers for flow graph construction.
- enum GlobalPropertyAccess {
- kUseCell,
- kUseGeneric
- };
- GlobalPropertyAccess LookupGlobalProperty(Variable* var, LookupIterator* it,
- PropertyAccessType access_type);
+ bool CanInlineGlobalPropertyAccess(Variable* var, LookupIterator* it,
+ PropertyAccessType access_type);
+
+ bool CanInlineGlobalPropertyAccess(LookupIterator* it,
+ PropertyAccessType access_type);
+
+ void InlineGlobalPropertyLoad(LookupIterator* it, BailoutId ast_id);
+ HInstruction* InlineGlobalPropertyStore(LookupIterator* it, HValue* value,
+ BailoutId ast_id);
void EnsureArgumentsArePushedForAccess();
bool TryArgumentsAccess(Property* expr);
@@ -2716,8 +2666,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
HValue* left,
HValue* right,
PushBeforeSimulateBehavior push_sim_result);
- HInstruction* BuildIncrement(bool returns_original_input,
- CountOperation* expr);
+ HInstruction* BuildIncrement(CountOperation* expr);
HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
Expression* expr, FeedbackVectorSlot slot,
HValue* object, HValue* key, HValue* value);
@@ -2829,6 +2778,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
bool CanBeFunctionApplyArguments(Call* expr);
+ bool IsAnyParameterContextAllocated();
+
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 6c121dd271..d9044cab45 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -327,8 +327,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -1854,16 +1853,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, instr->TrueLabel(chunk_));
@@ -1871,30 +1869,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, factory()->false_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
map = ToRegister(instr->temp());
DCHECK(!map.is(reg));
__ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -1902,13 +1900,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -1919,19 +1917,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -1945,7 +1943,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2192,28 +2190,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ mov(result, FieldOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(instr, equal);
-}
-
-
// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2380,35 +2356,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(eax));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ mov(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2512,18 +2459,6 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- __ mov(LoadDescriptor::NameRegister(), instr->name());
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register temp = ToRegister(instr->temp());
@@ -2690,11 +2625,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ j(not_equal, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+ // protector cell contains (Smi) Isolate::kProtectorValid.
// Otherwise it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(result, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ mov(result, isolate()->factory()->undefined_value());
@@ -2745,18 +2680,6 @@ Operand LCodeGen::BuildFastArrayOperand(
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
@@ -4317,8 +4240,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
Register temp_reg, XMMRegister result_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Label convert, load_smi, done;
@@ -4384,34 +4306,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
if (instr->truncating()) {
- Label no_heap_number, check_bools, check_false;
-
- // Heap number map check.
+ Label truncate;
+ Label::Distance truncate_distance =
+ DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(not_equal, &no_heap_number, Label::kNear);
+ __ j(equal, &truncate, truncate_distance);
+ __ push(input_reg);
+ __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
+ __ pop(input_reg);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ bind(&truncate);
__ TruncateHeapNumberToI(input_reg, input_reg);
- __ jmp(done);
-
- __ bind(&no_heap_number);
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ j(not_equal, &check_bools, Label::kNear);
- __ Move(input_reg, Immediate(0));
- __ jmp(done);
-
- __ bind(&check_bools);
- __ cmp(input_reg, factory()->true_value());
- __ j(not_equal, &check_false, Label::kNear);
- __ Move(input_reg, Immediate(1));
- __ jmp(done);
-
- __ bind(&check_false);
- __ cmp(input_reg, factory()->false_value());
- DeoptimizeIf(not_equal, instr,
- DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
- __ Move(input_reg, Immediate(0));
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
@@ -4844,7 +4750,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Move(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::kZero));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5148,7 +5054,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ cmp(result, Immediate(Smi::FromInt(0)));
+ __ cmp(result, Immediate(Smi::kZero));
__ j(not_equal, &load_cache, Label::kNear);
__ mov(result, isolate()->factory()->empty_fixed_array());
__ jmp(&done, Label::kNear);
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index e6077cc4ad..7272a9112f 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -224,14 +224,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -908,16 +900,18 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap)
+ ? TempRegister()
+ : NULL;
LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1700,24 +1694,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
TempRegister());
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -2007,15 +1983,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2055,17 +2022,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
- context, object, vector);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2124,18 +2080,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadKeyedGeneric* result =
- new(zone()) LLoadKeyedGeneric(context, object, key, vector);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
@@ -2470,7 +2414,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index 816d8fd2c1..13ad4bd1a3 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -75,9 +75,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -92,11 +90,8 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(LoadRoot) \
V(MathAbs) \
V(MathClz32) \
@@ -1080,35 +1075,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
void PrintDataTo(StringStream* stream) override;
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1493,25 +1459,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
@@ -1576,43 +1523,6 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1975,6 +1885,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/lithium-allocator.cc b/deps/v8/src/crankshaft/lithium-allocator.cc
index d17cd27c10..aa4459b23a 100644
--- a/deps/v8/src/crankshaft/lithium-allocator.cc
+++ b/deps/v8/src/crankshaft/lithium-allocator.cc
@@ -513,7 +513,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
}
LAllocator::LAllocator(int num_values, HGraph* graph)
- : zone_(graph->isolate()->allocator()),
+ : zone_(graph->isolate()->allocator(), ZONE_NAME),
chunk_(NULL),
live_in_sets_(graph->blocks()->length(), zone()),
live_ranges_(num_values * 2, zone()),
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index decc2a596f..2d165601d7 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -103,9 +103,8 @@ bool LCodeGenBase::GenerateBody() {
GenerateBodyInstructionPre(instr);
HValue* value = instr->hydrogen_value();
- if (!value->position().IsUnknown()) {
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ if (value->position().IsKnown()) {
+ RecordAndWritePosition(value->position());
}
instr->CompileToNative(codegen);
@@ -141,8 +140,8 @@ void LCodeGenBase::CheckEnvironmentUsage() {
#endif
}
-void LCodeGenBase::RecordAndWritePosition(int pos) {
- if (pos == kNoSourcePosition) return;
+void LCodeGenBase::RecordAndWritePosition(SourcePosition pos) {
+ if (!pos.IsKnown()) return;
source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
}
@@ -167,8 +166,7 @@ void LCodeGenBase::Comment(const char* format, ...) {
void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
SourcePosition position = deopt_info.position;
int deopt_id = deopt_info.deopt_id;
- int raw_position = position.IsUnknown() ? 0 : position.raw();
- masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position, deopt_id);
+ masm()->RecordDeoptReason(deopt_info.deopt_reason, position, deopt_id);
}
@@ -311,6 +309,26 @@ void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
}
}
+namespace {
+
+Handle<PodArray<InliningPosition>> CreateInliningPositions(
+ CompilationInfo* info) {
+ const CompilationInfo::InlinedFunctionList& inlined_functions =
+ info->inlined_functions();
+ if (inlined_functions.size() == 0) {
+ return Handle<PodArray<InliningPosition>>::cast(
+ info->isolate()->factory()->empty_byte_array());
+ }
+ Handle<PodArray<InliningPosition>> inl_positions =
+ PodArray<InliningPosition>::New(
+ info->isolate(), static_cast<int>(inlined_functions.size()), TENURED);
+ for (size_t i = 0; i < inlined_functions.size(); ++i) {
+ inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
+ }
+ return inl_positions;
+}
+
+} // namespace
void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
@@ -328,9 +346,9 @@ void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
AllowDeferredHandleDereference allow_handle_dereference;
data->SetSharedFunctionInfo(*info_->shared_info());
} else {
- data->SetSharedFunctionInfo(Smi::FromInt(0));
+ data->SetSharedFunctionInfo(Smi::kZero);
}
- data->SetWeakCellCache(Smi::FromInt(0));
+ data->SetWeakCellCache(Smi::kZero);
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -342,6 +360,9 @@ void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
data->SetLiteralArray(*literals);
}
+ Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info_);
+ data->SetInliningPositions(*inl_pos);
+
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
@@ -360,16 +381,22 @@ void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK_EQ(0, deoptimization_literals_.length());
- for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(function);
+ for (CompilationInfo::InlinedFunctionHolder& inlined :
+ info()->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
+ int index = DefineDeoptimizationLiteral(inlined.shared_info);
+ inlined.RegisterInlinedFunctionId(index);
+ }
}
inlined_function_count_ = deoptimization_literals_.length();
// Define deoptimization literals for all unoptimized code objects of inlined
// functions. This ensures unoptimized code is kept alive by optimized code.
- AllowDeferredHandleDereference allow_shared_function_info_dereference;
- for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
- DefineDeoptimizationLiteral(handle(function->code()));
+ for (const CompilationInfo::InlinedFunctionHolder& inlined :
+ info()->inlined_functions()) {
+ if (!inlined.shared_info.is_identical_to(info()->shared_info())) {
+ DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+ }
}
}
diff --git a/deps/v8/src/crankshaft/lithium-codegen.h b/deps/v8/src/crankshaft/lithium-codegen.h
index c6bf447543..03ece53bf4 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.h
+++ b/deps/v8/src/crankshaft/lithium-codegen.h
@@ -50,7 +50,7 @@ class LCodeGenBase BASE_EMBEDDED {
virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
- void RecordAndWritePosition(int position);
+ void RecordAndWritePosition(SourcePosition position);
int GetNextEmittedBlock() const;
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index 8cf3a3f0e6..94d60418fd 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/lithium.h"
#include "src/ast/scopes.h"
+#include "src/codegen.h"
#if V8_TARGET_ARCH_IA32
#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
@@ -260,7 +261,6 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
graph_(graph),
instructions_(32, info->zone()),
pointer_maps_(8, info->zone()),
- inlined_functions_(1, info->zone()),
deprecation_dependencies_(32, info->zone()),
stability_dependencies_(8, info->zone()) {}
@@ -468,8 +468,6 @@ Handle<Code> LChunk::Codegen() {
code->set_is_crankshafted(true);
CodeGenerator::PrintCode(code, info());
- DCHECK(!(info()->GetMustNotHaveEagerFrame() &&
- generator.NeedsEagerFrame()));
return code;
}
assembler.AbortedCodeGeneration();
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
index d04bd5674a..d67c4908eb 100644
--- a/deps/v8/src/crankshaft/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -660,14 +660,6 @@ class LChunk : public ZoneObject {
int LookupDestination(int block_id) const;
Label* GetAssemblyLabel(int block_id) const;
- const ZoneList<Handle<SharedFunctionInfo>>& inlined_functions() const {
- return inlined_functions_;
- }
-
- void AddInlinedFunction(Handle<SharedFunctionInfo> closure) {
- inlined_functions_.Add(closure, zone());
- }
-
void AddDeprecationDependency(Handle<Map> map) {
DCHECK(!map->is_deprecated());
if (!map->CanBeDeprecated()) return;
@@ -705,7 +697,6 @@ class LChunk : public ZoneObject {
BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
- ZoneList<Handle<SharedFunctionInfo>> inlined_functions_;
ZoneList<Handle<Map>> deprecation_dependencies_;
ZoneList<Handle<Map>> stability_dependencies_;
};
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index b24b1c5f08..abbf2085c6 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -61,6 +61,25 @@ class SafepointGenerator final : public CallWrapper {
Safepoint::DeoptMode deopt_mode_;
};
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+ LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
#define __ masm()->
@@ -267,8 +286,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -1935,43 +1953,42 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ lw(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
__ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
@@ -1979,14 +1996,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -1997,14 +2014,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2012,7 +2029,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Operand(SIMD128_VALUE_TYPE));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2026,7 +2043,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
@@ -2286,30 +2303,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Operand(TestType(instr->hydrogen())));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ lw(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(instr, eq, at, Operand(zero_reg));
-}
-
-
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2482,35 +2475,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(a0));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ li(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2597,19 +2561,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Name is always in a2.
- __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register scratch = scratch0();
Register function = ToRegister(instr->function());
@@ -2845,12 +2796,12 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ Branch(&done, ne, result, Operand(scratch));
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ lw(result, FieldMemOperand(result, Cell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
- Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@@ -2906,18 +2857,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register temp = scratch1();
@@ -4495,8 +4434,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Register scratch = scratch0();
@@ -4566,36 +4504,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label no_heap_number, check_bools, check_false;
- // Check HeapNumber map.
- __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+ Label truncate;
+ __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
__ mov(scratch2, input_reg); // In delay slot.
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
+ Operand(ODDBALL_TYPE));
+ __ bind(&truncate);
__ TruncateHeapNumberToI(input_reg, scratch2);
- __ Branch(&done);
-
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ bind(&no_heap_number);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&check_bools, ne, input_reg, Operand(at));
- DCHECK(ToRegister(instr->result()).is(input_reg));
- __ Branch(USE_DELAY_SLOT, &done);
- __ mov(input_reg, zero_reg); // In delay slot.
-
- __ bind(&check_bools);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(&check_false, ne, scratch2, Operand(at));
- __ Branch(USE_DELAY_SLOT, &done);
- __ li(input_reg, Operand(1)); // In delay slot.
-
- __ bind(&check_false);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
- scratch2, Operand(at));
- __ Branch(USE_DELAY_SLOT, &done);
- __ mov(input_reg, zero_reg); // In delay slot.
} else {
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
Operand(at));
@@ -5401,7 +5317,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+ __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
__ li(result, Operand(isolate()->factory()->empty_fixed_array()));
__ jmp(&done);
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index bb09abc1df..28ca01cd71 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -357,24 +357,9 @@ class LCodeGen: public LCodeGenBase {
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
+ explicit PushSafepointRegistersScope(LCodeGen* codegen);
+
+ ~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index 5533b8f59d..26d422a710 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -212,14 +212,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -885,15 +877,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1672,24 +1664,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
return new(zone()) LHasInstanceTypeAndBranch(value);
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1946,15 +1920,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -1990,18 +1955,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2061,20 +2014,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
- v0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2352,7 +2291,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index f49fb93c59..209987b012 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -71,9 +71,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -89,11 +87,8 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(MathAbs) \
V(MathCos) \
V(MathSin) \
@@ -1049,36 +1044,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
void PrintDataTo(StringStream* stream) override;
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1448,25 +1413,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1515,43 +1461,6 @@ class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1924,6 +1833,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 5f93e55fde..1531996c19 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -37,6 +37,25 @@ class SafepointGenerator final : public CallWrapper {
Safepoint::DeoptMode deopt_mode_;
};
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+ LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->push(ra);
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
#define __ masm()->
@@ -243,8 +262,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -2057,43 +2075,42 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ ld(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// Boolean -> its value.
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg, at);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, at, Operand(zero_reg));
}
const Register map = scratch0();
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
__ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
@@ -2101,14 +2118,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2119,14 +2136,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
const Register scratch = scratch1();
__ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -2134,7 +2151,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Operand(SIMD128_VALUE_TYPE));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2148,7 +2165,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject, zero_reg,
@@ -2408,30 +2425,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Operand(TestType(instr->hydrogen())));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ lwu(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
- EmitBranch(instr, eq, at, Operand(zero_reg));
-}
-
-
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2606,35 +2599,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(a0));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ li(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ li(slot_register, Operand(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2736,19 +2700,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(v0));
-
- // Name is always in a2.
- __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register scratch = scratch0();
Register function = ToRegister(instr->function());
@@ -3021,13 +2972,13 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ Branch(&done, ne, result, Operand(scratch));
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
// The comparison only needs LS bits of value, which is a smi.
__ ld(result, FieldMemOperand(result, Cell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
- Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@@ -3089,18 +3040,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register temp = scratch1();
@@ -4703,8 +4642,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Register scratch = scratch0();
@@ -4774,36 +4712,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label no_heap_number, check_bools, check_false;
- // Check HeapNumber map.
- __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+ Label truncate;
+ __ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
__ mov(scratch2, input_reg); // In delay slot.
+ __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
+ Operand(ODDBALL_TYPE));
+ __ bind(&truncate);
__ TruncateHeapNumberToI(input_reg, scratch2);
- __ Branch(&done);
-
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ bind(&no_heap_number);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&check_bools, ne, input_reg, Operand(at));
- DCHECK(ToRegister(instr->result()).is(input_reg));
- __ Branch(USE_DELAY_SLOT, &done);
- __ mov(input_reg, zero_reg); // In delay slot.
-
- __ bind(&check_bools);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(&check_false, ne, scratch2, Operand(at));
- __ Branch(USE_DELAY_SLOT, &done);
- __ li(input_reg, Operand(1)); // In delay slot.
-
- __ bind(&check_false);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean,
- scratch2, Operand(at));
- __ Branch(USE_DELAY_SLOT, &done);
- __ mov(input_reg, zero_reg); // In delay slot.
} else {
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch1,
Operand(at));
@@ -5610,7 +5526,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+ __ Branch(&load_cache, ne, result, Operand(Smi::kZero));
__ li(result, Operand(isolate()->factory()->empty_fixed_array()));
__ jmp(&done);
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index aaa2e6be17..ba332ae360 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -360,24 +360,9 @@ class LCodeGen: public LCodeGenBase {
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->push(ra);
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
+ explicit PushSafepointRegistersScope(LCodeGen* codegen);
+
+ ~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index 0855754d31..fd0ebc8206 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -212,14 +212,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -885,15 +877,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1678,24 +1670,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
return new(zone()) LHasInstanceTypeAndBranch(value);
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1949,15 +1923,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -1993,18 +1958,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2065,20 +2018,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
- v0);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2357,7 +2296,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index 7bc89afd46..f5b402a636 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -73,9 +73,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -91,11 +89,8 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(MathAbs) \
V(MathCos) \
V(MathSin) \
@@ -1067,36 +1062,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
void PrintDataTo(StringStream* stream) override;
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1510,25 +1475,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1577,43 +1523,6 @@ class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1970,6 +1879,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 321c39355f..9c65586820 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -35,6 +35,22 @@ class SafepointGenerator final : public CallWrapper {
Safepoint::DeoptMode deopt_mode_;
};
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+ LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
#define __ masm()->
@@ -254,8 +270,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(
";;; <@%d,#%d> "
@@ -2099,45 +2114,44 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmpi(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ beq(instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ cmpi(reg, Operand::Zero());
__ beq(instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg, r0);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
const Register map = scratch0();
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
__ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ TestBit(ip, Map::kIsUndetectable, r0);
@@ -2145,13 +2159,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ bge(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2163,20 +2177,20 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
Label not_simd;
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2191,7 +2205,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2459,30 +2473,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
- __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
- __ and_(r0, scratch, r0, SetRC);
- EmitBranch(instr, eq, cr0);
-}
-
-
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
@@ -2660,35 +2650,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(r3));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2791,19 +2752,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(r3));
-
- // Name is always in r5.
- __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register scratch = scratch0();
Register function = ToRegister(instr->function());
@@ -3098,11 +3046,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ bne(&done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
- __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+ __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3153,17 +3101,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -4802,8 +4739,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Register scratch = scratch0();
@@ -4871,35 +4807,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, ip);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label no_heap_number, check_bools, check_false;
- __ bne(&no_heap_number);
+ Label truncate;
+ __ beq(&truncate);
+ __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ bind(&truncate);
__ mr(scratch2, input_reg);
__ TruncateHeapNumberToI(input_reg, scratch2);
- __ b(&done);
-
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ bind(&no_heap_number);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, ip);
- __ bne(&check_bools);
- __ li(input_reg, Operand::Zero());
- __ b(&done);
-
- __ bind(&check_bools);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(input_reg, ip);
- __ bne(&check_false);
- __ li(input_reg, Operand(1));
- __ b(&done);
-
- __ bind(&check_false);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
- __ li(input_reg, Operand::Zero());
} else {
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
@@ -5313,7 +5227,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ LoadSmiLiteral(result, Smi::FromInt(0));
+ __ LoadSmiLiteral(result, Smi::kZero);
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5404,8 +5318,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ mov(r3, Operand(isolate()->factory()->number_string()));
__ b(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5646,7 +5560,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(result, Smi::kZero, r0);
__ bne(&load_cache);
__ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
__ b(&done);
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index a4a90a7184..32b9e18487 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -294,21 +294,9 @@ class LCodeGen : public LCodeGenBase {
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
+ explicit PushSafepointRegistersScope(LCodeGen* codegen);
+
+ ~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 738cf231ce..75aec2f86d 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -220,14 +220,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -890,15 +882,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new (zone()) LBranch(UseRegister(value));
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1702,24 +1694,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
return new (zone()) LHasInstanceTypeAndBranch(value);
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone())
- LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1974,15 +1948,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2018,18 +1983,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2087,19 +2040,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result = DefineFixed(
- new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
- return MarkAsCall(result, instr);
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2373,7 +2313,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index 626f00ab8e..4dda385cfe 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -71,9 +71,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -89,11 +87,8 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
V(MathCos) \
@@ -1058,34 +1053,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
void PrintDataTo(StringStream* stream) override;
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1460,25 +1427,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
@@ -1523,43 +1471,6 @@ class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -1910,6 +1821,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
index 71881ada39..c44df9550a 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -35,6 +35,23 @@ class SafepointGenerator final : public CallWrapper {
Safepoint::DeoptMode deopt_mode_;
};
+LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
+ LCodeGen* codegen)
+ : codegen_(codegen) {
+ DCHECK(codegen_->info()->is_calling());
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+ codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ StoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+}
+
+LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
+ DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+ RestoreRegistersStateStub stub(codegen_->isolate());
+ codegen_->masm_->CallStub(&stub);
+ codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+}
+
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -241,8 +258,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(
";;; <@%d,#%d> "
@@ -2102,45 +2118,44 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CmpP(ip, Operand::Zero());
EmitBranch(instr, ne);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// Boolean -> its value.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ beq(instr->TrueLabel(chunk_));
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ beq(instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ CmpP(reg, Operand::Zero());
__ beq(instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg);
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
const Register map = scratch0();
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
__ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ tm(FieldMemOperand(map, Map::kBitFieldOffset),
Operand(1 << Map::kIsUndetectable));
@@ -2148,13 +2163,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
__ bge(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
@@ -2166,20 +2181,20 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CompareInstanceType(map, ip, SYMBOL_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
Label not_simd;
__ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
__ beq(instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2194,7 +2209,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2448,27 +2463,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
- Register scratch = scratch0();
-
- __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
- __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
- __ AndP(r0, scratch);
- EmitBranch(instr, eq);
-}
-
// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
@@ -2637,33 +2631,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ Ret();
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(r2));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
-}
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r2));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2753,18 +2720,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
r0);
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(r2));
-
- // Name is always in r4.
- __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register scratch = scratch0();
Register function = ToRegister(instr->function());
@@ -3054,11 +3009,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ bne(&done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
- __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+ __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
@@ -3112,16 +3067,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
return MemOperand(scratch, base, base_offset);
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -3135,7 +3080,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
__ LoadP(
result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(result, r0);
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
@@ -4729,8 +4675,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DoubleRegister result_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Register scratch = scratch0();
@@ -4797,32 +4742,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
if (instr->truncating()) {
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations.
- Label no_heap_number, check_bools, check_false;
- __ bne(&no_heap_number, Label::kNear);
+ Label truncate;
+ __ beq(&truncate);
+ __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ bind(&truncate);
__ LoadRR(scratch2, input_reg);
__ TruncateHeapNumberToI(input_reg, scratch2);
- __ b(&done, Label::kNear);
-
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ bind(&no_heap_number);
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- __ bne(&check_bools);
- __ LoadImmP(input_reg, Operand::Zero());
- __ b(&done, Label::kNear);
-
- __ bind(&check_bools);
- __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
- __ bne(&check_false, Label::kNear);
- __ LoadImmP(input_reg, Operand(1));
- __ b(&done, Label::kNear);
-
- __ bind(&check_false);
- __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
- __ LoadImmP(input_reg, Operand::Zero());
} else {
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
@@ -5229,7 +5155,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ LoadSmiLiteral(result, Smi::FromInt(0));
+ __ LoadSmiLiteral(result, Smi::kZero);
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5319,8 +5245,8 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
__ mov(r2, Operand(isolate()->factory()->number_string()));
__ b(&end);
__ bind(&do_call);
- TypeofStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ Callable callable = CodeFactory::Typeof(isolate());
+ CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
@@ -5547,7 +5473,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(result, Smi::kZero, r0);
__ bne(&load_cache, Label::kNear);
__ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
__ b(&done, Label::kNear);
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
index 30e9d2b997..a8d59ff5b1 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.h
@@ -293,21 +293,9 @@ class LCodeGen : public LCodeGenBase {
class PushSafepointRegistersScope final BASE_EMBEDDED {
public:
- explicit PushSafepointRegistersScope(LCodeGen* codegen)
- : codegen_(codegen) {
- DCHECK(codegen_->info()->is_calling());
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
- codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
- StoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- }
-
- ~PushSafepointRegistersScope() {
- DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
- RestoreRegistersStateStub stub(codegen_->isolate());
- codegen_->masm_->CallStub(&stub);
- codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
- }
+ explicit PushSafepointRegistersScope(LCodeGen* codegen);
+
+ ~PushSafepointRegistersScope();
private:
LCodeGen* codegen_;
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
index bf9dfd56ba..3d14764032 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.cc
@@ -203,12 +203,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -813,15 +807,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new (zone()) LBranch(UseRegister(value));
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1552,21 +1546,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
return new (zone()) LHasInstanceTypeAndBranch(value);
}
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
-}
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new (zone())
- LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
-}
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1804,14 +1783,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, r2), instr);
-}
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -1844,17 +1815,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
return DefineAsRegister(new (zone()) LLoadNamedField(obj));
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result =
- DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r2);
- return MarkAsCall(result, instr);
-}
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -1909,18 +1869,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
return result;
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LInstruction* result = DefineFixed(
- new (zone()) LLoadKeyedGeneric(context, object, key, vector), r2);
- return MarkAsCall(result, instr);
-}
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_fixed_typed_array()) {
DCHECK(instr->elements()->representation().IsTagged());
@@ -2175,7 +2123,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
index 70670ac3e8..b946d4f271 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.h
@@ -71,9 +71,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -89,11 +87,8 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
V(MathCos) \
@@ -982,31 +977,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
void PrintDataTo(StringStream* stream) override;
};
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
@@ -1358,24 +1328,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
@@ -1417,42 +1369,6 @@ class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -1781,6 +1697,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index d2b56e255b..f21d235cb3 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -85,10 +85,8 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
store_.LookupBounds(parameter_index(i)).lower);
}
- ZoneList<Variable*>* local_vars = scope_->locals();
int local_index = 0;
- for (int i = 0; i < local_vars->length(); i++) {
- Variable* var = local_vars->at(i);
+ for (Variable* var : *scope_->locals()) {
if (var->IsStackLocal()) {
PrintObserved(
var, frame->GetExpression(local_index),
@@ -517,16 +515,12 @@ void AstTyper::VisitProperty(Property* expr) {
void AstTyper::VisitCall(Call* expr) {
// Collect type feedback.
RECURSE(Visit(expr->expression()));
- bool is_uninitialized = true;
- if (expr->IsUsingCallFeedbackICSlot()) {
- FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
- is_uninitialized = oracle()->CallIsUninitialized(slot);
- if (!expr->expression()->IsProperty() &&
- oracle()->CallIsMonomorphic(slot)) {
- expr->set_target(oracle()->GetCallTarget(slot));
- Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
- expr->set_allocation_site(site);
- }
+ FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
+ bool is_uninitialized = oracle()->CallIsUninitialized(slot);
+ if (!expr->expression()->IsProperty() && oracle()->CallIsMonomorphic(slot)) {
+ expr->set_target(oracle()->GetCallTarget(slot));
+ Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
+ expr->set_allocation_site(site);
}
expr->set_is_uninitialized(is_uninitialized);
@@ -785,9 +779,8 @@ int AstTyper::variable_index(Variable* var) {
: var->IsParameter() ? parameter_index(var->index()) : kNoVar;
}
-void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
- for (int i = 0; i < decls->length(); ++i) {
- Declaration* decl = decls->at(i);
+void AstTyper::VisitDeclarations(Declaration::List* decls) {
+ for (Declaration* decl : *decls) {
RECURSE(Visit(decl));
}
}
diff --git a/deps/v8/src/crankshaft/typing.h b/deps/v8/src/crankshaft/typing.h
index eb88634777..add457bfe3 100644
--- a/deps/v8/src/crankshaft/typing.h
+++ b/deps/v8/src/crankshaft/typing.h
@@ -69,7 +69,7 @@ class AstTyper final : public AstVisitor<AstTyper> {
int variable_index(Variable* var);
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(ZoneList<Statement*>* statements);
#define DECLARE_VISIT(type) void Visit##type(type* node);
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 50e2aa0915..6889040996 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -356,8 +356,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -1977,7 +1976,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, equal);
} else if (type.IsSmi()) {
DCHECK(!info()->IsStub());
- __ SmiCompare(reg, Smi::FromInt(0));
+ __ SmiCompare(reg, Smi::kZero);
EmitBranch(instr, not_equal);
} else if (type.IsJSArray()) {
DCHECK(!info()->IsStub());
@@ -1993,17 +1992,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// true -> true.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ j(equal, instr->TrueLabel(chunk_));
@@ -2011,28 +2009,28 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
- __ Cmp(reg, Smi::FromInt(0));
+ __ Cmp(reg, Smi::kZero);
__ j(equal, instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
const Register map = kScratchRegister;
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
__ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -2040,13 +2038,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2057,19 +2055,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
@@ -2082,7 +2080,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2337,29 +2335,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ movl(result, FieldOperand(input, String::kHashFieldOffset));
- DCHECK(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- __ testl(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(instr, equal);
-}
-
-
// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2522,35 +2497,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(rax));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ Move(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ Move(slot_register, Smi::FromInt(index));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2654,18 +2600,6 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(rax));
-
- __ Move(LoadDescriptor::NameRegister(), instr->name());
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register result = ToRegister(instr->result());
@@ -2888,11 +2822,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ j(not_equal, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+ // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Cmp(FieldOperand(result, Cell::kValueOffset),
- Smi::FromInt(Isolate::kArrayProtectorValid));
+ Smi::FromInt(Isolate::kProtectorValid));
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ Move(result, isolate()->factory()->undefined_value());
@@ -2940,18 +2874,6 @@ Operand LCodeGen::BuildFastArrayOperand(
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
@@ -4190,7 +4112,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register result = rax;
- __ Move(result, Smi::FromInt(0));
+ __ Move(result, Smi::kZero);
// We have to call a stub.
{
@@ -4559,7 +4481,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ Move(reg, Smi::FromInt(0));
+ __ Move(reg, Smi::kZero);
{
PushSafepointRegistersScope scope(this);
@@ -4608,8 +4530,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
XMMRegister result_reg, NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Label convert, load_smi, done;
@@ -4671,34 +4592,17 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
if (instr->truncating()) {
- Label no_heap_number, check_bools, check_false;
-
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &no_heap_number, Label::kNear);
+ Register input_map_reg = kScratchRegister;
+ Label truncate;
+ Label::Distance truncate_distance =
+ DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ __ movp(input_map_reg, FieldOperand(input_reg, HeapObject::kMapOffset));
+ __ JumpIfRoot(input_map_reg, Heap::kHeapNumberMapRootIndex, &truncate,
+ truncate_distance);
+ __ CmpInstanceType(input_map_reg, ODDBALL_TYPE);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ bind(&truncate);
__ TruncateHeapNumberToI(input_reg, input_reg);
- __ jmp(done);
-
- __ bind(&no_heap_number);
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &check_bools, Label::kNear);
- __ Set(input_reg, 0);
- __ jmp(done);
-
- __ bind(&check_bools);
- __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
- __ j(not_equal, &check_false, Label::kNear);
- __ Set(input_reg, 1);
- __ jmp(done);
-
- __ bind(&check_false);
- __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
- DeoptimizeIf(not_equal, instr,
- DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
- __ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(double_scratch0()));
@@ -5123,7 +5027,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Move(result, Smi::FromInt(0));
+ __ Move(result, Smi::kZero);
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5431,7 +5335,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ Cmp(result, Smi::FromInt(0));
+ __ Cmp(result, Smi::kZero);
__ j(not_equal, &load_cache, Label::kNear);
__ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
__ jmp(&done, Label::kNear);
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 18fb5d4d09..bc9040b94c 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -219,13 +219,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
}
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -905,15 +898,15 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
LInstruction* branch = new(zone()) LBranch(UseRegister(value));
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1704,24 +1697,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
return new(zone()) LHasInstanceTypeAndBranch(value);
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- return new(zone()) LHasCachedArrayIndexAndBranch(value);
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
LOperand* value = UseRegister(instr->value());
@@ -1984,15 +1959,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2041,17 +2007,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
- context, object, vector);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2138,19 +2093,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadKeyedGeneric* result =
- new(zone()) LLoadKeyedGeneric(context, object, key, vector);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
@@ -2474,7 +2416,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index e7eaa01529..3c953ffefa 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -71,9 +71,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -89,11 +87,8 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(MathAbs) \
V(MathClz32) \
V(MathCos) \
@@ -1074,36 +1069,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
void PrintDataTo(StringStream* stream) override;
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1490,26 +1455,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- explicit LLoadNamedGeneric(LOperand* context, LOperand* object,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadFunctionPrototype(LOperand* function) {
@@ -1569,43 +1514,6 @@ class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
};
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1968,6 +1876,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 2d597d4c3b..b83d97f981 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -330,8 +330,7 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(
- chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+ RecordAndWritePosition(value->position());
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -2125,16 +2124,15 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
- ToBooleanICStub::Types expected =
- instr->hydrogen()->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->hydrogen()->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
- if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
+ if (expected & ToBooleanHint::kUndefined) {
// undefined -> false.
__ cmp(reg, factory()->undefined_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
+ if (expected & ToBooleanHint::kBoolean) {
// true -> true.
__ cmp(reg, factory()->true_value());
__ j(equal, instr->TrueLabel(chunk_));
@@ -2142,30 +2140,30 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, factory()->false_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
+ if (expected & ToBooleanHint::kNull) {
// 'null' -> false.
__ cmp(reg, factory()->null_value());
__ j(equal, instr->FalseLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SMI)) {
+ if (expected & ToBooleanHint::kSmallInteger) {
// Smis: 0 -> false, all other -> true.
__ test(reg, Operand(reg));
__ j(equal, instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
- } else if (expected.NeedsMap()) {
+ } else if (expected & ToBooleanHint::kNeedsMap) {
// If we need a map later and have a Smi -> deopt.
__ test(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
}
Register map = no_reg; // Keep the compiler happy.
- if (expected.NeedsMap()) {
+ if (expected & ToBooleanHint::kNeedsMap) {
map = ToRegister(instr->temp());
DCHECK(!map.is(reg));
__ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
- if (expected.CanBeUndetectable()) {
+ if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
__ test_b(FieldOperand(map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
@@ -2173,13 +2171,13 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
}
- if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
+ if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::STRING)) {
+ if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
@@ -2190,19 +2188,19 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_string);
}
- if (expected.Contains(ToBooleanICStub::SYMBOL)) {
+ if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
+ if (expected & ToBooleanHint::kSimdValue) {
// SIMD value -> true.
__ CmpInstanceType(map, SIMD128_VALUE_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
- if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
+ if (expected & ToBooleanHint::kHeapNumber) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
@@ -2216,7 +2214,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ bind(&not_heap_number);
}
- if (!expected.IsGeneric()) {
+ if (expected != ToBooleanHint::kAny) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
@@ -2477,28 +2475,6 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
- Register input = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
-
- __ AssertString(input);
-
- __ mov(result, FieldOperand(input, String::kHashFieldOffset));
- __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
- LHasCachedArrayIndexAndBranch* instr) {
- Register input = ToRegister(instr->value());
-
- __ test(FieldOperand(input, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- EmitBranch(instr, equal);
-}
-
-
// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
@@ -2664,35 +2640,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-template <class T>
-void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
- Register vector_register = ToRegister(instr->temp_vector());
- Register slot_register = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
- DCHECK(slot_register.is(eax));
-
- AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- __ mov(vector_register, vector);
- // No need to allocate this register.
- FeedbackVectorSlot slot = instr->hydrogen()->slot();
- int index = vector->GetIndex(slot);
- __ mov(slot_register, Immediate(Smi::FromInt(index)));
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
- .code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -2790,18 +2737,6 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
}
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->result()).is(eax));
-
- __ mov(LoadDescriptor::NameRegister(), instr->name());
- EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register temp = ToRegister(instr->temp());
@@ -2965,11 +2900,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ j(not_equal, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
- // protector cell contains (Smi) Isolate::kArrayProtectorValid.
+ // protector cell contains (Smi) Isolate::kProtectorValid.
// Otherwise it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ cmp(FieldOperand(result, PropertyCell::kValueOffset),
- Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
__ mov(result, isolate()->factory()->undefined_value());
@@ -3020,18 +2955,6 @@ Operand LCodeGen::BuildFastArrayOperand(
}
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
- DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
-
- EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
-
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
@@ -4671,8 +4594,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
Register temp_reg, X87Register res_reg,
NumberUntagDMode mode) {
- bool can_convert_undefined_to_nan =
- instr->hydrogen()->can_convert_undefined_to_nan();
+ bool can_convert_undefined_to_nan = instr->truncating();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Label load_smi, done;
@@ -4748,34 +4670,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
__ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
if (instr->truncating()) {
- Label no_heap_number, check_bools, check_false;
-
- // Heap number map check.
+ Label truncate;
+ Label::Distance truncate_distance =
+ DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
- __ j(not_equal, &no_heap_number, Label::kNear);
+ __ j(equal, &truncate, truncate_distance);
+ __ push(input_reg);
+ __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
+ __ pop(input_reg);
+ DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
+ __ bind(&truncate);
__ TruncateHeapNumberToI(input_reg, input_reg);
- __ jmp(done);
-
- __ bind(&no_heap_number);
- // Check for Oddballs. Undefined/False is converted to zero and True to one
- // for truncating conversions.
- __ cmp(input_reg, factory()->undefined_value());
- __ j(not_equal, &check_bools, Label::kNear);
- __ Move(input_reg, Immediate(0));
- __ jmp(done);
-
- __ bind(&check_bools);
- __ cmp(input_reg, factory()->true_value());
- __ j(not_equal, &check_false, Label::kNear);
- __ Move(input_reg, Immediate(1));
- __ jmp(done);
-
- __ bind(&check_false);
- __ cmp(input_reg, factory()->false_value());
- DeoptimizeIf(not_equal, instr,
- DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
- __ Move(input_reg, Immediate(0));
} else {
// TODO(olivf) Converting a number on the fpu is actually quite slow. We
// should first try a fast conversion and then bailout to this slow case.
@@ -5332,7 +5238,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ Move(result, Immediate(Smi::FromInt(0)));
+ __ Move(result, Immediate(Smi::kZero));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
@@ -5638,7 +5544,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
- __ cmp(result, Immediate(Smi::FromInt(0)));
+ __ cmp(result, Immediate(Smi::kZero));
__ j(not_equal, &load_cache, Label::kNear);
__ mov(result, isolate()->factory()->empty_fixed_array());
__ jmp(&done, Label::kNear);
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index a319c0c718..1844d24117 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -235,14 +235,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if has_cached_array_index(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if class_of_test(");
value()->PrintTo(stream);
@@ -923,18 +915,20 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* value = instr->value();
Representation r = value->representation();
HType type = value->type();
- ToBooleanICStub::Types expected = instr->expected_input_types();
- if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
+ ToBooleanHints expected = instr->expected_input_types();
+ if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
type.IsJSArray() || type.IsHeapNumber() || type.IsString();
- LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+ LOperand* temp = !easy_case && (expected & ToBooleanHint::kNeedsMap)
+ ? TempRegister()
+ : NULL;
LInstruction* branch =
temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
: new (zone()) LBranch(UseRegisterAtStart(value), temp);
- if (!easy_case &&
- ((!expected.Contains(ToBooleanICStub::SMI) && expected.NeedsMap()) ||
- !expected.IsGeneric())) {
+ if (!easy_case && ((!(expected & ToBooleanHint::kSmallInteger) &&
+ (expected & ToBooleanHint::kNeedsMap)) ||
+ expected != ToBooleanHint::kAny)) {
branch = AssignEnvironment(branch);
}
return branch;
@@ -1703,24 +1697,6 @@ LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
TempRegister());
}
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
- HGetCachedArrayIndex* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
- HHasCachedArrayIndexAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
HClassOfTestAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -2003,15 +1979,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
-
- LLoadGlobalGeneric* result = new (zone()) LLoadGlobalGeneric(context, vector);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
@@ -2051,17 +2018,6 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
}
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
- context, object, vector);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
HLoadFunctionPrototype* instr) {
return AssignEnvironment(DefineAsRegister(
@@ -2120,18 +2076,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
}
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* object =
- UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
- LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
- LOperand* vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
- LLoadKeyedGeneric* result =
- new(zone()) LLoadKeyedGeneric(context, object, key, vector);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
@@ -2472,7 +2416,6 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
inner->BindContext(instr->closure_context());
inner->set_entry(instr);
current_block_->UpdateEnvironment(inner);
- chunk_->AddInlinedFunction(instr->shared());
return NULL;
}
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index e2b804322a..3653a2de2f 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -76,9 +76,7 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(GetCachedArrayIndex) \
V(Goto) \
- V(HasCachedArrayIndexAndBranch) \
V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
@@ -93,11 +91,8 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalGeneric) \
V(LoadKeyed) \
- V(LoadKeyedGeneric) \
V(LoadNamedField) \
- V(LoadNamedGeneric) \
V(LoadRoot) \
V(MathAbs) \
V(MathClz32) \
@@ -1074,35 +1069,6 @@ class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
void PrintDataTo(StringStream* stream) override;
};
-
-class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LGetCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
- "has-cached-array-index-and-branch")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
public:
LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
@@ -1483,25 +1449,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
- public:
- LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = object;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
@@ -1566,43 +1513,6 @@ inline static bool ExternalArrayOpRequiresTemp(
}
-class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
- public:
- LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
- LOperand* vector) {
- inputs_[0] = context;
- inputs_[1] = obj;
- inputs_[2] = key;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* object() { return inputs_[1]; }
- LOperand* key() { return inputs_[2]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
-};
-
-class LLoadGlobalGeneric final : public LTemplateInstruction<1, 1, 1> {
- public:
- LLoadGlobalGeneric(LOperand* context, LOperand* vector) {
- inputs_[0] = context;
- temps_[0] = vector;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* temp_vector() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
- Handle<Object> name() const { return hydrogen()->name(); }
- TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
-};
-
-
class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
@@ -1961,6 +1871,8 @@ class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
+
+ bool truncating() { return hydrogen()->CanTruncateToNumber(); }
};
diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc
index d2cf573d4c..cdfc39cc5d 100644
--- a/deps/v8/src/d8-posix.cc
+++ b/deps/v8/src/d8-posix.cc
@@ -772,4 +772,12 @@ void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {
FunctionTemplate::New(isolate, RemoveDirectory));
}
+void Shell::Exit(int exit_code) {
+ // Use _exit instead of exit to avoid races between isolate
+ // threads and static destructors.
+ fflush(stdout);
+ fflush(stderr);
+ _exit(exit_code);
+}
+
} // namespace v8
diff --git a/deps/v8/src/d8-windows.cc b/deps/v8/src/d8-windows.cc
index ba89c4156f..e7ddca694f 100644
--- a/deps/v8/src/d8-windows.cc
+++ b/deps/v8/src/d8-windows.cc
@@ -10,5 +10,12 @@ namespace v8 {
void Shell::AddOSMethods(Isolate* isolate, Local<ObjectTemplate> os_templ) {}
+void Shell::Exit(int exit_code) {
+  // Use TerminateProcess to avoid races between isolate threads and
+ // static destructors.
+ fflush(stdout);
+ fflush(stderr);
+ TerminateProcess(GetCurrentProcess(), exit_code);
+}
} // namespace v8
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 01801f80f6..fd9afee808 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -9,7 +9,7 @@
#include <algorithm>
#include <fstream>
-#include <map>
+#include <unordered_map>
#include <utility>
#include <vector>
@@ -34,6 +34,10 @@
#include "src/utils.h"
#include "src/v8.h"
+#ifdef V8_INSPECTOR_ENABLED
+#include "include/v8-inspector.h"
+#endif // V8_INSPECTOR_ENABLED
+
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -149,7 +153,6 @@ class PredictablePlatform : public Platform {
v8::Platform* g_platform = NULL;
-
static Local<Value> Throw(Isolate* isolate, const char* message) {
return isolate->ThrowException(
String::NewFromUtf8(isolate, message, NewStringType::kNormal)
@@ -196,11 +199,9 @@ const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kRecordModeParam[] = "record_mode";
-const char kEnableSamplingParam[] = "enable_sampling";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
const char kIncludedCategoriesParam[] = "included_categories";
-const char kExcludedCategoriesParam[] = "excluded_categories";
class TraceConfigParser {
public:
@@ -221,10 +222,6 @@ class TraceConfigParser {
trace_config->SetTraceRecordMode(
GetTraceRecordMode(isolate, context, trace_config_object));
if (GetBoolean(isolate, context, trace_config_object,
- kEnableSamplingParam)) {
- trace_config->EnableSampling();
- }
- if (GetBoolean(isolate, context, trace_config_object,
kEnableSystraceParam)) {
trace_config->EnableSystrace();
}
@@ -232,10 +229,8 @@ class TraceConfigParser {
kEnableArgumentFilterParam)) {
trace_config->EnableArgumentFilter();
}
- UpdateCategoriesList(isolate, context, trace_config_object,
- kIncludedCategoriesParam, trace_config);
- UpdateCategoriesList(isolate, context, trace_config_object,
- kExcludedCategoriesParam, trace_config);
+ UpdateIncludedCategoriesList(isolate, context, trace_config_object,
+ trace_config);
}
private:
@@ -249,10 +244,11 @@ class TraceConfigParser {
return false;
}
- static int UpdateCategoriesList(
+ static int UpdateIncludedCategoriesList(
v8::Isolate* isolate, Local<Context> context, Local<v8::Object> object,
- const char* property, platform::tracing::TraceConfig* trace_config) {
- Local<Value> value = GetValue(isolate, context, object, property);
+ platform::tracing::TraceConfig* trace_config) {
+ Local<Value> value =
+ GetValue(isolate, context, object, kIncludedCategoriesParam);
if (value->IsArray()) {
Local<Array> v8_array = Local<Array>::Cast(value);
for (int i = 0, length = v8_array->Length(); i < length; ++i) {
@@ -261,11 +257,7 @@ class TraceConfigParser {
->ToString(context)
.ToLocalChecked();
String::Utf8Value str(v->ToString(context).ToLocalChecked());
- if (kIncludedCategoriesParam == property) {
- trace_config->AddIncludedCategory(*str);
- } else {
- trace_config->AddExcludedCategory(*str);
- }
+ trace_config->AddIncludedCategory(*str);
}
return v8_array->Length();
}
@@ -553,34 +545,94 @@ std::string DirName(const std::string& path) {
return path.substr(0, last_slash);
}
-std::string EnsureAbsolutePath(const std::string& path,
- const std::string& dir_name) {
- return IsAbsolutePath(path) ? path : dir_name + '/' + path;
+// Resolves path to an absolute path if necessary, and does some
+// normalization (eliding references to the current directory
+// and replacing backslashes with slashes).
+std::string NormalizePath(const std::string& path,
+ const std::string& dir_name) {
+ std::string result;
+ if (IsAbsolutePath(path)) {
+ result = path;
+ } else {
+ result = dir_name + '/' + path;
+ }
+ std::replace(result.begin(), result.end(), '\\', '/');
+ size_t i;
+ while ((i = result.find("/./")) != std::string::npos) {
+ result.erase(i, 2);
+ }
+ return result;
+}
+
+// Per-context Module data, allowing sharing of module maps
+// across top-level module loads.
+class ModuleEmbedderData {
+ private:
+ class ModuleGlobalHash {
+ public:
+ explicit ModuleGlobalHash(Isolate* isolate) : isolate_(isolate) {}
+ size_t operator()(const Global<Module>& module) const {
+ return module.Get(isolate_)->GetIdentityHash();
+ }
+
+ private:
+ Isolate* isolate_;
+ };
+
+ public:
+ explicit ModuleEmbedderData(Isolate* isolate)
+ : module_to_directory_map(10, ModuleGlobalHash(isolate)) {}
+
+ // Map from normalized module specifier to Module.
+ std::unordered_map<std::string, Global<Module>> specifier_to_module_map;
+ // Map from Module to the directory that Module was loaded from.
+ std::unordered_map<Global<Module>, std::string, ModuleGlobalHash>
+ module_to_directory_map;
+};
+
+enum {
+ // The debugger reserves the first slot in the Context embedder data.
+ kDebugIdIndex = Context::kDebugIdIndex,
+ kModuleEmbedderDataIndex,
+ kInspectorClientIndex
+};
+
+void InitializeModuleEmbedderData(Local<Context> context) {
+ context->SetAlignedPointerInEmbedderData(
+ kModuleEmbedderDataIndex, new ModuleEmbedderData(context->GetIsolate()));
+}
+
+ModuleEmbedderData* GetModuleDataFromContext(Local<Context> context) {
+ return static_cast<ModuleEmbedderData*>(
+ context->GetAlignedPointerFromEmbedderData(kModuleEmbedderDataIndex));
+}
+
+void DisposeModuleEmbedderData(Local<Context> context) {
+ delete GetModuleDataFromContext(context);
+ context->SetAlignedPointerInEmbedderData(kModuleEmbedderDataIndex, nullptr);
}
MaybeLocal<Module> ResolveModuleCallback(Local<Context> context,
Local<String> specifier,
- Local<Module> referrer,
- Local<Value> data) {
+ Local<Module> referrer) {
Isolate* isolate = context->GetIsolate();
- auto module_map = static_cast<std::map<std::string, Global<Module>>*>(
- External::Cast(*data)->Value());
- Local<String> dir_name = Local<String>::Cast(referrer->GetEmbedderData());
+ ModuleEmbedderData* d = GetModuleDataFromContext(context);
+ auto dir_name_it =
+ d->module_to_directory_map.find(Global<Module>(isolate, referrer));
+ CHECK(dir_name_it != d->module_to_directory_map.end());
std::string absolute_path =
- EnsureAbsolutePath(ToSTLString(specifier), ToSTLString(dir_name));
- auto it = module_map->find(absolute_path);
- if (it != module_map->end()) {
- return it->second.Get(isolate);
- }
- return MaybeLocal<Module>();
+ NormalizePath(ToSTLString(specifier), dir_name_it->second);
+ auto module_it = d->specifier_to_module_map.find(absolute_path);
+ CHECK(module_it != d->specifier_to_module_map.end());
+ return module_it->second.Get(isolate);
}
} // anonymous namespace
-MaybeLocal<Module> Shell::FetchModuleTree(
- Isolate* isolate, const std::string& file_name,
- std::map<std::string, Global<Module>>* module_map) {
+MaybeLocal<Module> Shell::FetchModuleTree(Local<Context> context,
+ const std::string& file_name) {
DCHECK(IsAbsolutePath(file_name));
+ Isolate* isolate = context->GetIsolate();
TryCatch try_catch(isolate);
try_catch.SetVerbose(true);
Local<String> source_text = ReadFile(isolate, file_name.c_str());
@@ -597,19 +649,22 @@ MaybeLocal<Module> Shell::FetchModuleTree(
ReportException(isolate, &try_catch);
return MaybeLocal<Module>();
}
- module_map->insert(
- std::make_pair(file_name, Global<Module>(isolate, module)));
+
+ ModuleEmbedderData* d = GetModuleDataFromContext(context);
+ CHECK(d->specifier_to_module_map
+ .insert(std::make_pair(file_name, Global<Module>(isolate, module)))
+ .second);
std::string dir_name = DirName(file_name);
- module->SetEmbedderData(
- String::NewFromUtf8(isolate, dir_name.c_str(), NewStringType::kNormal)
- .ToLocalChecked());
+ CHECK(d->module_to_directory_map
+ .insert(std::make_pair(Global<Module>(isolate, module), dir_name))
+ .second);
for (int i = 0, length = module->GetModuleRequestsLength(); i < length; ++i) {
Local<String> name = module->GetModuleRequest(i);
- std::string absolute_path = EnsureAbsolutePath(ToSTLString(name), dir_name);
- if (!module_map->count(absolute_path)) {
- if (FetchModuleTree(isolate, absolute_path, module_map).IsEmpty()) {
+ std::string absolute_path = NormalizePath(ToSTLString(name), dir_name);
+ if (!d->specifier_to_module_map.count(absolute_path)) {
+ if (FetchModuleTree(context, absolute_path).IsEmpty()) {
return MaybeLocal<Module>();
}
}
@@ -621,14 +676,14 @@ MaybeLocal<Module> Shell::FetchModuleTree(
bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
HandleScope handle_scope(isolate);
- std::string absolute_path =
- EnsureAbsolutePath(file_name, GetWorkingDirectory());
- std::replace(absolute_path.begin(), absolute_path.end(), '\\', '/');
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
+ Context::Scope context_scope(realm);
+
+ std::string absolute_path = NormalizePath(file_name, GetWorkingDirectory());
Local<Module> root_module;
- std::map<std::string, Global<Module>> module_map;
- if (!FetchModuleTree(isolate, absolute_path, &module_map)
- .ToLocal(&root_module)) {
+ if (!FetchModuleTree(realm, absolute_path).ToLocal(&root_module)) {
return false;
}
@@ -636,16 +691,9 @@ bool Shell::ExecuteModule(Isolate* isolate, const char* file_name) {
try_catch.SetVerbose(true);
MaybeLocal<Value> maybe_result;
- {
- PerIsolateData* data = PerIsolateData::Get(isolate);
- Local<Context> realm = data->realms_[data->realm_current_].Get(isolate);
- Context::Scope context_scope(realm);
-
- if (root_module->Instantiate(realm, ResolveModuleCallback,
- External::New(isolate, &module_map))) {
- maybe_result = root_module->Evaluate(realm);
- EmptyMessageQueues(isolate);
- }
+ if (root_module->Instantiate(realm, ResolveModuleCallback)) {
+ maybe_result = root_module->Evaluate(realm);
+ EmptyMessageQueues(isolate);
}
Local<Value> result;
if (!maybe_result.ToLocal(&result)) {
@@ -670,9 +718,15 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
PerIsolateData::RealmScope::~RealmScope() {
// Drop realms to avoid keeping them alive.
- for (int i = 0; i < data_->realm_count_; ++i)
- data_->realms_[i].Reset();
+ for (int i = 0; i < data_->realm_count_; ++i) {
+ Global<Context>& realm = data_->realms_[i];
+ if (realm.IsEmpty()) continue;
+ DisposeModuleEmbedderData(realm.Get(data_->isolate_));
+ // TODO(adamk): No need to reset manually, Globals reset when destructed.
+ realm.Reset();
+ }
delete[] data_->realms_;
+ // TODO(adamk): No need to reset manually, Globals reset when destructed.
if (!data_->realm_shared_.IsEmpty())
data_->realm_shared_.Reset();
}
@@ -775,6 +829,7 @@ MaybeLocal<Context> Shell::CreateRealm(
try_catch.ReThrow();
return MaybeLocal<Context>();
}
+ InitializeModuleEmbedderData(context);
data->realms_[index].Reset(isolate, context);
args.GetReturnValue().Set(index);
return context;
@@ -808,6 +863,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid realm index");
return;
}
+ DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
data->realms_[index].Reset();
isolate->ContextDisposedNotification();
isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
@@ -870,19 +926,11 @@ void Shell::RealmSharedSet(Local<String> property,
data->realm_shared_.Reset(isolate, value);
}
-
-void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
- Write(args);
- printf("\n");
- fflush(stdout);
-}
-
-
-void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
if (i != 0) {
- printf(" ");
+ fprintf(file, " ");
}
// Explicitly catch potential exceptions in toString().
@@ -900,14 +948,32 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
v8::String::Utf8Value str(str_obj);
- int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), stdout));
+ int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), file));
if (n != str.length()) {
printf("Error in fwrite\n");
- Exit(1);
+ Shell::Exit(1);
}
}
}
+void WriteAndFlush(FILE* file,
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ WriteToFile(file, args);
+ fprintf(file, "\n");
+ fflush(file);
+}
+
+void Shell::Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ WriteAndFlush(stdout, args);
+}
+
+void Shell::PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ WriteAndFlush(stderr, args);
+}
+
+void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ WriteToFile(stdout, args);
+}
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args[0]);
@@ -1324,6 +1390,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
FunctionTemplate::New(isolate, Print));
global_template->Set(
+ String::NewFromUtf8(isolate, "printErr", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, PrintErr));
+ global_template->Set(
String::NewFromUtf8(isolate, "write", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(isolate, Write));
@@ -1476,6 +1546,7 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
EscapableHandleScope handle_scope(isolate);
Local<Context> context = Context::New(isolate, NULL, global_template);
DCHECK(!context.IsEmpty());
+ InitializeModuleEmbedderData(context);
Context::Scope scope(context);
i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory();
@@ -1498,16 +1569,6 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
return handle_scope.Escape(context);
}
-
-void Shell::Exit(int exit_code) {
- // Use _exit instead of exit to avoid races between isolate
- // threads and static destructors.
- fflush(stdout);
- fflush(stderr);
- _exit(exit_code);
-}
-
-
struct CounterAndKey {
Counter* counter;
const char* key;
@@ -1700,6 +1761,128 @@ void Shell::RunShell(Isolate* isolate) {
printf("\n");
}
+#ifdef V8_INSPECTOR_ENABLED
+class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
+ public:
+ explicit InspectorFrontend(Local<Context> context) {
+ isolate_ = context->GetIsolate();
+ context_.Reset(isolate_, context);
+ }
+ virtual ~InspectorFrontend() = default;
+
+ private:
+ void sendProtocolResponse(int callId,
+ const v8_inspector::StringView& message) override {
+ Send(message);
+ }
+ void sendProtocolNotification(
+ const v8_inspector::StringView& message) override {
+ Send(message);
+ }
+ void flushProtocolNotifications() override {}
+
+ void Send(const v8_inspector::StringView& string) {
+ int length = static_cast<int>(string.length());
+ DCHECK(length < v8::String::kMaxLength);
+ Local<String> message =
+ (string.is8Bit()
+ ? v8::String::NewFromOneByte(
+ isolate_,
+ reinterpret_cast<const uint8_t*>(string.characters8()),
+ v8::NewStringType::kNormal, length)
+ : v8::String::NewFromTwoByte(
+ isolate_,
+ reinterpret_cast<const uint16_t*>(string.characters16()),
+ v8::NewStringType::kNormal, length))
+ .ToLocalChecked();
+ Local<String> callback_name =
+ v8::String::NewFromUtf8(isolate_, "receive", v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<Context> context = context_.Get(isolate_);
+ Local<Value> callback =
+ context->Global()->Get(context, callback_name).ToLocalChecked();
+ if (callback->IsFunction()) {
+ v8::TryCatch try_catch(isolate_);
+ Local<Value> args[] = {message};
+ MaybeLocal<Value> result = Local<Function>::Cast(callback)->Call(
+ context, Undefined(isolate_), 1, args);
+ CHECK(!result.IsEmpty()); // Listeners may not throw.
+ }
+ }
+
+ Isolate* isolate_;
+ Global<Context> context_;
+};
+
+class InspectorClient : public v8_inspector::V8InspectorClient {
+ public:
+ InspectorClient(Local<Context> context, bool connect) {
+ if (!connect) return;
+ isolate_ = context->GetIsolate();
+ channel_.reset(new InspectorFrontend(context));
+ inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
+ session_ =
+ inspector_->connect(1, channel_.get(), v8_inspector::StringView());
+ context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
+ inspector_->contextCreated(v8_inspector::V8ContextInfo(
+ context, kContextGroupId, v8_inspector::StringView()));
+
+ Local<Value> function =
+ FunctionTemplate::New(isolate_, SendInspectorMessage)
+ ->GetFunction(context)
+ .ToLocalChecked();
+ Local<String> function_name =
+ String::NewFromUtf8(isolate_, "send", NewStringType::kNormal)
+ .ToLocalChecked();
+ CHECK(context->Global()->Set(context, function_name, function).FromJust());
+
+ context_.Reset(isolate_, context);
+ }
+
+ private:
+ static v8_inspector::V8InspectorSession* GetSession(Local<Context> context) {
+ InspectorClient* inspector_client = static_cast<InspectorClient*>(
+ context->GetAlignedPointerFromEmbedderData(kInspectorClientIndex));
+ return inspector_client->session_.get();
+ }
+
+ Local<Context> ensureDefaultContextInGroup(int group_id) override {
+ DCHECK(isolate_);
+ DCHECK_EQ(kContextGroupId, group_id);
+ return context_.Get(isolate_);
+ }
+
+ static void SendInspectorMessage(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+ Local<Context> context = isolate->GetCurrentContext();
+ args.GetReturnValue().Set(Undefined(isolate));
+ Local<String> message = args[0]->ToString(context).ToLocalChecked();
+ v8_inspector::V8InspectorSession* session =
+ InspectorClient::GetSession(context);
+ int length = message->Length();
+ std::unique_ptr<uint16_t[]> buffer(new uint16_t[length]);
+ message->Write(buffer.get(), 0, length);
+ v8_inspector::StringView message_view(buffer.get(), length);
+ session->dispatchProtocolMessage(message_view);
+ args.GetReturnValue().Set(True(isolate));
+ }
+
+ static const int kContextGroupId = 1;
+
+ std::unique_ptr<v8_inspector::V8Inspector> inspector_;
+ std::unique_ptr<v8_inspector::V8InspectorSession> session_;
+ std::unique_ptr<v8_inspector::V8Inspector::Channel> channel_;
+ Global<Context> context_;
+ Isolate* isolate_;
+};
+#else // V8_INSPECTOR_ENABLED
+class InspectorClient {
+ public:
+ InspectorClient(Local<Context> context, bool connect) { CHECK(!connect); }
+};
+#endif // V8_INSPECTOR_ENABLED
SourceGroup::~SourceGroup() {
delete thread_;
@@ -1783,7 +1966,6 @@ base::Thread::Options SourceGroup::GetThreadOptions() {
return base::Thread::Options("IsolateThread", 2 * MB);
}
-
void SourceGroup::ExecuteInThread() {
Isolate::CreateParams create_params;
create_params.array_buffer_allocator = Shell::array_buffer_allocator;
@@ -1798,9 +1980,12 @@ void SourceGroup::ExecuteInThread() {
Local<Context> context = Shell::CreateEvaluationContext(isolate);
{
Context::Scope cscope(context);
+ InspectorClient inspector_client(context,
+ Shell::options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
Execute(isolate);
}
+ DisposeModuleEmbedderData(context);
}
Shell::CollectGarbage(isolate);
}
@@ -2060,6 +2245,7 @@ void Worker::ExecuteInThread() {
}
}
}
+ DisposeModuleEmbedderData(context);
}
Shell::CollectGarbage(isolate);
}
@@ -2191,6 +2377,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strncmp(argv[i], "--trace-config=", 15) == 0) {
options.trace_config = argv[i] + 15;
argv[i] = NULL;
+ } else if (strcmp(argv[i], "--enable-inspector") == 0) {
+ options.enable_inspector = true;
+ argv[i] = NULL;
}
}
@@ -2240,9 +2429,11 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
}
{
Context::Scope cscope(context);
+ InspectorClient inspector_client(context, options.enable_inspector);
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
options.isolate_sources[0].Execute(isolate);
}
+ DisposeModuleEmbedderData(context);
}
CollectGarbage(isolate);
for (int i = 1; i < options.num_isolates; ++i) {
@@ -2620,6 +2811,20 @@ int Shell::Main(int argc, char* argv[]) {
? new PredictablePlatform()
: v8::platform::CreateDefaultPlatform();
+ platform::tracing::TracingController* tracing_controller;
+ if (options.trace_enabled) {
+ trace_file.open("v8_trace.json");
+ tracing_controller = new platform::tracing::TracingController();
+ platform::tracing::TraceBuffer* trace_buffer =
+ platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
+ platform::tracing::TraceBuffer::kRingBufferChunks,
+ platform::tracing::TraceWriter::CreateJSONTraceWriter(trace_file));
+ tracing_controller->Initialize(trace_buffer);
+ if (!i::FLAG_verify_predictable) {
+ platform::SetTracingController(g_platform, tracing_controller);
+ }
+ }
+
v8::V8::InitializePlatform(g_platform);
v8::V8::Initialize();
if (options.natives_blob || options.snapshot_blob) {
@@ -2649,11 +2854,12 @@ int Shell::Main(int argc, char* argv[]) {
base::SysInfo::AmountOfVirtualMemory());
Shell::counter_map_ = new CounterMap();
- if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
+ if (i::FLAG_dump_counters || i::FLAG_gc_stats) {
create_params.counter_lookup_callback = LookupCounter;
create_params.create_histogram_callback = CreateHistogram;
create_params.add_histogram_sample_callback = AddHistogramSample;
}
+
Isolate* isolate = Isolate::New(create_params);
{
Isolate::Scope scope(isolate);
@@ -2661,14 +2867,6 @@ int Shell::Main(int argc, char* argv[]) {
PerIsolateData data(isolate);
if (options.trace_enabled) {
- trace_file.open("v8_trace.json");
- platform::tracing::TracingController* tracing_controller =
- new platform::tracing::TracingController();
- platform::tracing::TraceBuffer* trace_buffer =
- platform::tracing::TraceBuffer::CreateTraceBufferRingBuffer(
- platform::tracing::TraceBuffer::kRingBufferChunks,
- platform::tracing::TraceWriter::CreateJSONTraceWriter(
- trace_file));
platform::tracing::TraceConfig* trace_config;
if (options.trace_config) {
int size = 0;
@@ -2681,11 +2879,7 @@ int Shell::Main(int argc, char* argv[]) {
trace_config =
platform::tracing::TraceConfig::CreateDefaultTraceConfig();
}
- tracing_controller->Initialize(trace_buffer);
tracing_controller->StartTracing(trace_config);
- if (!i::FLAG_verify_predictable) {
- platform::SetTracingController(g_platform, tracing_controller);
- }
}
if (options.dump_heap_constants) {
@@ -2726,7 +2920,7 @@ int Shell::Main(int argc, char* argv[]) {
RunShell(isolate);
}
- if (i::FLAG_ignition && i::FLAG_trace_ignition_dispatches &&
+ if (i::FLAG_trace_ignition_dispatches &&
i::FLAG_trace_ignition_dispatches_output_file != nullptr) {
WriteIgnitionDispatchCountersFile(isolate);
}
@@ -2747,6 +2941,9 @@ int Shell::Main(int argc, char* argv[]) {
V8::Dispose();
V8::ShutdownPlatform();
delete g_platform;
+ if (i::FLAG_verify_predictable) {
+ delete tracing_controller;
+ }
return result;
}
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index e0270f5178..f6ceeaa78b 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -39,6 +39,7 @@
'type': 'executable',
'dependencies': [
'v8.gyp:v8',
+ 'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
# Generated source files need this explicitly:
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 32a7d25c2f..5e7abafb04 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -5,7 +5,6 @@
#ifndef V8_D8_H_
#define V8_D8_H_
-#include <map>
#include <string>
#include "src/allocation.h"
@@ -275,6 +274,7 @@ class ShellOptions {
dump_heap_constants(false),
expected_to_throw(false),
mock_arraybuffer_allocator(false),
+ enable_inspector(false),
num_isolates(1),
compile_options(v8::ScriptCompiler::kNoCompileOptions),
isolate_sources(NULL),
@@ -304,6 +304,7 @@ class ShellOptions {
bool dump_heap_constants;
bool expected_to_throw;
bool mock_arraybuffer_allocator;
+ bool enable_inspector;
int num_isolates;
v8::ScriptCompiler::CompileOptions compile_options;
SourceGroup* isolate_sources;
@@ -371,6 +372,7 @@ class Shell : public i::AllStatic {
const PropertyCallbackInfo<void>& info);
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
static void QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args);
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -455,9 +457,8 @@ class Shell : public i::AllStatic {
static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static MaybeLocal<Context> CreateRealm(
const v8::FunctionCallbackInfo<v8::Value>& args);
- static MaybeLocal<Module> FetchModuleTree(
- Isolate* isolate, const std::string& file_name,
- std::map<std::string, Global<Module>>* module_map);
+ static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
+ const std::string& file_name);
};
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index f98ad64f1f..cc7603322d 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -25,7 +25,7 @@ static const char kDaysInMonths[] =
void DateCache::ResetDateCache() {
static const int kMaxStamp = Smi::kMaxValue;
if (stamp_->value() >= kMaxStamp) {
- stamp_ = Smi::FromInt(0);
+ stamp_ = Smi::kZero;
} else {
stamp_ = Smi::FromInt(stamp_->value() + 1);
}
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index c98f911f75..5da1656fad 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -77,23 +77,11 @@ Handle<Object> FrameInspector::GetExpression(int index) {
: handle(frame_->GetExpression(index), isolate_);
}
-
int FrameInspector::GetSourcePosition() {
- if (is_optimized_) return deoptimized_frame_->GetSourcePosition();
- AbstractCode* code;
- int code_offset;
- if (is_interpreted_) {
- InterpretedFrame* frame = reinterpret_cast<InterpretedFrame*>(frame_);
- code = AbstractCode::cast(frame->GetBytecodeArray());
- code_offset = frame->GetBytecodeOffset();
- } else {
- code = AbstractCode::cast(frame_->LookupCode());
- code_offset = static_cast<int>(frame_->pc() - code->instruction_start());
- }
- return code->SourcePosition(code_offset);
+ return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
+ : frame_->position();
}
-
bool FrameInspector::IsConstructor() {
return is_optimized_ && !is_bottommost_
? deoptimized_frame_->HasConstructStub()
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
new file mode 100644
index 0000000000..443ed4232f
--- /dev/null
+++ b/deps/v8/src/debug/debug-interface.h
@@ -0,0 +1,209 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
+#define V8_DEBUG_DEBUG_INTERFACE_H_
+
+#include "include/v8-debug.h"
+#include "include/v8-util.h"
+#include "include/v8.h"
+
+namespace v8 {
+
+class DebugInterface {
+ public:
+ /**
+ * An event details object passed to the debug event listener.
+ */
+ class EventDetails : public v8::Debug::EventDetails {
+ public:
+ /**
+ * Event type.
+ */
+ virtual v8::DebugEvent GetEvent() const = 0;
+
+ /**
+ * Access to execution state and event data of the debug event. Don't store
+ * these cross callbacks as their content becomes invalid.
+ */
+ virtual Local<Object> GetExecutionState() const = 0;
+ virtual Local<Object> GetEventData() const = 0;
+
+ /**
+ * Get the context active when the debug event happened. Note this is not
+ * the current active context as the JavaScript part of the debugger is
+ * running in its own context which is entered at this point.
+ */
+ virtual Local<Context> GetEventContext() const = 0;
+
+ /**
+ * Client data passed with the corresponding callback when it was
+ * registered.
+ */
+ virtual Local<Value> GetCallbackData() const = 0;
+
+ virtual ~EventDetails() {}
+ };
+
+ /**
+ * Debug event callback function.
+ *
+ * \param event_details object providing information about the debug event
+ *
+ * A EventCallback does not take possession of the event data,
+ * and must not rely on the data persisting after the handler returns.
+ */
+ typedef void (*EventCallback)(const EventDetails& event_details);
+
+ static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Debugger is running in its own context which is entered while debugger
+ * messages are being dispatched. This is an explicit getter for this
+ * debugger context. Note that the content of the debugger context is subject
+ * to change. The Context exists only when the debugger is active, i.e. at
+ * least one DebugEventListener or MessageHandler is set.
+ */
+ static Local<Context> GetDebugContext(Isolate* isolate);
+
+ /**
+ * Run a JavaScript function in the debugger.
+ * \param fun the function to call
+ * \param data passed as second argument to the function
+ * With this call the debugger is entered and the function specified is called
+ * with the execution state as the first argument. This makes it possible to
+ * get access to information otherwise not available during normal JavaScript
+ * execution e.g. details on stack frames. Receiver of the function call will
+ * be the debugger context global object, however this is a subject to change.
+ * The following example shows a JavaScript function which when passed to
+ * v8::Debug::Call will return the current line of JavaScript execution.
+ *
+ * \code
+ * function frame_source_line(exec_state) {
+ * return exec_state.frame(0).sourceLine();
+ * }
+ * \endcode
+ */
+ // TODO(dcarney): data arg should be a MaybeLocal
+ static MaybeLocal<Value> Call(Local<Context> context,
+ v8::Local<v8::Function> fun,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Enable/disable LiveEdit functionality for the given Isolate
+ * (default Isolate if not provided). V8 will abort if LiveEdit is
+ * unexpectedly used. LiveEdit is enabled by default.
+ */
+ static void SetLiveEditEnabled(Isolate* isolate, bool enable);
+
+ // Schedule a debugger break to happen when JavaScript code is run
+ // in the given isolate.
+ static void DebugBreak(Isolate* isolate);
+
+ // Remove scheduled debugger break in given isolate if it has not
+ // happened yet.
+ static void CancelDebugBreak(Isolate* isolate);
+
+ /**
+ * Returns array of internal properties specific to the value type. Result has
+ * the following format: [<name>, <value>,...,<name>, <value>]. Result array
+ * will be allocated in the current context.
+ */
+ static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
+ Local<Value> value);
+
+ enum ExceptionBreakState {
+ NoBreakOnException = 0,
+ BreakOnUncaughtException = 1,
+ BreakOnAnyException = 2
+ };
+
+ /**
+ * Defines if VM will pause on exceptions or not.
+ * If BreakOnAnyExceptions is set then VM will pause on caught and uncaught
+ * exception, if BreakOnUncaughtException is set then VM will pause only on
+ * uncaught exception, otherwise VM won't stop on any exception.
+ */
+ static void ChangeBreakOnException(Isolate* isolate,
+ ExceptionBreakState state);
+
+ enum StepAction {
+ StepOut = 0, // Step out of the current function.
+ StepNext = 1, // Step to the next statement in the current function.
+ StepIn = 2, // Step into new functions invoked or the next statement
+ // in the current function.
+ StepFrame = 3 // Step into a new frame or return to previous frame.
+ };
+
+ static void PrepareStep(Isolate* isolate, StepAction action);
+ static void ClearStepping(Isolate* isolate);
+
+ /**
+ * Defines location inside script.
+ * Lines and columns are 0-based.
+ */
+ class Location {
+ public:
+ Location(int lineNumber, int columnNumber);
+ /**
+ * Create empty location.
+ */
+ Location();
+
+ int GetLineNumber() const;
+ int GetColumnNumber() const;
+ bool IsEmpty() const;
+
+ private:
+ int lineNumber_;
+ int columnNumber_;
+ };
+
+ /**
+ * Native wrapper around v8::internal::Script object.
+ */
+ class Script {
+ public:
+ v8::Isolate* GetIsolate() const;
+
+ ScriptOriginOptions OriginOptions() const;
+ bool WasCompiled() const;
+ int Id() const;
+ int LineOffset() const;
+ int ColumnOffset() const;
+ std::vector<int> LineEnds() const;
+ MaybeLocal<String> Name() const;
+ MaybeLocal<String> SourceURL() const;
+ MaybeLocal<String> SourceMappingURL() const;
+ MaybeLocal<String> ContextData() const;
+ MaybeLocal<String> Source() const;
+ bool GetPossibleBreakpoints(const Location& start, const Location& end,
+ std::vector<Location>* locations) const;
+
+ /**
+ * script parameter is a wrapper v8::internal::JSObject for
+ * v8::internal::Script.
+ * This function gets v8::internal::Script from v8::internal::JSObject and
+ * wraps it with DebugInterface::Script.
+ * Returns empty local if not called with a valid wrapper of
+ * v8::internal::Script.
+ */
+ static MaybeLocal<Script> Wrap(Isolate* isolate,
+ v8::Local<v8::Object> script);
+
+ private:
+ int GetSourcePosition(const Location& location) const;
+ };
+
+ /**
+ * Return array of compiled scripts.
+ */
+ static void GetLoadedScripts(Isolate* isolate,
+ PersistentValueVector<Script>& scripts);
+};
+
+} // namespace v8
+
+#endif // V8_DEBUG_DEBUG_INTERFACE_H_
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index c7eb0f75f9..c84d32ae7d 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -87,17 +87,13 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Reparse the code and analyze the scopes.
// Check whether we are in global, eval or function code.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
std::unique_ptr<ParseInfo> info;
if (scope_info->scope_type() != FUNCTION_SCOPE) {
// Global or eval code.
Handle<Script> script(Script::cast(shared_info->script()));
info.reset(new ParseInfo(&zone, script));
- info->set_toplevel();
- if (scope_info->scope_type() == SCRIPT_SCOPE) {
- info->set_global();
- } else {
- DCHECK(scope_info->scope_type() == EVAL_SCOPE);
+ if (scope_info->scope_type() == EVAL_SCOPE) {
info->set_eval();
if (!function->context()->IsNativeContext()) {
info->set_outer_scope_info(handle(function->context()->scope_info()));
@@ -105,10 +101,14 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Language mode may be inherited from the eval caller.
// Retrieve it from shared function info.
info->set_language_mode(shared_info->language_mode());
+ } else if (scope_info->scope_type() == MODULE_SCOPE) {
+ info->set_module();
+ } else {
+ DCHECK(scope_info->scope_type() == SCRIPT_SCOPE);
}
} else {
// Inner function.
- info.reset(new ParseInfo(&zone, function));
+ info.reset(new ParseInfo(&zone, shared_info));
}
if (Parser::ParseStatic(info.get()) && Rewriter::Rewrite(info.get())) {
DeclarationScope* scope = info->literal()->scope();
@@ -610,17 +610,10 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
Handle<Context> context = CurrentContext();
DCHECK(context->IsModuleContext());
Handle<ScopeInfo> scope_info(context->scope_info());
-
- // Allocate and initialize a JSObject with all the members of the debugged
- // module.
Handle<JSObject> module_scope =
isolate_->factory()->NewJSObjectWithNullProto();
-
- // Fill all context locals.
CopyContextLocalsToScopeObject(scope_info, context, module_scope);
-
- // TODO(neis): Also collect stack locals as well as imports and exports.
-
+ CopyModuleVarsToScopeObject(scope_info, context, module_scope);
return module_scope;
}
@@ -791,6 +784,36 @@ void ScopeIterator::CopyContextLocalsToScopeObject(
}
}
+void ScopeIterator::CopyModuleVarsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
+ Isolate* isolate = scope_info->GetIsolate();
+
+ int module_variable_count =
+ Smi::cast(scope_info->get(scope_info->ModuleVariableCountIndex()))
+ ->value();
+ for (int i = 0; i < module_variable_count; ++i) {
+ Handle<String> local_name;
+ Handle<Object> value;
+ {
+ String* name;
+ int index;
+ scope_info->ModuleVariable(i, &name, &index);
+ CHECK(!ScopeInfo::VariableIsSynthetic(name));
+ local_name = handle(name, isolate);
+ value = Module::LoadVariable(handle(context->module(), isolate), index);
+ }
+
+ // Reflect variables under TDZ as undefined in scope object.
+ if (value->IsTheHole(isolate)) continue;
+ // This should always succeed.
+ // TODO(verwaest): Use AddDataProperty instead.
+ JSObject::SetOwnPropertyIgnoreAttributes(scope_object, local_name, value,
+ NONE)
+ .Check();
+ }
+}
+
void ScopeIterator::CopyContextExtensionToScopeObject(
Handle<Context> context, Handle<JSObject> scope_object,
KeyCollectionMode mode) {
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 026a1da0f5..87c85b8ba5 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -153,6 +153,9 @@ class ScopeIterator {
void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object);
+ void CopyModuleVarsToScopeObject(Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<JSObject> scope_object);
void CopyContextExtensionToScopeObject(Handle<Context> context,
Handle<JSObject> scope_object,
KeyCollectionMode mode);
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index e93dd35661..960327b545 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -27,7 +27,6 @@
#include "src/log.h"
#include "src/messages.h"
#include "src/snapshot/natives.h"
-#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-module.h"
#include "include/v8-debug.h"
@@ -212,7 +211,7 @@ void CodeBreakIterator::Next() {
int offset = code_offset();
while (!source_position_iterator_.done() &&
source_position_iterator_.code_offset() <= offset) {
- position_ = source_position_iterator_.source_position();
+ position_ = source_position_iterator_.source_position().ScriptOffset();
if (source_position_iterator_.is_statement()) {
statement_position_ = position_;
}
@@ -296,7 +295,7 @@ void BytecodeArrayBreakIterator::Next() {
if (!first) source_position_iterator_.Advance();
first = false;
if (Done()) return;
- position_ = source_position_iterator_.source_position();
+ position_ = source_position_iterator_.source_position().ScriptOffset();
if (source_position_iterator_.is_statement()) {
statement_position_ = position_;
}
@@ -1264,7 +1263,8 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
DCHECK(shared->is_compiled());
if (isolate_->concurrent_recompilation_enabled()) {
- isolate_->optimizing_compile_dispatcher()->Flush();
+ isolate_->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kBlock);
}
List<Handle<JSFunction> > functions;
@@ -1329,8 +1329,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
// We do not need to recompile to debug bytecode.
if (baseline_exists && !shared->code()->has_debug_break_slots()) {
- DCHECK(functions.length() > 0);
- if (!Compiler::CompileDebugCode(functions.first())) return false;
+ if (!Compiler::CompileDebugCode(shared)) return false;
}
for (Handle<JSFunction> const function : functions) {
@@ -1352,6 +1351,87 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
return true;
}
+namespace {
+template <typename Iterator>
+void GetBreakablePositions(Iterator* it, int start_position, int end_position,
+ BreakPositionAlignment alignment,
+ std::set<int>* positions) {
+ it->SkipToPosition(start_position, alignment);
+ while (!it->Done() && it->position() < end_position &&
+ it->position() >= start_position) {
+ positions->insert(alignment == STATEMENT_ALIGNED ? it->statement_position()
+ : it->position());
+ it->Next();
+ }
+}
+
+void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
+ int end_position, BreakPositionAlignment alignment,
+ std::set<int>* positions) {
+ if (debug_info->HasDebugCode()) {
+ CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ GetBreakablePositions(&it, start_position, end_position, alignment,
+ positions);
+ } else {
+ DCHECK(debug_info->HasDebugBytecodeArray());
+ BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+ GetBreakablePositions(&it, start_position, end_position, alignment,
+ positions);
+ }
+}
+} // namespace
+
+bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
+ int end_position, std::set<int>* positions) {
+ while (true) {
+ if (!script->shared_function_infos()->IsWeakFixedArray()) return false;
+
+ WeakFixedArray* infos =
+ WeakFixedArray::cast(script->shared_function_infos());
+ HandleScope scope(isolate_);
+ List<Handle<SharedFunctionInfo>> candidates;
+ {
+ WeakFixedArray::Iterator iterator(infos);
+ SharedFunctionInfo* info;
+ while ((info = iterator.Next<SharedFunctionInfo>())) {
+ if (info->end_position() < start_position ||
+ info->start_position() >= end_position) {
+ continue;
+ }
+ if (!info->IsSubjectToDebugging()) continue;
+ if (!info->HasDebugCode() && !info->allows_lazy_compilation()) continue;
+ candidates.Add(i::handle(info));
+ }
+ }
+
+ bool was_compiled = false;
+ for (int i = 0; i < candidates.length(); ++i) {
+ // Code that cannot be compiled lazily are internal and not debuggable.
+ DCHECK(candidates[i]->allows_lazy_compilation());
+ if (!candidates[i]->HasDebugCode()) {
+ if (!Compiler::CompileDebugCode(candidates[i])) {
+ return false;
+ } else {
+ was_compiled = true;
+ }
+ }
+ if (!EnsureDebugInfo(candidates[i], Handle<JSFunction>::null()))
+ return false;
+ }
+ if (was_compiled) continue;
+
+ for (int i = 0; i < candidates.length(); ++i) {
+ CHECK(candidates[i]->HasDebugInfo());
+ Handle<DebugInfo> debug_info(candidates[i]->GetDebugInfo());
+ FindBreakablePositions(debug_info, start_position, end_position,
+ STATEMENT_ALIGNED, positions);
+ }
+ return true;
+ }
+ UNREACHABLE();
+ return false;
+}
+
void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
if (last_step_action() <= StepOut) return;
if (!IsAsyncFunction(generator_object->function()->shared()->kind())) return;
@@ -1450,44 +1530,11 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
return shared_handle;
}
}
- // If not, compile to reveal inner functions, if possible.
- if (shared->allows_lazy_compilation_without_context()) {
- HandleScope scope(isolate_);
- if (!Compiler::CompileDebugCode(handle(shared))) break;
- continue;
- }
-
- // If not possible, comb the heap for the best suitable compile target.
- JSFunction* closure;
- {
- HeapIterator it(isolate_->heap());
- SharedFunctionInfoFinder finder(position);
- while (HeapObject* object = it.next()) {
- JSFunction* candidate_closure = NULL;
- SharedFunctionInfo* candidate = NULL;
- if (object->IsJSFunction()) {
- candidate_closure = JSFunction::cast(object);
- candidate = candidate_closure->shared();
- } else if (object->IsSharedFunctionInfo()) {
- candidate = SharedFunctionInfo::cast(object);
- if (!candidate->allows_lazy_compilation_without_context()) continue;
- } else {
- continue;
- }
- if (candidate->script() == *script) {
- finder.NewCandidate(candidate, candidate_closure);
- }
- }
- closure = finder.ResultClosure();
- shared = finder.Result();
- }
- if (shared == NULL) break;
+ // If not, compile to reveal inner functions.
HandleScope scope(isolate_);
- if (closure == NULL) {
- if (!Compiler::CompileDebugCode(handle(shared))) break;
- } else {
- if (!Compiler::CompileDebugCode(handle(closure))) break;
- }
+ // Code that cannot be compiled lazily are internal and not debuggable.
+ DCHECK(shared->allows_lazy_compilation());
+ if (!Compiler::CompileDebugCode(handle(shared))) break;
}
return isolate_->factory()->undefined_value();
}
@@ -1658,10 +1705,12 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
return CallFunction("MakeCompileEvent", arraysize(argv), argv);
}
-
-MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
+MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<String> type,
+ Handle<Object> id,
+ Handle<String> name) {
+ DCHECK(id->IsNumber());
// Create the async task event object.
- Handle<Object> argv[] = { task_event };
+ Handle<Object> argv[] = {type, id, name};
return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
}
@@ -1786,8 +1835,9 @@ void Debug::OnAfterCompile(Handle<Script> script) {
ProcessCompileEvent(v8::AfterCompile, script);
}
-
-void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
+void Debug::OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
+ Handle<String> name) {
+ DCHECK(id->IsNumber());
if (in_debug_scope() || ignore_events()) return;
HandleScope scope(isolate_);
@@ -1797,7 +1847,7 @@ void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
// Create the script collected state object.
Handle<Object> event_data;
// Bail out and don't call debugger if exception.
- if (!MakeAsyncTaskEvent(data).ToHandle(&event_data)) return;
+ if (!MakeAsyncTaskEvent(type, id, name).ToHandle(&event_data)) return;
// Process debug event.
ProcessDebugEvent(v8::AsyncTaskEvent,
@@ -1843,8 +1893,8 @@ void Debug::CallEventCallback(v8::DebugEvent event,
in_debug_event_listener_ = true;
if (event_listener_->IsForeign()) {
// Invoke the C debug event listener.
- v8::Debug::EventCallback callback =
- FUNCTION_CAST<v8::Debug::EventCallback>(
+ v8::DebugInterface::EventCallback callback =
+ FUNCTION_CAST<v8::DebugInterface::EventCallback>(
Handle<Foreign>::cast(event_listener_)->foreign_address());
EventDetailsImpl event_details(event,
Handle<JSObject>::cast(exec_state),
@@ -1852,7 +1902,7 @@ void Debug::CallEventCallback(v8::DebugEvent event,
event_listener_data_,
client_data);
callback(event_details);
- DCHECK(!isolate_->has_scheduled_exception());
+ CHECK(!isolate_->has_scheduled_exception());
} else {
// Invoke the JavaScript debug event listener.
DCHECK(event_listener_->IsJSFunction());
@@ -1861,8 +1911,10 @@ void Debug::CallEventCallback(v8::DebugEvent event,
event_data,
event_listener_data_ };
Handle<JSReceiver> global = isolate_->global_proxy();
- Execution::TryCall(isolate_, Handle<JSFunction>::cast(event_listener_),
- global, arraysize(argv), argv);
+ MaybeHandle<Object> result =
+ Execution::Call(isolate_, Handle<JSFunction>::cast(event_listener_),
+ global, arraysize(argv), argv);
+ CHECK(!result.is_null()); // Listeners must not throw.
}
in_debug_event_listener_ = previous;
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index c4e8c17246..6e49db6ebd 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -11,6 +11,7 @@
#include "src/base/atomicops.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
+#include "src/debug/debug-interface.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/flags.h"
@@ -290,7 +291,7 @@ class MessageImpl: public v8::Debug::Message {
// Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public v8::Debug::EventDetails {
+class EventDetailsImpl : public v8::DebugInterface::EventDetails {
public:
EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
@@ -417,7 +418,8 @@ class Debug {
void OnCompileError(Handle<Script> script);
void OnBeforeCompile(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void OnAsyncTaskEvent(Handle<JSObject> data);
+ void OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
+ Handle<String> name);
// API facing.
void SetEventListener(Handle<Object> callback, Handle<Object> data);
@@ -459,6 +461,8 @@ class Debug {
void ClearStepOut();
bool PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared);
+ bool GetPossibleBreakpoints(Handle<Script> script, int start_position,
+ int end_position, std::set<int>* positions);
void RecordAsyncFunction(Handle<JSGeneratorObject> generator_object);
@@ -499,8 +503,11 @@ class Debug {
void Iterate(ObjectVisitor* v);
bool CheckExecutionState(int id) {
- return is_active() && !debug_context().is_null() && break_id() != 0 &&
- break_id() == id;
+ return CheckExecutionState() && break_id() == id;
+ }
+
+ bool CheckExecutionState() {
+ return is_active() && !debug_context().is_null() && break_id() != 0;
}
// Flags and states.
@@ -569,11 +576,11 @@ class Debug {
}
void clear_suspended_generator() {
- thread_local_.suspended_generator_ = Smi::FromInt(0);
+ thread_local_.suspended_generator_ = Smi::kZero;
}
bool has_suspended_generator() const {
- return thread_local_.suspended_generator_ != Smi::FromInt(0);
+ return thread_local_.suspended_generator_ != Smi::kZero;
}
void OnException(Handle<Object> exception, Handle<Object> promise);
@@ -588,8 +595,9 @@ class Debug {
Handle<Object> promise);
MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
Handle<Script> script, v8::DebugEvent type);
- MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
- Handle<JSObject> task_event);
+ MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(Handle<String> type,
+ Handle<Object> id,
+ Handle<String> name);
// Mirror cache handling.
void ClearMirrorCache();
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index b2111eb8d7..8031763a13 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -858,16 +858,6 @@ Debug.debuggerFlags = function() {
return debugger_flags;
};
-Debug.getWasmFunctionOffsetTable = function(scriptId) {
- var script = scriptById(scriptId);
- return script ? %GetWasmFunctionOffsetTable(script) : UNDEFINED;
-}
-
-Debug.disassembleWasmFunction = function(scriptId) {
- var script = scriptById(scriptId);
- return script ? %DisassembleWasmFunction(script) : UNDEFINED;
-}
-
Debug.MakeMirror = MakeMirror;
function MakeExecutionState(break_id) {
@@ -1142,15 +1132,15 @@ function MakeScriptObject_(script, include_source) {
}
-function MakeAsyncTaskEvent(event_data) {
- return new AsyncTaskEvent(event_data);
+function MakeAsyncTaskEvent(type, id, name) {
+ return new AsyncTaskEvent(type, id, name);
}
-function AsyncTaskEvent(event_data) {
- this.type_ = event_data.type;
- this.name_ = event_data.name;
- this.id_ = event_data.id;
+function AsyncTaskEvent(type, id, name) {
+ this.type_ = type;
+ this.id_ = id;
+ this.name_ = name;
}
@@ -2196,6 +2186,7 @@ DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
};
+// TODO(5510): remove this.
DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
response.body = {
V8Version: %GetV8Version()
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index b451842f90..ace829739f 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1105,15 +1105,16 @@ static int TranslatePosition(int original_position,
void TranslateSourcePositionTable(Handle<AbstractCode> code,
Handle<JSArray> position_change_array) {
Isolate* isolate = code->GetIsolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
SourcePositionTableBuilder builder(&zone);
Handle<ByteArray> source_position_table(code->source_position_table());
for (SourcePositionTableIterator iterator(*source_position_table);
!iterator.done(); iterator.Advance()) {
- int position = iterator.source_position();
- int new_position = TranslatePosition(position, position_change_array);
- builder.AddPosition(iterator.code_offset(), new_position,
+ SourcePosition position = iterator.source_position();
+ position.SetScriptOffset(
+ TranslatePosition(position.ScriptOffset(), position_change_array));
+ builder.AddPosition(iterator.code_offset(), position,
iterator.is_statement());
}
@@ -1426,7 +1427,7 @@ static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
for (Address a = unused_stack_top;
a < unused_stack_bottom;
a += kPointerSize) {
- Memory::Object_at(a) = Smi::FromInt(0);
+ Memory::Object_at(a) = Smi::kZero;
}
return NULL;
@@ -1517,7 +1518,7 @@ static const char* DropActivationsInActiveThreadImpl(Isolate* isolate,
TARGET& target, // NOLINT
bool do_drop) {
Debug* debug = isolate->debug();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
@@ -1900,9 +1901,7 @@ Handle<Object> LiveEditFunctionTracker::SerializeFunctionScope(Scope* scope) {
Scope* current_scope = scope;
while (current_scope != NULL) {
HandleScope handle_scope(isolate_);
- ZoneList<Variable*>* locals = current_scope->locals();
- for (int i = 0; i < locals->length(); i++) {
- Variable* var = locals->at(i);
+ for (Variable* var : *current_scope->locals()) {
if (!var->IsContextSlot()) continue;
int context_index = var->index() - Context::MIN_CONTEXT_SLOTS;
int location = scope_info_length + context_index * 2;
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 165e172449..4bc86da7f4 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -257,6 +257,7 @@ var ScopeType = { Global: 0,
Block: 5,
Script: 6,
Eval: 7,
+ Module: 8,
};
/**
@@ -1539,7 +1540,7 @@ PropertyMirror.prototype.value = function() {
/**
* Returns whether this property value is an exception.
- * @return {booolean} True if this property value is an exception
+ * @return {boolean} True if this property value is an exception
*/
PropertyMirror.prototype.isException = function() {
return this.exception_ ? true : false;
@@ -1558,7 +1559,7 @@ PropertyMirror.prototype.propertyType = function() {
/**
* Returns whether this property has a getter defined through __defineGetter__.
- * @return {booolean} True if this property has a getter
+ * @return {boolean} True if this property has a getter
*/
PropertyMirror.prototype.hasGetter = function() {
return this.getter_ ? true : false;
@@ -1567,7 +1568,7 @@ PropertyMirror.prototype.hasGetter = function() {
/**
* Returns whether this property has a setter defined through __defineSetter__.
- * @return {booolean} True if this property has a setter
+ * @return {boolean} True if this property has a setter
*/
PropertyMirror.prototype.hasSetter = function() {
return this.setter_ ? true : false;
@@ -1878,6 +1879,15 @@ FrameMirror.prototype.func = function() {
};
+FrameMirror.prototype.script = function() {
+ if (!this.script_) {
+ this.script_ = MakeMirror(this.details_.script());
+ }
+
+ return this.script_;
+}
+
+
FrameMirror.prototype.receiver = function() {
return MakeMirror(this.details_.receiver());
};
@@ -1954,12 +1964,9 @@ FrameMirror.prototype.sourcePosition = function() {
FrameMirror.prototype.sourceLocation = function() {
- var func = this.func();
- if (func.resolved()) {
- var script = func.script();
- if (script) {
- return script.locationFromPosition(this.sourcePosition(), true);
- }
+ var script = this.script();
+ if (script) {
+ return script.locationFromPosition(this.sourcePosition(), true);
}
};
diff --git a/deps/v8/src/deoptimize-reason.h b/deps/v8/src/deoptimize-reason.h
index d28ec4750b..8b93839e10 100644
--- a/deps/v8/src/deoptimize-reason.h
+++ b/deps/v8/src/deoptimize-reason.h
@@ -10,74 +10,70 @@
namespace v8 {
namespace internal {
-#define DEOPTIMIZE_REASON_LIST(V) \
- V(AccessCheck, "Access check needed") \
- V(NoReason, "no reason") \
- V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
- V(ConversionOverflow, "conversion overflow") \
- V(DivisionByZero, "division by zero") \
- V(ElementsKindUnhandledInKeyedLoadGenericStub, \
- "ElementsKind unhandled in KeyedLoadGenericStub") \
- V(ExpectedHeapNumber, "Expected heap number") \
- V(ExpectedSmi, "Expected smi") \
- V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
- V(Hole, "hole") \
- V(InstanceMigrationFailed, "instance migration failed") \
- V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
- V(InsufficientTypeFeedbackForCallWithArguments, \
- "Insufficient type feedback for call with arguments") \
- V(FastPathFailed, "Falling off the fast path") \
- V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
- "Insufficient type feedback for combined type of binary operation") \
- V(InsufficientTypeFeedbackForGenericNamedAccess, \
- "Insufficient type feedback for generic named access") \
- V(InsufficientTypeFeedbackForGenericKeyedAccess, \
- "Insufficient type feedback for generic keyed access") \
- V(InsufficientTypeFeedbackForLHSOfBinaryOperation, \
- "Insufficient type feedback for LHS of binary operation") \
- V(InsufficientTypeFeedbackForRHSOfBinaryOperation, \
- "Insufficient type feedback for RHS of binary operation") \
- V(KeyIsNegative, "key is negative") \
- V(LostPrecision, "lost precision") \
- V(LostPrecisionOrNaN, "lost precision or NaN") \
- V(MementoFound, "memento found") \
- V(MinusZero, "minus zero") \
- V(NaN, "NaN") \
- V(NegativeKeyEncountered, "Negative key encountered") \
- V(NegativeValue, "negative value") \
- V(NoCache, "no cache") \
- V(NonStrictElementsInKeyedLoadGenericStub, \
- "non-strict elements in KeyedLoadGenericStub") \
- V(NotAHeapNumber, "not a heap number") \
- V(NotAHeapNumberUndefinedBoolean, "not a heap number/undefined/true/false") \
- V(NotAHeapNumberUndefined, "not a heap number/undefined") \
- V(NotAJavaScriptObject, "not a JavaScript object") \
- V(NotASmi, "not a Smi") \
- V(OutOfBounds, "out of bounds") \
- V(OutsideOfRange, "Outside of range") \
- V(Overflow, "overflow") \
- V(Proxy, "proxy") \
- V(ReceiverWasAGlobalObject, "receiver was a global object") \
- V(Smi, "Smi") \
- V(TooManyArguments, "too many arguments") \
- V(TracingElementsTransitions, "Tracing elements transitions") \
- V(TypeMismatchBetweenFeedbackAndConstant, \
- "Type mismatch between feedback and constant") \
- V(UnexpectedCellContentsInConstantGlobalStore, \
- "Unexpected cell contents in constant global store") \
- V(UnexpectedCellContentsInGlobalStore, \
- "Unexpected cell contents in global store") \
- V(UnexpectedObject, "unexpected object") \
- V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
- V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
- V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
- V(UnknownMapInPolymorphicElementAccess, \
- "Unknown map in polymorphic element access") \
- V(UnknownMap, "Unknown map") \
- V(ValueMismatch, "value mismatch") \
- V(WrongInstanceType, "wrong instance type") \
- V(WrongMap, "wrong map") \
- V(UndefinedOrNullInForIn, "null or undefined in for-in") \
+#define DEOPTIMIZE_REASON_LIST(V) \
+ V(AccessCheck, "Access check needed") \
+ V(NoReason, "no reason") \
+ V(ConstantGlobalVariableAssignment, "Constant global variable assignment") \
+ V(ConversionOverflow, "conversion overflow") \
+ V(DivisionByZero, "division by zero") \
+ V(ExpectedHeapNumber, "Expected heap number") \
+ V(ExpectedSmi, "Expected smi") \
+ V(ForcedDeoptToRuntime, "Forced deopt to runtime") \
+ V(Hole, "hole") \
+ V(InstanceMigrationFailed, "instance migration failed") \
+ V(InsufficientTypeFeedbackForCall, "Insufficient type feedback for call") \
+ V(InsufficientTypeFeedbackForCallWithArguments, \
+ "Insufficient type feedback for call with arguments") \
+ V(FastPathFailed, "Falling off the fast path") \
+ V(InsufficientTypeFeedbackForCombinedTypeOfBinaryOperation, \
+ "Insufficient type feedback for combined type of binary operation") \
+ V(InsufficientTypeFeedbackForGenericNamedAccess, \
+ "Insufficient type feedback for generic named access") \
+ V(InsufficientTypeFeedbackForGenericKeyedAccess, \
+ "Insufficient type feedback for generic keyed access") \
+ V(InsufficientTypeFeedbackForLHSOfBinaryOperation, \
+ "Insufficient type feedback for LHS of binary operation") \
+ V(InsufficientTypeFeedbackForRHSOfBinaryOperation, \
+ "Insufficient type feedback for RHS of binary operation") \
+ V(KeyIsNegative, "key is negative") \
+ V(LostPrecision, "lost precision") \
+ V(LostPrecisionOrNaN, "lost precision or NaN") \
+ V(MementoFound, "memento found") \
+ V(MinusZero, "minus zero") \
+ V(NaN, "NaN") \
+ V(NegativeKeyEncountered, "Negative key encountered") \
+ V(NegativeValue, "negative value") \
+ V(NoCache, "no cache") \
+ V(NotAHeapNumber, "not a heap number") \
+ V(NotAHeapNumberUndefined, "not a heap number/undefined") \
+ V(NotAJavaScriptObject, "not a JavaScript object") \
+ V(NotANumberOrOddball, "not a Number or Oddball") \
+ V(NotASmi, "not a Smi") \
+ V(OutOfBounds, "out of bounds") \
+ V(OutsideOfRange, "Outside of range") \
+ V(Overflow, "overflow") \
+ V(Proxy, "proxy") \
+ V(ReceiverWasAGlobalObject, "receiver was a global object") \
+ V(Smi, "Smi") \
+ V(TooManyArguments, "too many arguments") \
+ V(TracingElementsTransitions, "Tracing elements transitions") \
+ V(TypeMismatchBetweenFeedbackAndConstant, \
+ "Type mismatch between feedback and constant") \
+ V(UnexpectedCellContentsInConstantGlobalStore, \
+ "Unexpected cell contents in constant global store") \
+ V(UnexpectedCellContentsInGlobalStore, \
+ "Unexpected cell contents in global store") \
+ V(UnexpectedObject, "unexpected object") \
+ V(UnexpectedRHSOfBinaryOperation, "Unexpected RHS of binary operation") \
+ V(UnknownMapInPolymorphicAccess, "Unknown map in polymorphic access") \
+ V(UnknownMapInPolymorphicCall, "Unknown map in polymorphic call") \
+ V(UnknownMapInPolymorphicElementAccess, \
+ "Unknown map in polymorphic element access") \
+ V(UnknownMap, "Unknown map") \
+ V(ValueMismatch, "value mismatch") \
+ V(WrongInstanceType, "wrong instance type") \
+ V(WrongMap, "wrong map") \
+ V(UndefinedOrNullInForIn, "null or undefined in for-in") \
V(UndefinedOrNullInToObject, "null or undefined in ToObject")
enum class DeoptimizeReason : uint8_t {
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 971de9ec77..dddf62e1cc 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -24,9 +24,8 @@ namespace internal {
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
- base::OS::CommitPageSize(),
- EXECUTABLE,
- NULL);
+ MemoryAllocator::GetCommitPageSize(),
+ EXECUTABLE, NULL);
}
@@ -88,7 +87,7 @@ static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
+ int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
@@ -276,7 +275,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ZoneList<Code*> codes(10, &zone);
// Walk over all optimized code objects in this native context.
@@ -1001,7 +1000,7 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
}
}
- // Compute this frame's PC, state, and continuation.
+ // Compute this frame's PC and state.
FixedArray* raw_data = non_optimized_code->deoptimization_data();
DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
Address start = non_optimized_code->instruction_start();
@@ -1024,7 +1023,7 @@ void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
@@ -1243,7 +1242,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// Translate the accumulator register (depending on frame position).
if (is_topmost) {
- // For topmost frmae, p ut the accumulator on the stack. The bailout state
+ // For topmost frame, put the accumulator on the stack. The bailout state
// for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
// the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
// after materialization).
@@ -1268,9 +1267,15 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
}
CHECK_EQ(0u, output_offset);
+ // Compute this frame's PC and state. The PC will be a special builtin that
+ // continues the bytecode dispatch. Note that non-topmost and lazy-style
+ // bailout handlers also advance the bytecode offset before dispatch, hence
+ // simulating what normal handlers do upon completion of the operation.
Builtins* builtins = isolate_->builtins();
Code* dispatch_builtin =
- builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ (!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
+ ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
+ : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
// Restore accumulator (TOS) register.
output_frame->SetState(
@@ -1292,7 +1297,7 @@ void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
@@ -1602,12 +1607,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
- // The allocation site.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value());
- output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset, "allocation site\n");
-
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
@@ -1659,7 +1658,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
@@ -1843,7 +1842,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
- intptr_t context_value = reinterpret_cast<intptr_t>(Smi::FromInt(0));
+ intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
@@ -2287,7 +2286,7 @@ FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
}
}
-void TranslationBuffer::Add(int32_t value, Zone* zone) {
+void TranslationBuffer::Add(int32_t value) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
DCHECK(value != kMinInt);
// Encode the sign bit in the least significant bit.
@@ -2298,7 +2297,7 @@ void TranslationBuffer::Add(int32_t value, Zone* zone) {
// each byte to indicate whether or not more bytes follow.
do {
uint32_t next = bits >> 7;
- contents_.Add(((bits << 1) & 0xFF) | (next != 0), zone);
+ contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
bits = next;
} while (bits != 0);
}
@@ -2322,167 +2321,166 @@ int32_t TranslationIterator::Next() {
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
- int length = contents_.length();
- Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
- MemCopy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+ Handle<ByteArray> result = factory->NewByteArray(CurrentIndex(), TENURED);
+ contents_.CopyTo(result->GetDataStartAddress());
return result;
}
void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
- buffer_->Add(CONSTRUCT_STUB_FRAME, zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
+ buffer_->Add(CONSTRUCT_STUB_FRAME);
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
}
void Translation::BeginGetterStubFrame(int literal_id) {
- buffer_->Add(GETTER_STUB_FRAME, zone());
- buffer_->Add(literal_id, zone());
+ buffer_->Add(GETTER_STUB_FRAME);
+ buffer_->Add(literal_id);
}
void Translation::BeginSetterStubFrame(int literal_id) {
- buffer_->Add(SETTER_STUB_FRAME, zone());
- buffer_->Add(literal_id, zone());
+ buffer_->Add(SETTER_STUB_FRAME);
+ buffer_->Add(literal_id);
}
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
- buffer_->Add(ARGUMENTS_ADAPTOR_FRAME, zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
+ buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
}
void Translation::BeginTailCallerFrame(int literal_id) {
- buffer_->Add(TAIL_CALLER_FRAME, zone());
- buffer_->Add(literal_id, zone());
+ buffer_->Add(TAIL_CALLER_FRAME);
+ buffer_->Add(literal_id);
}
void Translation::BeginJSFrame(BailoutId node_id,
int literal_id,
unsigned height) {
- buffer_->Add(JS_FRAME, zone());
- buffer_->Add(node_id.ToInt(), zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
+ buffer_->Add(JS_FRAME);
+ buffer_->Add(node_id.ToInt());
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
}
void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
int literal_id, unsigned height) {
- buffer_->Add(INTERPRETED_FRAME, zone());
- buffer_->Add(bytecode_offset.ToInt(), zone());
- buffer_->Add(literal_id, zone());
- buffer_->Add(height, zone());
+ buffer_->Add(INTERPRETED_FRAME);
+ buffer_->Add(bytecode_offset.ToInt());
+ buffer_->Add(literal_id);
+ buffer_->Add(height);
}
void Translation::BeginCompiledStubFrame(int height) {
- buffer_->Add(COMPILED_STUB_FRAME, zone());
- buffer_->Add(height, zone());
+ buffer_->Add(COMPILED_STUB_FRAME);
+ buffer_->Add(height);
}
void Translation::BeginArgumentsObject(int args_length) {
- buffer_->Add(ARGUMENTS_OBJECT, zone());
- buffer_->Add(args_length, zone());
+ buffer_->Add(ARGUMENTS_OBJECT);
+ buffer_->Add(args_length);
}
void Translation::BeginCapturedObject(int length) {
- buffer_->Add(CAPTURED_OBJECT, zone());
- buffer_->Add(length, zone());
+ buffer_->Add(CAPTURED_OBJECT);
+ buffer_->Add(length);
}
void Translation::DuplicateObject(int object_index) {
- buffer_->Add(DUPLICATED_OBJECT, zone());
- buffer_->Add(object_index, zone());
+ buffer_->Add(DUPLICATED_OBJECT);
+ buffer_->Add(object_index);
}
void Translation::StoreRegister(Register reg) {
- buffer_->Add(REGISTER, zone());
- buffer_->Add(reg.code(), zone());
+ buffer_->Add(REGISTER);
+ buffer_->Add(reg.code());
}
void Translation::StoreInt32Register(Register reg) {
- buffer_->Add(INT32_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
+ buffer_->Add(INT32_REGISTER);
+ buffer_->Add(reg.code());
}
void Translation::StoreUint32Register(Register reg) {
- buffer_->Add(UINT32_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
+ buffer_->Add(UINT32_REGISTER);
+ buffer_->Add(reg.code());
}
void Translation::StoreBoolRegister(Register reg) {
- buffer_->Add(BOOL_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
+ buffer_->Add(BOOL_REGISTER);
+ buffer_->Add(reg.code());
}
void Translation::StoreFloatRegister(FloatRegister reg) {
- buffer_->Add(FLOAT_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
+ buffer_->Add(FLOAT_REGISTER);
+ buffer_->Add(reg.code());
}
void Translation::StoreDoubleRegister(DoubleRegister reg) {
- buffer_->Add(DOUBLE_REGISTER, zone());
- buffer_->Add(reg.code(), zone());
+ buffer_->Add(DOUBLE_REGISTER);
+ buffer_->Add(reg.code());
}
void Translation::StoreStackSlot(int index) {
- buffer_->Add(STACK_SLOT, zone());
- buffer_->Add(index, zone());
+ buffer_->Add(STACK_SLOT);
+ buffer_->Add(index);
}
void Translation::StoreInt32StackSlot(int index) {
- buffer_->Add(INT32_STACK_SLOT, zone());
- buffer_->Add(index, zone());
+ buffer_->Add(INT32_STACK_SLOT);
+ buffer_->Add(index);
}
void Translation::StoreUint32StackSlot(int index) {
- buffer_->Add(UINT32_STACK_SLOT, zone());
- buffer_->Add(index, zone());
+ buffer_->Add(UINT32_STACK_SLOT);
+ buffer_->Add(index);
}
void Translation::StoreBoolStackSlot(int index) {
- buffer_->Add(BOOL_STACK_SLOT, zone());
- buffer_->Add(index, zone());
+ buffer_->Add(BOOL_STACK_SLOT);
+ buffer_->Add(index);
}
void Translation::StoreFloatStackSlot(int index) {
- buffer_->Add(FLOAT_STACK_SLOT, zone());
- buffer_->Add(index, zone());
+ buffer_->Add(FLOAT_STACK_SLOT);
+ buffer_->Add(index);
}
void Translation::StoreDoubleStackSlot(int index) {
- buffer_->Add(DOUBLE_STACK_SLOT, zone());
- buffer_->Add(index, zone());
+ buffer_->Add(DOUBLE_STACK_SLOT);
+ buffer_->Add(index);
}
void Translation::StoreLiteral(int literal_id) {
- buffer_->Add(LITERAL, zone());
- buffer_->Add(literal_id, zone());
+ buffer_->Add(LITERAL);
+ buffer_->Add(literal_id);
}
void Translation::StoreArgumentsObject(bool args_known,
int args_index,
int args_length) {
- buffer_->Add(ARGUMENTS_OBJECT, zone());
- buffer_->Add(args_known, zone());
- buffer_->Add(args_index, zone());
- buffer_->Add(args_length, zone());
+ buffer_->Add(ARGUMENTS_OBJECT);
+ buffer_->Add(args_known);
+ buffer_->Add(args_index);
+ buffer_->Add(args_length);
}
@@ -2728,16 +2726,19 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
int last_deopt_id = kNoDeoptimizationId;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
- RelocInfo::ModeMask(RelocInfo::DEOPT_POSITION);
+ RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
+ RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
if (info->pc() >= pc) {
return DeoptInfo(last_position, last_reason, last_deopt_id);
}
- if (info->rmode() == RelocInfo::DEOPT_POSITION) {
- int raw_position = static_cast<int>(info->data());
- last_position = raw_position ? SourcePosition::FromRaw(raw_position)
- : SourcePosition::Unknown();
+ if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
+ int script_offset = static_cast<int>(info->data());
+ it.next();
+ DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID);
+ int inlining_id = static_cast<int>(it.rinfo()->data());
+ last_position = SourcePosition(script_offset, inlining_id);
} else if (info->rmode() == RelocInfo::DEOPT_ID) {
last_deopt_id = static_cast<int>(info->data());
} else if (info->rmode() == RelocInfo::DEOPT_REASON) {
@@ -2765,11 +2766,8 @@ int Deoptimizer::ComputeSourcePositionFromBaselineCode(
int Deoptimizer::ComputeSourcePositionFromBytecodeArray(
SharedFunctionInfo* shared, BailoutId node_id) {
DCHECK(shared->HasBytecodeArray());
- // BailoutId points to the next bytecode in the bytecode aray. Subtract
- // 1 to get the end of current bytecode.
- int code_offset = node_id.ToInt() - 1;
return AbstractCode::cast(shared->bytecode_array())
- ->SourcePosition(code_offset);
+ ->SourcePosition(node_id.ToInt());
}
// static
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 4fb7851710..4d84fb76e8 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -9,6 +9,7 @@
#include "src/deoptimize-reason.h"
#include "src/macro-assembler.h"
#include "src/source-position.h"
+#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
@@ -844,15 +845,15 @@ class DeoptimizerData {
class TranslationBuffer BASE_EMBEDDED {
public:
- explicit TranslationBuffer(Zone* zone) : contents_(256, zone) { }
+ explicit TranslationBuffer(Zone* zone) : contents_(zone) {}
- int CurrentIndex() const { return contents_.length(); }
- void Add(int32_t value, Zone* zone);
+ int CurrentIndex() const { return static_cast<int>(contents_.size()); }
+ void Add(int32_t value);
Handle<ByteArray> CreateByteArray(Factory* factory);
private:
- ZoneList<uint8_t> contents_;
+ ZoneChunkList<uint8_t> contents_;
};
@@ -917,9 +918,9 @@ class Translation BASE_EMBEDDED {
: buffer_(buffer),
index_(buffer->CurrentIndex()),
zone_(zone) {
- buffer_->Add(BEGIN, zone);
- buffer_->Add(frame_count, zone);
- buffer_->Add(jsframe_count, zone);
+ buffer_->Add(BEGIN);
+ buffer_->Add(frame_count);
+ buffer_->Add(jsframe_count);
}
int index() const { return index_; }
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 1da917167f..7036e1b62d 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -172,8 +172,11 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
}
RelocInfo::Mode rmode = relocinfo.rmode();
- if (rmode == RelocInfo::DEOPT_POSITION) {
- out.AddFormatted(" ;; debug: deopt position '%d'",
+ if (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) {
+ out.AddFormatted(" ;; debug: deopt position, script offset '%d'",
+ static_cast<int>(relocinfo.data()));
+ } else if (rmode == RelocInfo::DEOPT_INLINING_ID) {
+ out.AddFormatted(" ;; debug: deopt position, inlining id '%d'",
static_cast<int>(relocinfo.data()));
} else if (rmode == RelocInfo::DEOPT_REASON) {
DeoptimizeReason reason =
diff --git a/deps/v8/src/eh-frame.cc b/deps/v8/src/eh-frame.cc
index 5f0f1c1b35..ce5552fcce 100644
--- a/deps/v8/src/eh-frame.cc
+++ b/deps/v8/src/eh-frame.cc
@@ -252,7 +252,7 @@ void EhFrameWriter::AdvanceLocation(int pc_offset) {
DCHECK_GE(pc_offset, last_pc_offset_);
uint32_t delta = pc_offset - last_pc_offset_;
- DCHECK_EQ(delta % EhFrameConstants::kCodeAlignmentFactor, 0);
+ DCHECK_EQ(delta % EhFrameConstants::kCodeAlignmentFactor, 0u);
uint32_t factored_delta = delta / EhFrameConstants::kCodeAlignmentFactor;
if (factored_delta <= EhFrameConstants::kLocationMask) {
diff --git a/deps/v8/src/eh-frame.h b/deps/v8/src/eh-frame.h
index 6e703d429a..3da4612f2c 100644
--- a/deps/v8/src/eh-frame.h
+++ b/deps/v8/src/eh-frame.h
@@ -5,12 +5,15 @@
#ifndef V8_EH_FRAME_H_
#define V8_EH_FRAME_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
-class EhFrameConstants final : public AllStatic {
+class V8_EXPORT_PRIVATE EhFrameConstants final
+ : public NON_EXPORTED_BASE(AllStatic) {
public:
enum class DwarfOpcodes : byte {
kNop = 0x00,
@@ -61,7 +64,7 @@ class EhFrameConstants final : public AllStatic {
static const int kEhFrameHdrSize = 20;
};
-class EhFrameWriter {
+class V8_EXPORT_PRIVATE EhFrameWriter {
public:
explicit EhFrameWriter(Zone* zone);
@@ -196,7 +199,7 @@ class EhFrameWriter {
DISALLOW_COPY_AND_ASSIGN(EhFrameWriter);
};
-class EhFrameIterator {
+class V8_EXPORT_PRIVATE EhFrameIterator {
public:
EhFrameIterator(const byte* start, const byte* end)
: start_(start), next_(start), end_(end) {
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index fb73d6c24c..ccbdb40706 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -566,8 +566,10 @@ class ElementsAccessorBase : public ElementsAccessor {
Handle<FixedArrayBase> backing_store, uint32_t start,
uint32_t end) {
if (IsFastPackedElementsKind(kind())) return true;
+ Isolate* isolate = backing_store->GetIsolate();
for (uint32_t i = start; i < end; i++) {
- if (!Subclass::HasElementImpl(holder, i, backing_store, ALL_PROPERTIES)) {
+ if (!Subclass::HasElementImpl(isolate, holder, i, backing_store,
+ ALL_PROPERTIES)) {
return false;
}
}
@@ -594,14 +596,16 @@ class ElementsAccessorBase : public ElementsAccessor {
bool HasElement(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store,
PropertyFilter filter) final {
- return Subclass::HasElementImpl(holder, index, backing_store, filter);
+ return Subclass::HasElementImpl(holder->GetIsolate(), holder, index,
+ backing_store, filter);
}
- static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+ static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
+ uint32_t index,
Handle<FixedArrayBase> backing_store,
PropertyFilter filter) {
- return Subclass::GetEntryForIndexImpl(*holder, *backing_store, index,
- filter) != kMaxUInt32;
+ return Subclass::GetEntryForIndexImpl(isolate, *holder, *backing_store,
+ index, filter) != kMaxUInt32;
}
bool HasAccessors(JSObject* holder) final {
@@ -772,6 +776,15 @@ class ElementsAccessorBase : public ElementsAccessor {
JSObject::ValidateElements(array);
}
+ uint32_t NumberOfElements(JSObject* receiver) final {
+ return Subclass::NumberOfElementsImpl(receiver, receiver->elements());
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject* receiver,
+ FixedArrayBase* backing_store) {
+ UNREACHABLE();
+ }
+
static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
if (receiver->IsJSArray()) {
DCHECK(JSArray::cast(receiver)->length()->IsSmi());
@@ -973,6 +986,12 @@ class ElementsAccessorBase : public ElementsAccessor {
packed_size, copy_size);
}
+ void CopyElements(Handle<FixedArrayBase> source, ElementsKind source_kind,
+ Handle<FixedArrayBase> destination, int size) {
+ Subclass::CopyElementsImpl(*source, 0, *destination, source_kind, 0,
+ kPackedSizeNotKnown, size);
+ }
+
Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
return Subclass::NormalizeImpl(object, handle(object->elements()));
}
@@ -1009,7 +1028,7 @@ class ElementsAccessorBase : public ElementsAccessor {
if (!key->ToUint32(&index)) continue;
uint32_t entry = Subclass::GetEntryForIndexImpl(
- *object, object->elements(), index, filter);
+ isolate, *object, object->elements(), index, filter);
if (entry == kMaxUInt32) continue;
PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
@@ -1045,9 +1064,10 @@ class ElementsAccessorBase : public ElementsAccessor {
// Non-dictionary elements can't have all-can-read accessors.
uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
PropertyFilter filter = keys->filter();
- Factory* factory = keys->isolate()->factory();
+ Isolate* isolate = keys->isolate();
+ Factory* factory = isolate->factory();
for (uint32_t i = 0; i < length; i++) {
- if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
+ if (Subclass::HasElementImpl(isolate, object, i, backing_store, filter)) {
keys->AddKey(factory->NewNumberFromUint(i));
}
}
@@ -1060,7 +1080,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t insertion_index = 0) {
uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
for (uint32_t i = 0; i < length; i++) {
- if (Subclass::HasElementImpl(object, i, backing_store, filter)) {
+ if (Subclass::HasElementImpl(isolate, object, i, backing_store, filter)) {
if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
@@ -1090,6 +1110,7 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t nof_property_keys = keys->length();
uint32_t initial_list_length =
Subclass::GetMaxNumberOfEntries(*object, *backing_store);
+
initial_list_length += nof_property_keys;
if (initial_list_length > FixedArray::kMaxLength ||
initial_list_length < nof_property_keys) {
@@ -1097,13 +1118,30 @@ class ElementsAccessorBase : public ElementsAccessor {
MessageTemplate::kInvalidArrayLength));
}
- bool needs_sorting =
- IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind());
-
// Collect the element indices into a new list.
+ MaybeHandle<FixedArray> raw_array =
+ isolate->factory()->TryNewFixedArray(initial_list_length);
+ Handle<FixedArray> combined_keys;
+
+ // If we have a holey backing store try to precisely estimate the backing
+ // store size as a last emergency measure if we cannot allocate the big
+ // array.
+ if (!raw_array.ToHandle(&combined_keys)) {
+ if (IsHoleyElementsKind(kind())) {
+ // If we overestimate the result list size we might end up in the
+ // large-object space which doesn't free memory on shrinking the list.
+ // Hence we try to estimate the final size for holey backing stores more
+ // precisely here.
+ initial_list_length =
+ Subclass::NumberOfElementsImpl(*object, *backing_store);
+ initial_list_length += nof_property_keys;
+ }
+ combined_keys = isolate->factory()->NewFixedArray(initial_list_length);
+ }
+
uint32_t nof_indices = 0;
- Handle<FixedArray> combined_keys =
- isolate->factory()->NewFixedArray(initial_list_length);
+ bool needs_sorting =
+ IsDictionaryElementsKind(kind()) || IsSloppyArgumentsElements(kind());
combined_keys = Subclass::DirectCollectElementIndicesImpl(
isolate, object, backing_store,
needs_sorting ? GetKeysConversion::kKeepNumbers : convert, filter,
@@ -1186,13 +1224,14 @@ class ElementsAccessorBase : public ElementsAccessor {
return entry;
}
- static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
uint32_t length = Subclass::GetMaxIndex(holder, backing_store);
if (IsHoleyElementsKind(kind())) {
return index < length &&
- !BackingStore::cast(backing_store)->is_the_hole(index)
+ !BackingStore::cast(backing_store)
+ ->is_the_hole(isolate, index)
? index
: kMaxUInt32;
} else {
@@ -1200,9 +1239,10 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- uint32_t GetEntryForIndex(JSObject* holder, FixedArrayBase* backing_store,
+ uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
+ FixedArrayBase* backing_store,
uint32_t index) final {
- return Subclass::GetEntryForIndexImpl(holder, backing_store, index,
+ return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
ALL_PROPERTIES);
}
@@ -1239,6 +1279,11 @@ class DictionaryElementsAccessor
static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
FixedArrayBase* backing_store) {
+ return NumberOfElementsImpl(receiver, backing_store);
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject* receiver,
+ FixedArrayBase* backing_store) {
SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
return dict->NumberOfElements();
}
@@ -1387,11 +1432,12 @@ class DictionaryElementsAccessor
object->set_elements(*new_dictionary);
}
- static bool HasEntryImpl(FixedArrayBase* store, uint32_t entry) {
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* store,
+ uint32_t entry) {
DisallowHeapAllocation no_gc;
SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Object* index = dict->KeyAt(entry);
- return !index->IsTheHole(dict->GetIsolate());
+ return !index->IsTheHole(isolate);
}
static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) {
@@ -1402,11 +1448,12 @@ class DictionaryElementsAccessor
return result;
}
- static uint32_t GetEntryForIndexImpl(JSObject* holder, FixedArrayBase* store,
- uint32_t index, PropertyFilter filter) {
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
+ FixedArrayBase* store, uint32_t index,
+ PropertyFilter filter) {
DisallowHeapAllocation no_gc;
SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
- int entry = dictionary->FindEntry(index);
+ int entry = dictionary->FindEntry(isolate, index);
if (entry == SeededNumberDictionary::kNotFound) return kMaxUInt32;
if (filter != ALL_PROPERTIES) {
PropertyDetails details = dictionary->DetailsAt(entry);
@@ -1502,8 +1549,8 @@ class DictionaryElementsAccessor
Isolate* isolate = accumulator->isolate();
Handle<Object> undefined = isolate->factory()->undefined_value();
Handle<Object> the_hole = isolate->factory()->the_hole_value();
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(receiver->elements());
+ Handle<SeededNumberDictionary> dictionary(
+ SeededNumberDictionary::cast(receiver->elements()), isolate);
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
@@ -1723,7 +1770,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
int j = 0;
for (int i = 0; j < capacity; i++) {
if (IsHoleyElementsKind(kind)) {
- if (BackingStore::cast(*store)->is_the_hole(i)) continue;
+ if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
}
Handle<Object> value = Subclass::GetImpl(*store, i);
dictionary = SeededNumberDictionary::AddNumberEntry(
@@ -1736,12 +1783,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
static void DeleteAtEnd(Handle<JSObject> obj,
Handle<BackingStore> backing_store, uint32_t entry) {
uint32_t length = static_cast<uint32_t>(backing_store->length());
- Heap* heap = obj->GetHeap();
+ Isolate* isolate = obj->GetIsolate();
for (; entry > 0; entry--) {
- if (!backing_store->is_the_hole(entry - 1)) break;
+ if (!backing_store->is_the_hole(isolate, entry - 1)) break;
}
if (entry == 0) {
- FixedArray* empty = heap->empty_fixed_array();
+ FixedArray* empty = isolate->heap()->empty_fixed_array();
// Dynamically ask for the elements kind here since we manually redirect
// the operations for argument backing stores.
if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) {
@@ -1752,8 +1799,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return;
}
- heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*backing_store,
- length - entry);
+ isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+ *backing_store, length - entry);
}
static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
@@ -1768,6 +1815,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return;
}
+ Isolate* isolate = obj->GetIsolate();
backing_store->set_the_hole(entry);
// TODO(verwaest): Move this out of elements.cc.
@@ -1784,12 +1832,13 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
} else {
length = static_cast<uint32_t>(store->length());
}
- if ((entry > 0 && backing_store->is_the_hole(entry - 1)) ||
- (entry + 1 < length && backing_store->is_the_hole(entry + 1))) {
+ if ((entry > 0 && backing_store->is_the_hole(isolate, entry - 1)) ||
+ (entry + 1 < length &&
+ backing_store->is_the_hole(isolate, entry + 1))) {
if (!obj->IsJSArray()) {
uint32_t i;
for (i = entry + 1; i < length; i++) {
- if (!backing_store->is_the_hole(i)) break;
+ if (!backing_store->is_the_hole(isolate, i)) break;
}
if (i == length) {
DeleteAtEnd(obj, backing_store, entry);
@@ -1798,7 +1847,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
int num_used = 0;
for (int i = 0; i < backing_store->length(); ++i) {
- if (!backing_store->is_the_hole(i)) {
+ if (!backing_store->is_the_hole(isolate, i)) {
++num_used;
// Bail out if a number dictionary wouldn't be able to save at least
// 75% space.
@@ -1859,19 +1908,32 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DeleteCommon(obj, entry, handle(obj->elements()));
}
- static bool HasEntryImpl(FixedArrayBase* backing_store, uint32_t entry) {
- return !BackingStore::cast(backing_store)->is_the_hole(entry);
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return !BackingStore::cast(backing_store)->is_the_hole(isolate, entry);
+ }
+
+ static uint32_t NumberOfElementsImpl(JSObject* receiver,
+ FixedArrayBase* backing_store) {
+ uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store);
+ if (IsFastPackedElementsKind(Subclass::kind())) return max_index;
+ Isolate* isolate = receiver->GetIsolate();
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < max_index; i++) {
+ if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++;
+ }
+ return count;
}
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
- Handle<FixedArrayBase> elements(receiver->elements(),
- accumulator->isolate());
+ Isolate* isolate = accumulator->isolate();
+ Handle<FixedArrayBase> elements(receiver->elements(), isolate);
uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
if (IsFastPackedElementsKind(KindTraits::Kind) ||
- HasEntryImpl(*elements, i)) {
+ HasEntryImpl(isolate, *elements, i)) {
accumulator->AddKey(Subclass::GetImpl(*elements, i), convert);
}
}
@@ -1900,12 +1962,12 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (int i = 0; i < length; i++) {
DCHECK(BackingStore::get(*backing_store, i, isolate)->IsSmi() ||
(IsFastHoleyElementsKind(KindTraits::Kind) &&
- backing_store->is_the_hole(i)));
+ backing_store->is_the_hole(isolate, i)));
}
} else if (KindTraits::Kind == FAST_ELEMENTS ||
KindTraits::Kind == FAST_DOUBLE_ELEMENTS) {
for (int i = 0; i < length; i++) {
- DCHECK(!backing_store->is_the_hole(i));
+ DCHECK(!backing_store->is_the_hole(isolate, i));
}
} else {
DCHECK(IsFastHoleyElementsKind(KindTraits::Kind));
@@ -1970,7 +2032,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
if (new_length == 0) {
receiver->set_elements(heap->empty_fixed_array());
- receiver->set_length(Smi::FromInt(0));
+ receiver->set_length(Smi::kZero);
return isolate->factory()->NewJSArrayWithElements(
backing_store, KindTraits::Kind, delete_count);
}
@@ -2007,11 +2069,13 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Isolate* isolate, Handle<JSObject> object,
Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
PropertyFilter filter) {
+ Handle<BackingStore> elements(BackingStore::cast(object->elements()),
+ isolate);
int count = 0;
- uint32_t length = object->elements()->length();
+ uint32_t length = elements->length();
for (uint32_t index = 0; index < length; ++index) {
- if (!HasEntryImpl(object->elements(), index)) continue;
- Handle<Object> value = Subclass::GetImpl(object->elements(), index);
+ if (!HasEntryImpl(isolate, *elements, index)) continue;
+ Handle<Object> value = Subclass::GetImpl(*elements, index);
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -2351,7 +2415,6 @@ class FastSmiOrObjectElementsAccessor
return backing_store->get(index);
}
-
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessor::CopyElements() for details.
@@ -2650,7 +2713,8 @@ class TypedElementsAccessor
return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
}
- static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+ static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
+ uint32_t index,
Handle<FixedArrayBase> backing_store,
PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(*holder, *backing_store);
@@ -2677,7 +2741,7 @@ class TypedElementsAccessor
return entry;
}
- static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
return index < AccessorClass::GetCapacityImpl(holder, backing_store)
@@ -2692,6 +2756,11 @@ class TypedElementsAccessor
return backing_store->length();
}
+ static uint32_t NumberOfElementsImpl(JSObject* receiver,
+ FixedArrayBase* backing_store) {
+ return AccessorClass::GetCapacityImpl(receiver, backing_store);
+ }
+
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
@@ -2930,19 +2999,34 @@ class SloppyArgumentsElementsAccessor
ArgumentsAccessor::GetMaxNumberOfEntries(holder, arguments);
}
+ static uint32_t NumberOfElementsImpl(JSObject* receiver,
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ uint32_t nof_elements = 0;
+ uint32_t length = parameter_map->length() - 2;
+ for (uint32_t entry = 0; entry < length; entry++) {
+ if (HasParameterMapArg(parameter_map, entry)) nof_elements++;
+ }
+ return nof_elements +
+ ArgumentsAccessor::NumberOfElementsImpl(receiver, arguments);
+ }
+
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
- FixedArrayBase* elements = receiver->elements();
- uint32_t length = GetCapacityImpl(*receiver, elements);
+ Isolate* isolate = accumulator->isolate();
+ Handle<FixedArrayBase> elements(receiver->elements(), isolate);
+ uint32_t length = GetCapacityImpl(*receiver, *elements);
for (uint32_t entry = 0; entry < length; entry++) {
- if (!HasEntryImpl(elements, entry)) continue;
- Handle<Object> value = GetImpl(elements, entry);
+ if (!HasEntryImpl(isolate, *elements, entry)) continue;
+ Handle<Object> value = GetImpl(*elements, entry);
accumulator->AddKey(value, convert);
}
}
- static bool HasEntryImpl(FixedArrayBase* parameters, uint32_t entry) {
+ static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* parameters,
+ uint32_t entry) {
FixedArray* parameter_map = FixedArray::cast(parameters);
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
@@ -2950,7 +3034,7 @@ class SloppyArgumentsElementsAccessor
}
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
- return ArgumentsAccessor::HasEntryImpl(arguments, entry - length);
+ return ArgumentsAccessor::HasEntryImpl(isolate, arguments, entry - length);
}
static bool HasAccessorsImpl(JSObject* holder,
@@ -2970,15 +3054,15 @@ class SloppyArgumentsElementsAccessor
return ArgumentsAccessor::GetIndexForEntryImpl(arguments, entry - length);
}
- static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
FixedArrayBase* parameters,
uint32_t index, PropertyFilter filter) {
FixedArray* parameter_map = FixedArray::cast(parameters);
if (HasParameterMapArg(parameter_map, index)) return index;
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(holder, arguments,
- index, filter);
+ uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
+ isolate, holder, arguments, index, filter);
if (entry == kMaxUInt32) return kMaxUInt32;
return (parameter_map->length() - 2) + entry;
}
@@ -3065,8 +3149,8 @@ class SloppyArgumentsElementsAccessor
bool search_for_hole = value->IsUndefined(isolate);
for (uint32_t k = start_from; k < length; ++k) {
- uint32_t entry =
- GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
+ uint32_t entry = GetEntryForIndexImpl(isolate, *object, *parameter_map, k,
+ ALL_PROPERTIES);
if (entry == kMaxUInt32) {
if (search_for_hole) return Just(true);
continue;
@@ -3105,8 +3189,8 @@ class SloppyArgumentsElementsAccessor
isolate);
for (uint32_t k = start_from; k < length; ++k) {
- uint32_t entry =
- GetEntryForIndexImpl(*object, *parameter_map, k, ALL_PROPERTIES);
+ uint32_t entry = GetEntryForIndexImpl(isolate, *object, *parameter_map, k,
+ ALL_PROPERTIES);
if (entry == kMaxUInt32) {
continue;
}
@@ -3253,9 +3337,9 @@ class FastSloppyArgumentsElementsAccessor
FixedArray* parameters = FixedArray::cast(receiver->elements());
uint32_t insertion_index = 0;
for (uint32_t i = start; i < end; i++) {
- uint32_t entry =
- GetEntryForIndexImpl(*receiver, parameters, i, ALL_PROPERTIES);
- if (entry != kMaxUInt32 && HasEntryImpl(parameters, entry)) {
+ uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
+ ALL_PROPERTIES);
+ if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
elements->set(insertion_index, *GetImpl(parameters, entry));
} else {
elements->set_the_hole(insertion_index);
@@ -3378,13 +3462,13 @@ class StringWrapperElementsAccessor
return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
}
- static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index, PropertyFilter filter) {
uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
if (index < length) return index;
uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
- holder, backing_store, index, filter);
+ isolate, holder, backing_store, index, filter);
if (backing_store_entry == kMaxUInt32) return kMaxUInt32;
DCHECK(backing_store_entry < kMaxUInt32 - length);
return backing_store_entry + length;
@@ -3491,6 +3575,13 @@ class StringWrapperElementsAccessor
}
}
+ static uint32_t NumberOfElementsImpl(JSObject* object,
+ FixedArrayBase* backing_store) {
+ uint32_t length = GetString(object)->length();
+ return length +
+ BackingStoreAccessor::NumberOfElementsImpl(object, backing_store);
+ }
+
private:
static String* GetString(JSObject* holder) {
DCHECK(holder->IsJSValue());
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 76e1aa6f39..fc2e6a4fdb 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -56,6 +56,7 @@ class ElementsAccessor {
virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
virtual bool HasAccessors(JSObject* holder) = 0;
+ virtual uint32_t NumberOfElements(JSObject* holder) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
@@ -170,6 +171,10 @@ class ElementsAccessor {
Handle<Object> value, uint32_t start,
uint32_t length) = 0;
+ virtual void CopyElements(Handle<FixedArrayBase> source,
+ ElementsKind source_kind,
+ Handle<FixedArrayBase> destination, int size) = 0;
+
protected:
friend class LookupIterator;
@@ -181,7 +186,7 @@ class ElementsAccessor {
// indices are equivalent to entries. In the NumberDictionary
// ElementsAccessor, entries are mapped to an index using the KeyAt method on
// the NumberDictionary.
- virtual uint32_t GetEntryForIndex(JSObject* holder,
+ virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index) = 0;
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index da533363af..ad412975cf 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -35,14 +35,14 @@ static void AddCounter(v8::Isolate* isolate,
}
}
-static void AddNumber(v8::Isolate* isolate,
- v8::Local<v8::Object> object,
- intptr_t value,
- const char* name) {
- object->Set(isolate->GetCurrentContext(),
- v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
- .ToLocalChecked(),
- v8::Number::New(isolate, static_cast<double>(value))).FromJust();
+static void AddNumber(v8::Isolate* isolate, v8::Local<v8::Object> object,
+ double value, const char* name) {
+ object
+ ->Set(isolate->GetCurrentContext(),
+ v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(isolate, value))
+ .FromJust();
}
@@ -112,29 +112,24 @@ void StatisticsExtension::GetCounters(
}
struct StatisticNumber {
- intptr_t number;
+ size_t number;
const char* name;
};
const StatisticNumber numbers[] = {
- {static_cast<intptr_t>(heap->memory_allocator()->Size()),
- "total_committed_bytes"},
+ {heap->memory_allocator()->Size(), "total_committed_bytes"},
{heap->new_space()->Size(), "new_space_live_bytes"},
{heap->new_space()->Available(), "new_space_available_bytes"},
- {static_cast<intptr_t>(heap->new_space()->CommittedMemory()),
- "new_space_commited_bytes"},
+ {heap->new_space()->CommittedMemory(), "new_space_commited_bytes"},
{heap->old_space()->Size(), "old_space_live_bytes"},
{heap->old_space()->Available(), "old_space_available_bytes"},
- {static_cast<intptr_t>(heap->old_space()->CommittedMemory()),
- "old_space_commited_bytes"},
+ {heap->old_space()->CommittedMemory(), "old_space_commited_bytes"},
{heap->code_space()->Size(), "code_space_live_bytes"},
{heap->code_space()->Available(), "code_space_available_bytes"},
- {static_cast<intptr_t>(heap->code_space()->CommittedMemory()),
- "code_space_commited_bytes"},
+ {heap->code_space()->CommittedMemory(), "code_space_commited_bytes"},
{heap->lo_space()->Size(), "lo_space_live_bytes"},
{heap->lo_space()->Available(), "lo_space_available_bytes"},
- {static_cast<intptr_t>(heap->lo_space()->CommittedMemory()),
- "lo_space_commited_bytes"},
+ {heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
};
for (size_t i = 0; i < arraysize(numbers); i++) {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index f908be1e47..2e9fc46590 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -11,6 +11,11 @@
#include "src/deoptimizer.h"
#include "src/ic/stub-cache.h"
+#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
+#define SYMBOLIZE_FUNCTION
+#include <execinfo.h>
+#endif // DEBUG && V8_OS_LINUX && !V8_OS_ANDROID
+
namespace v8 {
namespace internal {
@@ -31,10 +36,11 @@ ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
}
ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
+ // nullptr is preserved through serialization/deserialization.
+ Add(nullptr, "nullptr");
AddReferences(isolate);
AddBuiltins(isolate);
AddRuntimeFunctions(isolate);
- AddStatCounters(isolate);
AddIsolateAddresses(isolate);
AddAccessors(isolate);
AddStubCache(isolate);
@@ -42,6 +48,28 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
AddApiReferences(isolate);
}
+#ifdef DEBUG
+void ExternalReferenceTable::ResetCount() {
+ for (ExternalReferenceEntry& entry : refs_) entry.count = 0;
+}
+
+void ExternalReferenceTable::PrintCount() {
+ for (int i = 0; i < refs_.length(); i++) {
+ v8::base::OS::Print("index=%5d count=%5d %-60s\n", i, refs_[i].count,
+ refs_[i].name);
+ }
+}
+#endif // DEBUG
+
+// static
+const char* ExternalReferenceTable::ResolveSymbol(void* address) {
+#ifdef SYMBOLIZE_FUNCTION
+ return backtrace_symbols(&address, 1)[0];
+#else
+ return "<unresolved>";
+#endif // SYMBOLIZE_FUNCTION
+}
+
void ExternalReferenceTable::AddReferences(Isolate* isolate) {
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
@@ -56,11 +84,6 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"Heap::NewSpaceAllocationTopAddress()");
Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
"mod_two_doubles");
- // Keyed lookup cache.
- Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
- "KeyedLookupCache::keys()");
- Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
- "KeyedLookupCache::field_offsets()");
Add(ExternalReference::handle_scope_next_address(isolate).address(),
"HandleScope::next");
Add(ExternalReference::handle_scope_limit_address(isolate).address(),
@@ -78,8 +101,6 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
"Interpreter::dispatch_table_address");
- Add(ExternalReference::interpreter_dispatch_counters(isolate).address(),
- "Interpreter::interpreter_dispatch_counters");
Add(ExternalReference::address_of_negative_infinity().address(),
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function(isolate).address(),
@@ -320,32 +341,6 @@ void ExternalReferenceTable::AddRuntimeFunctions(Isolate* isolate) {
}
}
-void ExternalReferenceTable::AddStatCounters(Isolate* isolate) {
- // Stat counters
- struct StatsRefTableEntry {
- StatsCounter* (Counters::*counter)();
- const char* name;
- };
-
- static const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption) {&Counters::name, "Counters::" #name},
- STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
-#undef COUNTER_ENTRY
- };
-
- Counters* counters = isolate->counters();
- for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
- // To make sure the indices are not dependent on whether counters are
- // enabled, use a dummy address as filler.
- Address address = NotAvailable();
- StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
- if (counter->Enabled()) {
- address = reinterpret_cast<Address>(counter->GetInternalPointer());
- }
- Add(address, stats_ref_table[i].name);
- }
-}
-
void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate) {
// Top addresses
static const char* address_names[] = {
@@ -368,22 +363,24 @@ void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
};
static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(name) \
- {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
+#define ACCESSOR_INFO_DECLARATION(name) \
+ { FUNCTION_ADDR(&Accessors::name##Getter), \
+ "Redirect to Accessors::" #name "Getter"},
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
};
static const AccessorRefTable setters[] = {
#define ACCESSOR_SETTER_DECLARATION(name) \
- {FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
+ { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name},
ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
};
for (unsigned i = 0; i < arraysize(getters); ++i) {
- Add(getters[i].address, getters[i].name);
+ const char* name = getters[i].name + 12; // Skip "Redirect to " prefix.
+ Add(getters[i].address, name);
Add(AccessorInfo::redirect(isolate, getters[i].address, ACCESSOR_GETTER),
- "");
+ getters[i].name);
}
for (unsigned i = 0; i < arraysize(setters); ++i) {
@@ -444,7 +441,8 @@ void ExternalReferenceTable::AddApiReferences(Isolate* isolate) {
intptr_t* api_external_references = isolate->api_external_references();
if (api_external_references != nullptr) {
while (*api_external_references != 0) {
- Add(reinterpret_cast<Address>(*api_external_references), "<embedder>");
+ Address address = reinterpret_cast<Address>(*api_external_references);
+ Add(address, ResolveSymbol(address));
api_external_references++;
}
}
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index dc30dabd7c..e1b97f957c 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -19,11 +19,18 @@ class ExternalReferenceTable {
public:
static ExternalReferenceTable* instance(Isolate* isolate);
- int size() const { return refs_.length(); }
- Address address(int i) { return refs_[i].address; }
- const char* name(int i) { return refs_[i].name; }
+ uint32_t size() const { return static_cast<uint32_t>(refs_.length()); }
+ Address address(uint32_t i) { return refs_[i].address; }
+ const char* name(uint32_t i) { return refs_[i].name; }
- inline static Address NotAvailable() { return NULL; }
+#ifdef DEBUG
+ void increment_count(uint32_t i) { refs_[i].count++; }
+ int count(uint32_t i) { return refs_[i].count; }
+ void ResetCount();
+ void PrintCount();
+#endif // DEBUG
+
+ static const char* ResolveSymbol(void* address);
static const int kDeoptTableSerializeEntryCount = 64;
@@ -31,19 +38,25 @@ class ExternalReferenceTable {
struct ExternalReferenceEntry {
Address address;
const char* name;
+#ifdef DEBUG
+ int count;
+#endif // DEBUG
};
explicit ExternalReferenceTable(Isolate* isolate);
void Add(Address address, const char* name) {
+#ifdef DEBUG
+ ExternalReferenceEntry entry = {address, name, 0};
+#else
ExternalReferenceEntry entry = {address, name};
+#endif // DEBUG
refs_.Add(entry);
}
void AddReferences(Isolate* isolate);
void AddBuiltins(Isolate* isolate);
void AddRuntimeFunctions(Isolate* isolate);
- void AddStatCounters(Isolate* isolate);
void AddIsolateAddresses(Isolate* isolate);
void AddAccessors(Isolate* isolate);
void AddStubCache(Isolate* isolate);
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 163e86484d..3e812d56e3 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -97,11 +97,20 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
result->set_prototype_users(WeakFixedArray::Empty());
result->set_registry_slot(PrototypeInfo::UNREGISTERED);
- result->set_validity_cell(Smi::FromInt(0));
+ result->set_validity_cell(Smi::kZero);
result->set_bit_field(0);
return result;
}
+Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
+ Handle<Object> value3) {
+ Handle<Tuple3> result = Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE));
+ result->set_value1(*value1);
+ result->set_value2(*value2);
+ result->set_value3(*value3);
+ return result;
+}
+
Handle<ContextExtension> Factory::NewContextExtension(
Handle<ScopeInfo> scope_info, Handle<Object> extension) {
Handle<ContextExtension> result =
@@ -128,6 +137,15 @@ Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
FixedArray);
}
+MaybeHandle<FixedArray> Factory::TryNewFixedArray(int size,
+ PretenureFlag pretenure) {
+ DCHECK(0 <= size);
+ AllocationResult allocation =
+ isolate()->heap()->AllocateFixedArray(size, pretenure);
+ Object* array = NULL;
+ if (!allocation.To(&array)) return MaybeHandle<FixedArray>();
+ return Handle<FixedArray>(FixedArray::cast(array), isolate());
+}
Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
PretenureFlag pretenure) {
@@ -179,7 +197,7 @@ Handle<FrameArray> Factory::NewFrameArray(int number_of_frames,
DCHECK_LE(0, number_of_frames);
Handle<FixedArray> result =
NewFixedArrayWithHoles(FrameArray::LengthFor(number_of_frames));
- result->set(FrameArray::kFrameCountIndex, Smi::FromInt(0));
+ result->set(FrameArray::kFrameCountIndex, Smi::kZero);
return Handle<FrameArray>::cast(result);
}
@@ -297,6 +315,44 @@ MaybeHandle<String> Factory::NewStringFromUtf8(Vector<const char> string,
return result;
}
+MaybeHandle<String> Factory::NewStringFromUtf8SubString(
+ Handle<SeqOneByteString> str, int begin, int length,
+ PretenureFlag pretenure) {
+ // Check for ASCII first since this is the common case.
+ const char* start = reinterpret_cast<const char*>(str->GetChars() + begin);
+ int non_ascii_start = String::NonAsciiStart(start, length);
+ if (non_ascii_start >= length) {
+ // If the string is ASCII, we can just make a substring.
+ // TODO(v8): the pretenure flag is ignored in this case.
+ return NewSubString(str, begin, begin + length);
+ }
+
+ // Non-ASCII and we need to decode.
+ Access<UnicodeCache::Utf8Decoder> decoder(
+ isolate()->unicode_cache()->utf8_decoder());
+ decoder->Reset(start + non_ascii_start, length - non_ascii_start);
+ int utf16_length = static_cast<int>(decoder->Utf16Length());
+ DCHECK(utf16_length > 0);
+ // Allocate string.
+ Handle<SeqTwoByteString> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ NewRawTwoByteString(non_ascii_start + utf16_length, pretenure), String);
+
+ // Reset the decoder, because the original {str} may have moved.
+ const char* ascii_data =
+ reinterpret_cast<const char*>(str->GetChars() + begin);
+ decoder->Reset(ascii_data + non_ascii_start, length - non_ascii_start);
+ // Copy ASCII portion.
+ uint16_t* data = result->GetChars();
+ for (int i = 0; i < non_ascii_start; i++) {
+ *data++ = *ascii_data++;
+ }
+ // Now write the remainder.
+ decoder->WriteUtf16(data, utf16_length);
+ return result;
+}
+
MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
int length,
PretenureFlag pretenure) {
@@ -779,7 +835,8 @@ Handle<Context> Factory::NewNativeContext() {
array->set_map_no_write_barrier(*native_context_map());
Handle<Context> context = Handle<Context>::cast(array);
context->set_native_context(*context);
- context->set_errors_thrown(Smi::FromInt(0));
+ context->set_errors_thrown(Smi::kZero);
+ context->set_math_random_index(Smi::kZero);
Handle<WeakCell> weak_cell = NewWeakCell(context);
context->set_self_weak_cell(*weak_cell);
DCHECK(context->IsNativeContext());
@@ -914,6 +971,14 @@ Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
return context;
}
+Handle<Context> Factory::NewPromiseResolvingFunctionContext(int length) {
+ DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
+ Handle<FixedArray> array = NewFixedArray(length);
+ array->set_map_no_write_barrier(*function_context_map());
+ Handle<Context> context = Handle<Context>::cast(array);
+ context->set_extension(*the_hole_value());
+ return context;
+}
Handle<Struct> Factory::NewStruct(InstanceType type) {
CALL_HEAP_FUNCTION(
@@ -922,18 +987,36 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
Struct);
}
-Handle<PromiseContainer> Factory::NewPromiseContainer(
+Handle<PromiseResolveThenableJobInfo> Factory::NewPromiseResolveThenableJobInfo(
Handle<JSReceiver> thenable, Handle<JSReceiver> then,
Handle<JSFunction> resolve, Handle<JSFunction> reject,
- Handle<Object> before_debug_event, Handle<Object> after_debug_event) {
- Handle<PromiseContainer> result =
- Handle<PromiseContainer>::cast(NewStruct(PROMISE_CONTAINER_TYPE));
+ Handle<Object> debug_id, Handle<Object> debug_name,
+ Handle<Context> context) {
+ Handle<PromiseResolveThenableJobInfo> result =
+ Handle<PromiseResolveThenableJobInfo>::cast(
+ NewStruct(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE));
result->set_thenable(*thenable);
result->set_then(*then);
result->set_resolve(*resolve);
result->set_reject(*reject);
- result->set_before_debug_event(*before_debug_event);
- result->set_after_debug_event(*after_debug_event);
+ result->set_debug_id(*debug_id);
+ result->set_debug_name(*debug_name);
+ result->set_context(*context);
+ return result;
+}
+
+Handle<PromiseReactionJobInfo> Factory::NewPromiseReactionJobInfo(
+ Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
+ Handle<Object> debug_id, Handle<Object> debug_name,
+ Handle<Context> context) {
+ Handle<PromiseReactionJobInfo> result = Handle<PromiseReactionJobInfo>::cast(
+ NewStruct(PROMISE_REACTION_JOB_INFO_TYPE));
+ result->set_value(*value);
+ result->set_tasks(*tasks);
+ result->set_deferred(*deferred);
+ result->set_debug_id(*debug_id);
+ result->set_debug_name(*debug_name);
+ result->set_context(*context);
return result;
}
@@ -970,7 +1053,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
script->set_eval_from_position(0);
- script->set_shared_function_infos(Smi::FromInt(0));
+ script->set_shared_function_infos(Smi::kZero);
script->set_flags(0);
heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
@@ -1272,6 +1355,8 @@ DEFINE_ERROR(RangeError, range_error)
DEFINE_ERROR(ReferenceError, reference_error)
DEFINE_ERROR(SyntaxError, syntax_error)
DEFINE_ERROR(TypeError, type_error)
+DEFINE_ERROR(WasmCompileError, wasm_compile_error)
+DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
#undef DEFINE_ERROR
Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
@@ -1446,12 +1531,6 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
return scope_info;
}
-Handle<ModuleInfoEntry> Factory::NewModuleInfoEntry() {
- Handle<FixedArray> array = NewFixedArray(ModuleInfoEntry::kLength, TENURED);
- array->set_map_no_write_barrier(*module_info_entry_map());
- return Handle<ModuleInfoEntry>::cast(array);
-}
-
Handle<ModuleInfo> Factory::NewModuleInfo() {
Handle<FixedArray> array = NewFixedArray(ModuleInfo::kLength, TENURED);
array->set_map_no_write_barrier(*module_info_map());
@@ -1504,7 +1583,7 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
// The code object has not been fully initialized yet. We rely on the
// fact that no allocation will happen from this point on.
DisallowHeapAllocation no_gc;
- code->set_gc_metadata(Smi::FromInt(0));
+ code->set_gc_metadata(Smi::kZero);
code->set_ic_age(isolate()->heap()->global_ic_age());
code->set_instruction_size(desc.instr_size);
code->set_relocation_info(*reloc_info);
@@ -1514,7 +1593,7 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_raw_kind_specific_flags2(0);
code->set_is_crankshafted(crankshafted);
code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_raw_type_feedback_info(Smi::FromInt(0));
+ code->set_raw_type_feedback_info(Smi::kZero);
code->set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_source_position_table(*empty_byte_array(), SKIP_WRITE_BARRIER);
@@ -1572,16 +1651,6 @@ Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
}
-Handle<JSObject> Factory::NewJSObjectWithMemento(
- Handle<JSFunction> constructor,
- Handle<AllocationSite> site) {
- JSFunction::EnsureHasInitialMap(constructor);
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*constructor, NOT_TENURED, *site),
- JSObject);
-}
-
Handle<JSObject> Factory::NewJSObjectWithNullProto() {
Handle<JSObject> result = NewJSObject(isolate()->object_function());
Handle<Map> new_map =
@@ -1706,7 +1775,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
DCHECK(capacity >= length);
if (capacity == 0) {
- array->set_length(Smi::FromInt(0));
+ array->set_length(Smi::kZero);
array->set_elements(*empty_fixed_array());
return;
}
@@ -1735,6 +1804,10 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
array->set_length(Smi::FromInt(length));
}
+Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
+ Handle<Map> map = isolate()->js_module_namespace_map();
+ return Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map));
+}
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
Handle<JSFunction> function) {
@@ -1752,23 +1825,26 @@ Handle<Module> Factory::NewModule(Handle<SharedFunctionInfo> code) {
Handle<ModuleInfo> module_info(code->scope_info()->ModuleDescriptorInfo(),
isolate());
Handle<ObjectHashTable> exports =
- ObjectHashTable::New(isolate(), module_info->regular_exports()->length());
+ ObjectHashTable::New(isolate(), module_info->RegularExportCount());
+ Handle<FixedArray> regular_exports =
+ NewFixedArray(module_info->RegularExportCount());
+ Handle<FixedArray> regular_imports =
+ NewFixedArray(module_info->regular_imports()->length());
int requested_modules_length = module_info->module_requests()->length();
Handle<FixedArray> requested_modules =
requested_modules_length > 0 ? NewFixedArray(requested_modules_length)
: empty_fixed_array();
- // To make it easy to hash Modules, we set a new symbol as the name of
- // SharedFunctionInfo representing this Module.
- Handle<Symbol> name_symbol = NewSymbol();
- code->set_name(*name_symbol);
-
Handle<Module> module = Handle<Module>::cast(NewStruct(MODULE_TYPE));
module->set_code(*code);
module->set_exports(*exports);
+ module->set_regular_exports(*regular_exports);
+ module->set_regular_imports(*regular_imports);
+ module->set_hash(isolate()->GenerateIdentityHash(Smi::kMaxValue));
+ module->set_module_namespace(isolate()->heap()->undefined_value());
module->set_requested_modules(*requested_modules);
- module->set_flags(0);
- module->set_embedder_data(isolate()->heap()->undefined_value());
+ DCHECK(!module->instantiated());
+ DCHECK(!module->evaluated());
return module;
}
@@ -1934,6 +2010,12 @@ void SetupArrayBufferView(i::Isolate* isolate,
DCHECK(byte_offset + byte_length <=
static_cast<size_t>(buffer->byte_length()->Number()));
+ DCHECK_EQ(obj->GetInternalFieldCount(),
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ obj->SetInternalField(i, Smi::kZero);
+ }
+
obj->set_buffer(*buffer);
i::Handle<i::Object> byte_offset_object =
@@ -2003,6 +2085,11 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
size_t number_of_elements,
PretenureFlag pretenure) {
Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, pretenure);
+ DCHECK_EQ(obj->GetInternalFieldCount(),
+ v8::ArrayBufferView::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ obj->SetInternalField(i, Smi::kZero);
+ }
size_t element_size = GetFixedTypedArraysElementSize(elements_kind);
ExternalArrayType array_type = GetArrayTypeFromElementsKind(elements_kind);
@@ -2012,7 +2099,7 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
CHECK(number_of_elements <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = number_of_elements * element_size;
- obj->set_byte_offset(Smi::FromInt(0));
+ obj->set_byte_offset(Smi::kZero);
i::Handle<i::Object> byte_length_object =
NewNumberFromSize(byte_length, pretenure);
obj->set_byte_length(*byte_length_object);
@@ -2111,11 +2198,10 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<JSReceiver> target,
return result;
}
-
-Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy() {
+Handle<JSGlobalProxy> Factory::NewUninitializedJSGlobalProxy(int size) {
// Create an empty shell of a JSGlobalProxy that needs to be reinitialized
// via ReinitializeJSGlobalProxy later.
- Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
+ Handle<Map> map = NewMap(JS_GLOBAL_PROXY_TYPE, size);
// Maintain invariant expected from any JSGlobalProxy.
map->set_is_access_check_needed(true);
CALL_HEAP_FUNCTION(
@@ -2133,12 +2219,11 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
// The proxy's hash should be retained across reinitialization.
Handle<Object> hash(object->hash(), isolate());
- JSObject::InvalidatePrototypeChains(*old_map);
if (old_map->is_prototype_map()) {
map = Map::Copy(map, "CopyAsPrototypeForJSGlobalProxy");
map->set_is_prototype_map(true);
}
- JSObject::UpdatePrototypeUserRegistration(old_map, map, isolate());
+ JSObject::NotifyMapChange(old_map, map, isolate());
// Check that the already allocated object has the same size and type as
// objects allocated using the constructor.
@@ -2215,7 +2300,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
code = isolate()->builtins()->Illegal();
}
share->set_code(*code);
- share->set_optimized_code_map(*cleared_optimized_code_map());
+ share->set_optimized_code_map(*empty_fixed_array());
share->set_scope_info(ScopeInfo::Empty(isolate()));
share->set_outer_scope_info(*the_hole_value());
Handle<Code> construct_stub =
@@ -2459,13 +2544,30 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
- store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
+ store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::kZero);
store->set(JSRegExp::kIrregexpCaptureCountIndex,
Smi::FromInt(capture_count));
store->set(JSRegExp::kIrregexpCaptureNameMapIndex, uninitialized);
regexp->set_data(*store);
}
+Handle<RegExpMatchInfo> Factory::NewRegExpMatchInfo() {
+ // Initially, the last match info consists of all fixed fields plus space for
+ // the match itself (i.e., 2 capture indices).
+ static const int kInitialSize = RegExpMatchInfo::kFirstCaptureIndex +
+ RegExpMatchInfo::kInitialCaptureIndices;
+
+ Handle<FixedArray> elems = NewFixedArray(kInitialSize);
+ Handle<RegExpMatchInfo> result = Handle<RegExpMatchInfo>::cast(elems);
+
+ result->SetNumberOfCaptureRegisters(RegExpMatchInfo::kInitialCaptureIndices);
+ result->SetLastSubject(*empty_string());
+ result->SetLastInput(*undefined_value());
+ result->SetCapture(0, 0);
+ result->SetCapture(1, 0);
+
+ return result;
+}
Handle<Object> Factory::GlobalConstantFor(Handle<Name> name) {
if (Name::Equals(name, undefined_string())) return undefined_value();
@@ -2608,5 +2710,26 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
}
}
+Handle<JSFixedArrayIterator> Factory::NewJSFixedArrayIterator(
+ Handle<FixedArray> array) {
+ // Create the "next" function (must be unique per iterator object).
+ Handle<Code> code(
+ isolate()->builtins()->builtin(Builtins::kFixedArrayIteratorNext));
+ // TODO(neis): Don't create a new SharedFunctionInfo each time.
+ Handle<JSFunction> next = isolate()->factory()->NewFunctionWithoutPrototype(
+ isolate()->factory()->next_string(), code, false);
+ next->shared()->set_native(true);
+
+ // Create the iterator.
+ Handle<Map> map(isolate()->native_context()->fixed_array_iterator_map());
+ Handle<JSFixedArrayIterator> iterator =
+ Handle<JSFixedArrayIterator>::cast(NewJSObjectFromMap(map));
+ iterator->set_initial_next(*next);
+ iterator->set_array(*array);
+ iterator->set_index(0);
+ iterator->InObjectPropertyAtPut(JSFixedArrayIterator::kNextIndex, *next);
+ return iterator;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 82c2317cc0..d059b10888 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -5,6 +5,7 @@
#ifndef V8_FACTORY_H_
#define V8_FACTORY_H_
+#include "src/globals.h"
#include "src/isolate.h"
#include "src/messages.h"
#include "src/type-feedback-vector.h"
@@ -21,14 +22,22 @@ enum FunctionMode {
};
// Interface for handle based allocation.
-class Factory final {
+class V8_EXPORT_PRIVATE Factory final {
public:
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
// Allocates a fixed array initialized with undefined values.
- V8_EXPORT_PRIVATE Handle<FixedArray> NewFixedArray(
+ Handle<FixedArray> NewFixedArray(int size,
+ PretenureFlag pretenure = NOT_TENURED);
+ // Tries allocating a fixed array initialized with undefined values.
+ // In case of an allocation failure (OOM) an empty handle is returned.
+ // The caller has to manually signal an
+ // v8::internal::Heap::FatalProcessOutOfMemory typically by calling
+ // NewFixedArray as a fallback.
+ MUST_USE_RESULT
+ MaybeHandle<FixedArray> TryNewFixedArray(
int size, PretenureFlag pretenure = NOT_TENURED);
// Allocate a new fixed array with non-existing entries (the hole).
@@ -60,15 +69,26 @@ class Factory final {
// Create a new boxed value.
Handle<Box> NewBox(Handle<Object> value);
- // Create a new PromiseContainer struct.
- Handle<PromiseContainer> NewPromiseContainer(
+ // Create a new PromiseReactionJobInfo struct.
+ Handle<PromiseReactionJobInfo> NewPromiseReactionJobInfo(
+ Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
+ Handle<Object> debug_id, Handle<Object> debug_name,
+ Handle<Context> context);
+
+ // Create a new PromiseResolveThenableJobInfo struct.
+ Handle<PromiseResolveThenableJobInfo> NewPromiseResolveThenableJobInfo(
Handle<JSReceiver> thenable, Handle<JSReceiver> then,
Handle<JSFunction> resolve, Handle<JSFunction> reject,
- Handle<Object> before_debug_event, Handle<Object> after_debug_event);
+ Handle<Object> debug_id, Handle<Object> debug_name,
+ Handle<Context> context);
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
+ // Create a new Tuple3 struct.
+ Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
+ Handle<Object> value3);
+
// Create a new ContextExtension struct.
Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
Handle<Object> extension);
@@ -81,8 +101,7 @@ class Factory final {
// Finds the internalized copy for string in the string table.
// If not found, a new string is added to the table and returned.
- V8_EXPORT_PRIVATE Handle<String> InternalizeUtf8String(
- Vector<const char> str);
+ Handle<String> InternalizeUtf8String(Vector<const char> str);
Handle<String> InternalizeUtf8String(const char* str) {
return InternalizeUtf8String(CStrVector(str));
}
@@ -127,7 +146,7 @@ class Factory final {
// will be converted to Latin1, otherwise it will be left as two-byte.
//
// One-byte strings are pretenured when used as keys in the SourceCodeCache.
- V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED);
template <size_t N>
@@ -170,10 +189,14 @@ class Factory final {
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
- MUST_USE_RESULT V8_EXPORT_PRIVATE MaybeHandle<String> NewStringFromUtf8(
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8(
Vector<const char> str, PretenureFlag pretenure = NOT_TENURED);
- V8_EXPORT_PRIVATE MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromUtf8SubString(
+ Handle<SeqOneByteString> str, int begin, int end,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeHandle<String> NewStringFromTwoByte(
@@ -296,6 +319,8 @@ class Factory final {
Handle<Context> NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
+ // Create a promise context.
+ Handle<Context> NewPromiseResolvingFunctionContext(int length);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
@@ -306,7 +331,7 @@ class Factory final {
Handle<AccessorInfo> NewAccessorInfo();
- V8_EXPORT_PRIVATE Handle<Script> NewScript(Handle<String> source);
+ Handle<Script> NewScript(Handle<String> source);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -416,12 +441,6 @@ class Factory final {
SIMD128_TYPES(SIMD128_NEW_DECL)
#undef SIMD128_NEW_DECL
- // These objects are used by the api to create env-independent data
- // structures in the heap.
- inline Handle<JSObject> NewNeanderObject() {
- return NewJSObjectFromMap(neander_map());
- }
-
Handle<JSWeakMap> NewJSWeakMap();
Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
@@ -430,9 +449,6 @@ class Factory final {
// runtime.
Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure = NOT_TENURED);
- // JSObject that should have a memento pointing to the allocation site.
- Handle<JSObject> NewJSObjectWithMemento(Handle<JSFunction> constructor,
- Handle<AllocationSite> site);
// JSObject without a prototype.
Handle<JSObject> NewJSObjectWithNullProto();
@@ -450,7 +466,7 @@ class Factory final {
// Create a JSArray with a specified length and elements initialized
// according to the specified mode.
- V8_EXPORT_PRIVATE Handle<JSArray> NewJSArray(
+ Handle<JSArray> NewJSArray(
ElementsKind elements_kind, int length, int capacity,
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
@@ -466,11 +482,11 @@ class Factory final {
}
// Create a JSArray with the given elements.
- V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
- Handle<FixedArrayBase> elements, ElementsKind elements_kind, int length,
- PretenureFlag pretenure = NOT_TENURED);
+ Handle<JSArray> NewJSArrayWithElements(Handle<FixedArrayBase> elements,
+ ElementsKind elements_kind, int length,
+ PretenureFlag pretenure = NOT_TENURED);
- V8_EXPORT_PRIVATE Handle<JSArray> NewJSArrayWithElements(
+ Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND,
PretenureFlag pretenure = NOT_TENURED) {
@@ -486,6 +502,8 @@ class Factory final {
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
+ Handle<JSModuleNamespace> NewJSModuleNamespace();
+
Handle<Module> NewModule(Handle<SharedFunctionInfo> code);
Handle<JSArrayBuffer> NewJSArrayBuffer(
@@ -522,6 +540,9 @@ class Factory final {
Handle<JSMapIterator> NewJSMapIterator();
Handle<JSSetIterator> NewJSSetIterator();
+ Handle<JSFixedArrayIterator> NewJSFixedArrayIterator(
+ Handle<FixedArray> array);
+
// Allocates a bound function.
MaybeHandle<JSBoundFunction> NewJSBoundFunction(
Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -538,7 +559,7 @@ class Factory final {
void ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> global,
Handle<JSFunction> constructor);
- Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy();
+ Handle<JSGlobalProxy> NewUninitializedJSGlobalProxy(int size);
Handle<JSFunction> NewFunction(Handle<Map> map,
Handle<SharedFunctionInfo> info,
@@ -623,7 +644,9 @@ class Factory final {
DECLARE_ERROR(ReferenceError)
DECLARE_ERROR(SyntaxError)
DECLARE_ERROR(TypeError)
-#undef DEFINE_ERROR
+ DECLARE_ERROR(WasmCompileError)
+ DECLARE_ERROR(WasmRuntimeError)
+#undef DECLARE_ERROR
Handle<String> NumberToString(Handle<Object> number,
bool check_number_string_cache = true);
@@ -709,6 +732,8 @@ class Factory final {
int number_of_properties,
bool* is_result_from_cache);
+ Handle<RegExpMatchInfo> NewRegExpMatchInfo();
+
// Creates a new FixedArray that holds the data associated with the
// atom regexp and stores it in the regexp.
void SetRegExpAtomData(Handle<JSRegExp> regexp,
diff --git a/deps/v8/src/fast-accessor-assembler.cc b/deps/v8/src/fast-accessor-assembler.cc
index a9cde70a53..ee9b241186 100644
--- a/deps/v8/src/fast-accessor-assembler.cc
+++ b/deps/v8/src/fast-accessor-assembler.cc
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
- : zone_(isolate->allocator()),
+ : zone_(isolate->allocator(), ZONE_NAME),
isolate_(isolate),
assembler_(new CodeStubAssembler(isolate, zone(), 1,
Code::ComputeFlags(Code::STUB),
diff --git a/deps/v8/src/field-index-inl.h b/deps/v8/src/field-index-inl.h
index c2f25bb7f0..a728eb39df 100644
--- a/deps/v8/src/field-index-inl.h
+++ b/deps/v8/src/field-index-inl.h
@@ -6,7 +6,6 @@
#define V8_FIELD_INDEX_INL_H_
#include "src/field-index.h"
-#include "src/ic/handler-configuration.h"
namespace v8 {
namespace internal {
@@ -85,39 +84,6 @@ inline int FieldIndex::GetLoadByFieldIndex() const {
return is_double() ? (result | 1) : result;
}
-// Takes an offset as computed by GetLoadByFieldOffset and reconstructs a
-// FieldIndex object from it.
-// static
-inline FieldIndex FieldIndex::ForLoadByFieldOffset(Map* map, int offset) {
- DCHECK(LoadHandlerTypeBit::decode(offset) == kLoadICHandlerForProperties);
- bool is_inobject = FieldOffsetIsInobject::decode(offset);
- bool is_double = FieldOffsetIsDouble::decode(offset);
- int field_index = FieldOffsetOffset::decode(offset) >> kPointerSizeLog2;
- int first_inobject_offset = 0;
- if (is_inobject) {
- first_inobject_offset =
- map->IsJSObjectMap() ? map->GetInObjectPropertyOffset(0) : 0;
- } else {
- first_inobject_offset = FixedArray::kHeaderSize;
- }
- int inobject_properties =
- map->IsJSObjectMap() ? map->GetInObjectProperties() : 0;
- FieldIndex result(is_inobject, field_index, is_double, inobject_properties,
- first_inobject_offset);
- DCHECK(result.GetLoadByFieldOffset() == offset);
- return result;
-}
-
-// Returns the offset format consumed by TurboFan stubs:
-// (offset << 3) | (is_double << 2) | (is_inobject << 1) | is_property
-// Where |offset| is relative to object start or FixedArray start, respectively.
-inline int FieldIndex::GetLoadByFieldOffset() const {
- return FieldOffsetIsInobject::encode(is_inobject()) |
- FieldOffsetIsDouble::encode(is_double()) |
- FieldOffsetOffset::encode(index() << kPointerSizeLog2) |
- LoadHandlerTypeBit::encode(kLoadICHandlerForProperties);
-}
-
inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
PropertyDetails details =
map->instance_descriptors()->GetDetails(descriptor_index);
@@ -126,30 +92,10 @@ inline FieldIndex FieldIndex::ForDescriptor(Map* map, int descriptor_index) {
details.representation().IsDouble());
}
-
-inline FieldIndex FieldIndex::ForKeyedLookupCacheIndex(Map* map, int index) {
- if (FLAG_compiled_keyed_generic_loads) {
- return ForLoadByFieldIndex(map, index);
- } else {
- return ForPropertyIndex(map, index);
- }
-}
-
-
inline FieldIndex FieldIndex::FromFieldAccessStubKey(int key) {
return FieldIndex(key);
}
-
-inline int FieldIndex::GetKeyedLookupCacheIndex() const {
- if (FLAG_compiled_keyed_generic_loads) {
- return GetLoadByFieldIndex();
- } else {
- return property_index();
- }
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/field-index.h b/deps/v8/src/field-index.h
index 404c0f613f..37b2f3c59d 100644
--- a/deps/v8/src/field-index.h
+++ b/deps/v8/src/field-index.h
@@ -27,12 +27,9 @@ class FieldIndex final {
static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
static FieldIndex ForDescriptor(Map* map, int descriptor_index);
static FieldIndex ForLoadByFieldIndex(Map* map, int index);
- static FieldIndex ForLoadByFieldOffset(Map* map, int index);
- static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
static FieldIndex FromFieldAccessStubKey(int key);
int GetLoadByFieldIndex() const;
- int GetLoadByFieldOffset() const;
bool is_inobject() const {
return IsInObjectBits::decode(bit_field_);
@@ -69,8 +66,6 @@ class FieldIndex final {
return result;
}
- int GetKeyedLookupCacheIndex() const;
-
int GetFieldAccessStubKey() const {
return bit_field_ &
(IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index b3b24e2c14..16bccf294b 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -13,7 +13,7 @@ namespace internal {
// static
FieldType* FieldType::None() {
- // Do not Smi::FromInt(0) here or for Any(), as that may translate
+ // Do not Smi::kZero here or for Any(), as that may translate
// as `nullptr` which is not a valid value for `this`.
return reinterpret_cast<FieldType*>(Smi::FromInt(2));
}
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 779a58949e..a7efe1163a 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -199,19 +199,18 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_simd, "harmony simd") \
V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_restrictive_generators, \
- "harmony restrictions on generator declarations") \
V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony unicode regexp property classes") \
- V(harmony_for_in, "harmony for-in syntax") \
- V(harmony_trailing_commas, \
- "harmony trailing commas in function parameter lists") \
V(harmony_class_fields, "harmony public fields in class literals")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V) \
- V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
- V(harmony_tailcalls, "harmony tail calls") \
+#define HARMONY_STAGED_BASE(V) \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
+ V(harmony_restrictive_generators, \
+ "harmony restrictions on generator declarations") \
+ V(harmony_tailcalls, "harmony tail calls") \
+ V(harmony_trailing_commas, \
+ "harmony trailing commas in function parameter lists") \
V(harmony_string_padding, "harmony String-padding methods")
#ifdef V8_I18N_SUPPORT
@@ -224,13 +223,7 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_async_await, "harmony async-await") \
- V(harmony_restrictive_declarations, \
- "harmony limitations on sloppy mode function declarations") \
- V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
- V(harmony_object_own_property_descriptors, \
- "harmony Object.getOwnPropertyDescriptors()")
+#define HARMONY_SHIPPING(V) V(harmony_async_await, "harmony async-await")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -255,9 +248,12 @@ HARMONY_STAGED(FLAG_STAGED_FEATURES)
HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
#undef FLAG_SHIPPING_FEATURES
+DEFINE_BOOL(future, false,
+ "Implies all staged features that we want to ship in the "
+ "not-too-far future")
+DEFINE_IMPLICATION(future, ignition_staging)
+
// Flags for experimental implementation features.
-DEFINE_BOOL(compiled_keyed_generic_loads, false,
- "use optimizing compiler to generate keyed generic load stubs")
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
@@ -278,6 +274,8 @@ DEFINE_BOOL(track_field_types, true, "track field types")
DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
+DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
+ "mark shared functions for tier up")
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
@@ -294,20 +292,14 @@ DEFINE_BOOL(string_slices, true, "use string slices")
DEFINE_BOOL(ignition, false, "use ignition interpreter")
DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
DEFINE_IMPLICATION(ignition_staging, ignition)
-DEFINE_IMPLICATION(ignition_staging, ignition_osr)
-DEFINE_IMPLICATION(ignition_staging, turbo_from_bytecode)
-DEFINE_IMPLICATION(ignition_staging, ignition_preserve_bytecode)
-DEFINE_BOOL(ignition_eager, false, "eagerly compile and parse with ignition")
DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
DEFINE_BOOL(ignition_deadcode, true,
"use ignition dead code elimination optimizer")
-DEFINE_BOOL(ignition_osr, false, "enable support for OSR from ignition code")
+DEFINE_BOOL(ignition_osr, true, "enable support for OSR from ignition code")
DEFINE_BOOL(ignition_peephole, true, "use ignition peephole optimizer")
DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
DEFINE_BOOL(ignition_filter_expression_positions, true,
"filter expression positions before the bytecode pipeline")
-DEFINE_BOOL(ignition_preserve_bytecode, false,
- "preserve generated bytecode even when switching tiers")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
DEFINE_BOOL(trace_ignition, false,
@@ -406,8 +398,7 @@ DEFINE_BOOL(flush_optimized_code_cache, false,
DEFINE_BOOL(inline_construct, true, "inline constructor calls")
DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_BOOL(inline_into_try, false, "inline into try blocks")
-DEFINE_IMPLICATION(turbo, inline_into_try)
+DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
DEFINE_INT(escape_analysis_iterations, 2,
"maximum number of escape analysis fix-point iterations")
@@ -430,7 +421,7 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_IMPLICATION(turbo, turbo_loop_peeling)
-DEFINE_BOOL(turbo_from_bytecode, false, "enable building graphs from bytecode")
+DEFINE_IMPLICATION(turbo, turbo_escape)
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
@@ -453,21 +444,16 @@ DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
DEFINE_BOOL(turbo_asm_deoptimization, false,
"enable deoptimization in TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
-DEFINE_BOOL(turbo_verify_machine_graph, false,
- "verify TurboFan machine graph before instruction selection")
+DEFINE_STRING(turbo_verify_machine_graph, nullptr,
+ "verify TurboFan machine graph before instruction selection")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
"print TurboFan statistics in machine-readable format")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(turbo_type_feedback, true,
"use typed feedback for representation inference in Turbofan")
-DEFINE_BOOL(turbo_source_positions, false,
- "track source code positions when building TurboFan IR")
-DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
-DEFINE_BOOL(native_context_specialization, true,
- "enable native context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(turbo_load_elimination, true, "enable load elimination in TurboFan")
@@ -485,8 +471,6 @@ DEFINE_BOOL(turbo_loop_peeling, false, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
-DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
-DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
@@ -535,6 +519,8 @@ DEFINE_BOOL(wasm_eh_prototype, false,
"enable prototype exception handling opcodes for wasm")
DEFINE_BOOL(wasm_mv_prototype, false,
"enable prototype multi-value support for wasm")
+DEFINE_BOOL(wasm_atomics_prototype, false,
+ "enable prototype atomic opcodes for wasm")
DEFINE_BOOL(wasm_trap_handler, false,
"use signal handlers to catch out of bounds memory access in wasm"
@@ -641,6 +627,10 @@ DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
DEFINE_BOOL(serialize_eager, false, "compile eagerly when caching scripts")
DEFINE_BOOL(serialize_age_code, false, "pre age code in the code cache")
DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
+#ifdef DEBUG
+DEFINE_BOOL(external_reference_stats, false,
+ "print statistics on external references used during serialization")
+#endif // DEBUG
// compiler.cc
DEFINE_INT(min_preparse_length, 1024,
@@ -739,13 +729,15 @@ DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_wrappers, false,
+DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
+DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
DEFINE_BOOL(black_allocation, false, "use black allocation")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
@@ -757,7 +749,10 @@ DEFINE_BOOL(track_gc_object_stats, false,
"track object counts and memory usage")
DEFINE_BOOL(trace_gc_object_stats, false,
"trace object counts and memory usage")
+DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
+DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
+DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
DEFINE_BOOL(track_detached_contexts, true,
"track native contexts that are expected to be garbage collected")
@@ -769,11 +764,12 @@ DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
DEFINE_BOOL(memory_reducer, true, "use memory reducer")
-DEFINE_BOOL(scavenge_reclaim_unmodified_objects, true,
- "remove unmodified and unreferenced objects")
DEFINE_INT(heap_growing_percent, 0,
"specifies heap growing factor as (1 + heap_growing_percent/100)")
+// spaces.cc
+DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
+
// execution.cc, messages.cc
DEFINE_BOOL(clear_exceptions_on_js_entry, false,
"clear pending exceptions when entering JavaScript")
@@ -802,7 +798,7 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL(tf_load_ic_stub, true, "use TF LoadIC stub")
+DEFINE_BOOL_READONLY(tf_load_ic_stub, true, "use TF LoadIC stub")
DEFINE_BOOL(tf_store_ic_stub, true, "use TF StoreIC stub")
// macro-assembler-ia32.cc
@@ -839,6 +835,7 @@ DEFINE_BOOL(trace_maps, false, "trace map creation")
// parser.cc
DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
+DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
@@ -884,9 +881,14 @@ DEFINE_INT(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
+DEFINE_BOOL(print_all_exceptions, false,
+ "print exception object and stack trace on each thrown exception")
// runtime.cc
DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
+DEFINE_INT(runtime_stats, 0,
+ "internal usage only for controlling runtime statistics")
+DEFINE_VALUE_IMPLICATION(runtime_call_stats, runtime_stats, 1)
// snapshot-common.cc
DEFINE_BOOL(profile_deserialization, false,
@@ -915,12 +917,6 @@ DEFINE_STRING(startup_blob, NULL,
DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
"Print the time it takes to lazily compile hydrogen code stubs.")
-DEFINE_BOOL(predictable, false, "enable predictable mode")
-DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
-DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
-DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
-
// mark-compact.cc
DEFINE_BOOL(force_marking_deque_overflows, false,
"force overflows of marking deque by reducing it's size "
@@ -1026,7 +1022,6 @@ DEFINE_BOOL(collect_heap_spill_statistics, false,
"(requires heap_stats)")
DEFINE_BOOL(trace_live_bytes, false,
"trace incrementing and resetting of live bytes")
-
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
// Regexp
@@ -1067,7 +1062,6 @@ DEFINE_BOOL(prof_cpp, false, "Like --prof, but ignore generated code.")
DEFINE_IMPLICATION(prof, prof_cpp)
DEFINE_BOOL(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
-DEFINE_BOOL(log_regexp, false, "Log regular expression execution.")
DEFINE_STRING(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.")
DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
@@ -1165,10 +1159,27 @@ DEFINE_IMPLICATION(print_all_code, trace_codegen)
#endif
#endif
+#undef FLAG
+#define FLAG FLAG_FULL
//
-// VERIFY_PREDICTABLE related flags
+// Predictable mode related flags.
//
+
+DEFINE_BOOL(predictable, false, "enable predictable mode")
+DEFINE_IMPLICATION(predictable, single_threaded)
+DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
+
+//
+// Threading related flags.
+//
+
+DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
+DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
+DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(single_threaded, parallel_compaction)
+
+
#undef FLAG
#ifdef VERIFY_PREDICTABLE
@@ -1182,7 +1193,6 @@ DEFINE_BOOL(verify_predictable, false,
DEFINE_INT(dump_allocations_digest_at_alloc, -1,
"dump allocations digest each n-th allocation")
-
//
// Read-only flags
//
@@ -1197,7 +1207,6 @@ DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
-
// Cleanup...
#undef FLAG_FULL
#undef FLAG_READONLY
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index c67fdc2d94..3b730278db 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -15,8 +15,8 @@
#include "src/safepoint-table.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
-#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -404,17 +404,17 @@ void StackFrame::SetReturnAddressLocationResolver(
static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
Code* interpreter_entry_trampoline =
isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+ Code* interpreter_bytecode_advance =
+ isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
Code* interpreter_bytecode_dispatch =
isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
- Code* interpreter_baseline_on_return =
- isolate->builtins()->builtin(Builtins::kInterpreterMarkBaselineOnReturn);
return (pc >= interpreter_entry_trampoline->instruction_start() &&
pc < interpreter_entry_trampoline->instruction_end()) ||
+ (pc >= interpreter_bytecode_advance->instruction_start() &&
+ pc < interpreter_bytecode_advance->instruction_end()) ||
(pc >= interpreter_bytecode_dispatch->instruction_start() &&
- pc < interpreter_bytecode_dispatch->instruction_end()) ||
- (pc >= interpreter_baseline_on_return->instruction_start() &&
- pc < interpreter_baseline_on_return->instruction_end());
+ pc < interpreter_bytecode_dispatch->instruction_end());
}
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
@@ -439,8 +439,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
if (!marker->IsSmi()) {
if (maybe_function->IsSmi()) {
return NONE;
- } else if (FLAG_ignition && IsInterpreterFramePc(iterator->isolate(),
- *(state->pc_address))) {
+ } else if (IsInterpreterFramePc(iterator->isolate(),
+ *(state->pc_address))) {
return INTERPRETED;
} else {
return JAVA_SCRIPT;
@@ -720,6 +720,12 @@ Object* StandardFrame::context() const {
return isolate()->heap()->undefined_value();
}
+int StandardFrame::position() const {
+ AbstractCode* code = AbstractCode::cast(LookupCode());
+ int code_offset = static_cast<int>(pc() - code->instruction_start());
+ return code->SourcePosition(code_offset);
+}
+
int StandardFrame::ComputeExpressionsCount() const {
Address base = GetExpressionAddress(0);
Address limit = sp() - kPointerSize;
@@ -985,16 +991,16 @@ int JavaScriptFrame::LookupExceptionHandlerInTable(
return code->LookupRangeInHandlerTable(pc_offset, stack_depth, prediction);
}
-void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
- Address pc, FILE* file,
+void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function,
+ AbstractCode* code,
+ int code_offset, FILE* file,
bool print_line_number) {
PrintF(file, "%s", function->IsOptimized() ? "*" : "~");
function->PrintName(file);
- int code_offset = static_cast<int>(pc - code->instruction_start());
PrintF(file, "+%d", code_offset);
if (print_line_number) {
SharedFunctionInfo* shared = function->shared();
- int source_pos = AbstractCode::cast(code)->SourcePosition(code_offset);
+ int source_pos = code->SourcePosition(code_offset);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
Script* script = Script::cast(maybe_script);
@@ -1024,8 +1030,17 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
- PrintFunctionAndOffset(frame->function(), frame->unchecked_code(),
- frame->pc(), file, print_line_number);
+ JSFunction* function = frame->function();
+ int code_offset = 0;
+ if (frame->is_interpreted()) {
+ InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+ code_offset = iframe->GetBytecodeOffset();
+ } else {
+ Code* code = frame->unchecked_code();
+ code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ }
+ PrintFunctionAndOffset(function, function->abstract_code(), code_offset,
+ file, print_line_number);
if (print_args) {
// function arguments
// (we are intentionally only printing the actually
@@ -1208,9 +1223,7 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
abstract_code = AbstractCode::cast(code);
} else {
DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
- // BailoutId points to the next bytecode in the bytecode aray. Subtract
- // 1 to get the end of current bytecode.
- code_offset = bailout_id.ToInt() - 1;
+ code_offset = bailout_id.ToInt(); // Points to current bytecode.
abstract_code = AbstractCode::cast(shared_info->bytecode_array());
}
FrameSummary summary(receiver, function, abstract_code, code_offset,
@@ -1270,6 +1283,19 @@ DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
return nullptr;
}
+Object* OptimizedFrame::receiver() const {
+ Code* code = LookupCode();
+ if (code->kind() == Code::BUILTIN) {
+ Address argc_ptr = fp() + OptimizedBuiltinFrameConstants::kArgCOffset;
+ intptr_t argc = *reinterpret_cast<intptr_t*>(argc_ptr);
+ intptr_t args_size =
+ (StandardFrameConstants::kFixedSlotCountAboveFp + argc) * kPointerSize;
+ Address receiver_ptr = fp() + args_size;
+ return *reinterpret_cast<Object**>(receiver_ptr);
+ } else {
+ return JavaScriptFrame::receiver();
+ }
+}
void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
DCHECK(functions->length() == 0);
@@ -1335,6 +1361,12 @@ Object* OptimizedFrame::StackSlotAt(int index) const {
return Memory::Object_at(fp() + StackSlotOffsetRelativeToFp(index));
}
+int InterpretedFrame::position() const {
+ AbstractCode* code = AbstractCode::cast(GetBytecodeArray());
+ int code_offset = GetBytecodeOffset();
+ return code->SourcePosition(code_offset);
+}
+
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
BytecodeArray* bytecode = function()->shared()->bytecode_array();
@@ -1351,6 +1383,17 @@ int InterpretedFrame::GetBytecodeOffset() const {
return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
}
+int InterpretedFrame::GetBytecodeOffset(Address fp) {
+ const int offset = InterpreterFrameConstants::kExpressionsOffset;
+ const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kBytecodeOffsetFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ Address expression_offset = fp + offset - index * kPointerSize;
+ int raw_offset = Smi::cast(Memory::Object_at(expression_offset))->value();
+ return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+}
+
void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
DCHECK_EQ(
@@ -1460,9 +1503,9 @@ Address WasmFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
-Object* WasmFrame::wasm_obj() const {
+Object* WasmFrame::wasm_instance() const {
Object* ret = wasm::GetOwningWasmInstance(LookupCode());
- if (ret == nullptr) ret = *(isolate()->factory()->undefined_value());
+ if (ret == nullptr) ret = isolate()->heap()->undefined_value();
return ret;
}
@@ -1473,9 +1516,18 @@ uint32_t WasmFrame::function_index() const {
}
Script* WasmFrame::script() const {
- Handle<JSObject> wasm(JSObject::cast(wasm_obj()), isolate());
- Handle<wasm::WasmDebugInfo> debug_info = wasm::GetDebugInfo(wasm);
- return wasm::WasmDebugInfo::GetFunctionScript(debug_info, function_index());
+ Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
+ return *wasm::GetScript(instance);
+}
+
+int WasmFrame::position() const {
+ int position = StandardFrame::position();
+ if (wasm::WasmIsAsmJs(wasm_instance(), isolate())) {
+ Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
+ position =
+ wasm::GetAsmWasmSourcePosition(instance, function_index(), position);
+ }
+ return position;
}
int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 373f4de92c..1daa36404b 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -218,6 +218,48 @@ class StandardFrameConstants : public CommonFrameConstants {
static const int kLastObjectOffset = kContextOffset;
};
+// OptimizedBuiltinFrameConstants are used for TF-generated builtins. They
+// always have a context below the saved fp/constant pool and below that the
+// JSFunction of the executing function and below that an integer (not a Smi)
+// containing the number of arguments passed to the builtin.
+//
+// slot JS frame
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | [Constant Pool] | | |
+// |- - - - - - - - -| | |
+// 2+cp | Context | | if a constant pool |
+// |- - - - - - - - -| | is used, cp = 1, |
+// 3+cp | JSFunction | | otherwise, cp = 0 |
+// |- - - - - - - - -| | |
+// 4+cp | argc | v |
+// +-----------------+---- |
+// 5+cp | | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | | Frame slots (slot >= 0)
+// |- - - - - - - - -| | |
+// | | v |
+// -----+-----------------+----- <-- stack ptr -------------
+//
+class OptimizedBuiltinFrameConstants : public StandardFrameConstants {
+ public:
+ static const int kArgCSize = kPointerSize;
+ static const int kArgCOffset = -3 * kPointerSize - kCPSlotSize;
+ static const int kFixedFrameSize = kFixedFrameSizeAboveFp - kArgCOffset;
+ static const int kFixedSlotCount = kFixedFrameSize / kPointerSize;
+};
+
// TypedFrames have a SMI type maker value below the saved FP/constant pool to
// distinguish them from StandardFrames, which have a context in that position
// instead.
@@ -308,10 +350,9 @@ class ConstructFrameConstants : public TypedFrameConstants {
public:
// FP-relative.
static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
- static const int kAllocationSiteOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
- DEFINE_TYPED_FRAME_SIZES(4);
+ static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
+ static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ DEFINE_TYPED_FRAME_SIZES(3);
};
class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
@@ -734,6 +775,7 @@ class StandardFrame : public StackFrame {
virtual Object* receiver() const;
virtual Script* script() const;
virtual Object* context() const;
+ virtual int position() const;
// Access the expressions in the stack frame including locals.
inline Object* GetExpression(int index) const;
@@ -871,8 +913,8 @@ class JavaScriptFrame : public StandardFrame {
return static_cast<JavaScriptFrame*>(frame);
}
- static void PrintFunctionAndOffset(JSFunction* function, Code* code,
- Address pc, FILE* file,
+ static void PrintFunctionAndOffset(JSFunction* function, AbstractCode* code,
+ int code_offset, FILE* file,
bool print_line_number);
static void PrintTop(Isolate* isolate, FILE* file, bool print_args,
@@ -941,6 +983,8 @@ class OptimizedFrame : public JavaScriptFrame {
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
+ Object* receiver() const override;
+
static int StackSlotOffsetRelativeToFp(int slot_index);
protected:
@@ -957,6 +1001,9 @@ class InterpretedFrame : public JavaScriptFrame {
public:
Type type() const override { return INTERPRETED; }
+ // Accessors.
+ int position() const override;
+
// Lookup exception handler for current {pc}, returns -1 if none found.
int LookupExceptionHandlerInTable(
int* data, HandlerTable::CatchPrediction* prediction) override;
@@ -984,6 +1031,8 @@ class InterpretedFrame : public JavaScriptFrame {
List<FrameSummary>* frames,
FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+ static int GetBytecodeOffset(Address fp);
+
protected:
inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
@@ -1064,9 +1113,10 @@ class WasmFrame : public StandardFrame {
Code* unchecked_code() const override;
// Accessors.
- Object* wasm_obj() const;
+ Object* wasm_instance() const;
uint32_t function_index() const;
Script* script() const override;
+ int position() const override;
static WasmFrame* cast(StackFrame* frame) {
DCHECK(frame->is_wasm());
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index e8eeb8ecac..22c991bed1 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -361,11 +361,7 @@ void FullCodeGenerator::Generate() {
masm()->CheckConstPool(true, false);
}
-
-void FullCodeGenerator::ClearAccumulator() {
- __ mov(r0, Operand(Smi::FromInt(0)));
-}
-
+void FullCodeGenerator::ClearAccumulator() { __ mov(r0, Operand(Smi::kZero)); }
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ mov(r2, Operand(profiling_counter_));
@@ -1022,8 +1018,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, &exit);
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1062,7 +1057,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(r1, r0);
- __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ cmp(r1, Operand(Smi::kZero));
__ b(eq, &no_descriptors);
__ LoadInstanceDescriptors(r0, r2);
@@ -1071,7 +1066,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up the four remaining stack slots.
__ push(r0); // Map.
- __ mov(r0, Operand(Smi::FromInt(0)));
+ __ mov(r0, Operand(Smi::kZero));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
__ jmp(&loop);
@@ -1088,7 +1083,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
__ Push(r1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ mov(r0, Operand(Smi::FromInt(0)));
+ __ mov(r0, Operand(Smi::kZero));
__ Push(r0); // Initial index.
// Generate code for doing the condition check.
@@ -1124,10 +1119,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ str(r2, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)));
// r0 contains the key. The receiver in r1 is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub filter_stub(isolate());
- __ CallStub(&filter_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
@@ -1299,7 +1293,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1742,12 +1736,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r0);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1935,7 +1931,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+ __ mov(right, Operand(Smi::kZero), LeaveCC, pl);
__ b(mi, &stub_call);
break;
}
@@ -2029,9 +2025,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2106,9 +2103,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2119,7 +2116,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, r1);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ ldr(r3, location);
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
@@ -2229,17 +2226,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(r0);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
- NEVER_INLINE_TARGET_ADDRESS);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2394,14 +2380,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ mov(r0, Operand(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2505,11 +2489,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Record source position for debugger.
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2549,7 +2535,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2843,41 +2829,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, r0);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(r0);
-
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ IndexFromHash(r0, r0);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3056,8 +3007,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ mov(r3, r0);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(r0);
break;
}
@@ -3084,7 +3034,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::FromInt(0)));
+ __ mov(ip, Operand(Smi::kZero));
PushOperand(ip);
}
switch (assign_type) {
@@ -3236,11 +3186,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in r0.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(r0);
@@ -3251,13 +3202,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(r0);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3426,8 +3378,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(r1);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index 1854f102be..51b3009cd0 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -362,11 +362,7 @@ void FullCodeGenerator::Generate() {
masm()->CheckConstPool(true, false);
}
-
-void FullCodeGenerator::ClearAccumulator() {
- __ Mov(x0, Smi::FromInt(0));
-}
-
+void FullCodeGenerator::ClearAccumulator() { __ Mov(x0, Smi::kZero); }
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ Mov(x2, Operand(profiling_counter_));
@@ -1017,8 +1013,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ JumpIfRoot(x0, Heap::kNullValueRootIndex, &exit);
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
__ Bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ Bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1113,10 +1108,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Str(x10, FieldMemOperand(x3, FixedArray::OffsetOfElementAt(vector_index)));
// x0 contains the key. The receiver in x1 is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub filter_stub(isolate());
- __ CallStub(&filter_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ CompareRoot(result_register(), Heap::kUndefinedValueRootIndex);
@@ -1286,7 +1280,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1721,12 +1715,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(x0);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1919,9 +1915,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -1998,9 +1995,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
@@ -2123,16 +2120,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(x0);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCallWithLoadIC");
@@ -2295,14 +2282,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ Mov(x0, arg_count);
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2409,11 +2394,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
// Call the evaluated function.
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ Mov(x0, arg_count);
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2453,7 +2440,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2751,41 +2738,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, x0);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
- __ Tst(x10, String::kContainsCachedArrayIndexMask);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(x0);
-
- __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset));
- __ IndexFromHash(x10, x0);
-
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -2976,8 +2928,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ Mov(x3, x0);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(x0);
break;
}
@@ -3153,11 +3104,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in x0.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(x0);
@@ -3168,13 +3120,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(x0);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3352,8 +3305,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(x1);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 25d7f920f1..ee5e8881ba 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -223,20 +223,25 @@ void FullCodeGenerator::PrepareForBailout(Expression* node,
PrepareForBailoutForId(node->id(), state);
}
-void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
- TypeFeedbackId id) {
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+ ic_total_count_++;
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot,
+ Handle<Object> name) {
DCHECK(name->IsName());
__ Move(LoadDescriptor::NameRegister(), name);
EmitLoadSlot(LoadDescriptor::SlotRegister(), slot);
- Handle<Code> ic = CodeFactory::LoadIC(isolate()).code();
- CallIC(ic, id);
+ Handle<Code> code = CodeFactory::LoadIC(isolate()).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
if (FLAG_tf_load_ic_stub) RestoreContext();
}
void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
- Handle<Object> name, TypeFeedbackId id) {
+ Handle<Object> name) {
DCHECK(name->IsName());
__ Move(StoreDescriptor::NameRegister(), name);
@@ -249,8 +254,8 @@ void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
}
- Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
- CallIC(ic, id);
+ Handle<Code> code = CodeFactory::StoreIC(isolate(), language_mode()).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -264,9 +269,9 @@ void FullCodeGenerator::CallKeyedStoreIC(FeedbackVectorSlot slot) {
EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
}
- Handle<Code> ic =
+ Handle<Code> code =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
- CallIC(ic);
+ __ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -466,9 +471,7 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
context->fall_through());
}
-
-void FullCodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
+void FullCodeGenerator::VisitDeclarations(Declaration::List* declarations) {
ZoneList<Handle<Object> >* saved_globals = globals_;
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
@@ -503,8 +506,8 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
#endif
EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
proxy->VariableFeedbackSlot());
- Handle<Code> ic = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
- CallIC(ic);
+ Handle<Code> code = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
}
void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
@@ -652,10 +655,6 @@ void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::RegExpConstructResult(isolate()));
-}
-
void FullCodeGenerator::EmitHasProperty() {
Callable callable = CodeFactory::HasProperty(isolate());
PopOperand(callable.descriptor().GetRegisterParameter(1));
@@ -666,12 +665,14 @@ void FullCodeGenerator::EmitHasProperty() {
void FullCodeGenerator::RecordStatementPosition(int pos) {
DCHECK_NE(kNoSourcePosition, pos);
- source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, true);
+ source_position_table_builder_.AddPosition(masm_->pc_offset(),
+ SourcePosition(pos), true);
}
void FullCodeGenerator::RecordPosition(int pos) {
DCHECK_NE(kNoSourcePosition, pos);
- source_position_table_builder_.AddPosition(masm_->pc_offset(), pos, false);
+ source_position_table_builder_.AddPosition(masm_->pc_offset(),
+ SourcePosition(pos), false);
}
@@ -683,8 +684,7 @@ void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
// For default constructors, start position equals end position, and there
// is no source code besides the class literal.
- int pos = std::max(fun->start_position(), fun->end_position() - 1);
- RecordStatementPosition(pos);
+ RecordStatementPosition(fun->return_position());
if (info_->is_debug()) {
// Always emit a debug break slot before a return.
DebugCodegen::GenerateSlot(masm_, RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
@@ -746,7 +746,7 @@ void FullCodeGenerator::VisitSuperCallReference(SuperCallReference* super) {
void FullCodeGenerator::EmitDebugBreakInOptimizedCode(CallRuntime* expr) {
- context()->Plug(handle(Smi::FromInt(0), isolate()));
+ context()->Plug(handle(Smi::kZero, isolate()));
}
@@ -1126,8 +1126,8 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
EmitLoadSlot(LoadDescriptor::SlotRegister(), prop->PropertyFeedbackSlot());
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
- CallIC(ic);
+ Handle<Code> code = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ Call(code, RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1582,7 +1582,7 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
if (lit->class_variable_proxy() != nullptr) {
EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
- lit->ProxySlot());
+ lit->ProxySlot(), HoleCheckMode::kElided);
}
context()->Plug(result_register());
@@ -1668,47 +1668,48 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType();
- switch (call_type) {
- case Call::POSSIBLY_EVAL_CALL:
- EmitPossiblyEvalCall(expr);
- break;
- case Call::GLOBAL_CALL:
- EmitCallWithLoadIC(expr);
- break;
- case Call::LOOKUP_SLOT_CALL:
- // Call to a lookup slot (dynamically introduced variable).
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- break;
- case Call::NAMED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VisitForStackValue(property->obj());
- EmitCallWithLoadIC(expr);
- break;
- }
- case Call::KEYED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VisitForStackValue(property->obj());
- EmitKeyedCallWithLoadIC(expr, property->key());
- break;
+ if (expr->is_possibly_eval()) {
+ EmitPossiblyEvalCall(expr);
+ } else {
+ switch (call_type) {
+ case Call::GLOBAL_CALL:
+ EmitCallWithLoadIC(expr);
+ break;
+ case Call::WITH_CALL:
+ // Call to a lookup slot looked up through a with scope.
+ PushCalleeAndWithBaseObject(expr);
+ EmitCall(expr);
+ break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitCallWithLoadIC(expr);
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ break;
+ }
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ EmitSuperCallWithLoadIC(expr);
+ break;
+ case Call::KEYED_SUPER_PROPERTY_CALL:
+ EmitKeyedSuperCallWithLoadIC(expr);
+ break;
+ case Call::SUPER_CALL:
+ EmitSuperConstructorCall(expr);
+ break;
+ case Call::OTHER_CALL:
+ // Call to an arbitrary expression not handled specially above.
+ VisitForStackValue(callee);
+ OperandStackDepthIncrement(1);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Emit function call.
+ EmitCall(expr);
+ break;
}
- case Call::NAMED_SUPER_PROPERTY_CALL:
- EmitSuperCallWithLoadIC(expr);
- break;
- case Call::KEYED_SUPER_PROPERTY_CALL:
- EmitKeyedSuperCallWithLoadIC(expr);
- break;
- case Call::SUPER_CALL:
- EmitSuperConstructorCall(expr);
- break;
- case Call::OTHER_CALL:
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- OperandStackDepthIncrement(1);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- // Emit function call.
- EmitCall(expr);
- break;
}
#ifdef DEBUG
@@ -1982,65 +1983,6 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::~EnterBlockScopeIfNeeded() {
codegen_->scope_ = saved_scope_;
}
-
-bool FullCodeGenerator::NeedsHoleCheckForLoad(VariableProxy* proxy) {
- Variable* var = proxy->var();
-
- if (!var->binding_needs_init()) {
- return false;
- }
-
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
- DCHECK(var->location() == VariableLocation::PARAMETER ||
- var->location() == VariableLocation::LOCAL ||
- var->location() == VariableLocation::CONTEXT);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code), the VariableProxy is in
- // the source physically located after the initializer of the variable,
- // and that the initializer cannot be skipped due to a nonlinear scope.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- // The check cannot be skipped on non-linear scopes, namely switch
- // scopes, to ensure tests are done in cases like the following:
- // switch (1) { case 0: let x = 2; case 1: f(x); }
- // The scope of the variable needs to be checked, in case the use is
- // in a sub-block which may be linear.
- if (var->scope()->GetDeclarationScope() != scope()->GetDeclarationScope()) {
- return true;
- }
-
- if (var->is_this()) {
- DCHECK(literal() != nullptr &&
- (literal()->kind() & kSubclassConstructor) != 0);
- // TODO(littledan): implement 'this' hole check elimination.
- return true;
- }
-
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != kNoSourcePosition);
- DCHECK(proxy->position() != kNoSourcePosition);
-
- return var->scope()->is_nonlinear() ||
- var->initializer_position() >= proxy->position();
-}
-
Handle<Script> FullCodeGenerator::script() { return info_->script(); }
LanguageMode FullCodeGenerator::language_mode() {
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 2a4eb9dd3b..558dae18dd 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -354,7 +354,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void VisitInDuplicateContext(Expression* expr);
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
@@ -478,15 +478,12 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
F(Call) \
F(NewObject) \
F(IsJSReceiver) \
- F(HasCachedArrayIndex) \
- F(GetCachedArrayIndex) \
F(GetSuperConstructor) \
F(DebugBreakInOptimizedCode) \
F(ClassOf) \
F(StringCharCodeAt) \
F(SubString) \
F(RegExpExec) \
- F(RegExpConstructResult) \
F(ToInteger) \
F(NumberToString) \
F(ToString) \
@@ -520,8 +517,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void EmitAccessor(ObjectLiteralProperty* property);
- bool NeedsHoleCheckForLoad(VariableProxy* proxy);
-
// Expects the arguments and the function already pushed.
void EmitResolvePossiblyDirectEval(Call* expr);
@@ -576,7 +571,8 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
void EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot);
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode);
// Helper functions to EmitVariableAssignment
void EmitStoreToStackLocalOrContextSlot(Variable* var,
@@ -620,10 +616,8 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
- void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name,
- TypeFeedbackId id = TypeFeedbackId::None());
- void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name,
- TypeFeedbackId id = TypeFeedbackId::None());
+ void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name);
+ void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name);
void CallKeyedStoreIC(FeedbackVectorSlot slot);
void SetFunctionPosition(FunctionLiteral* fun);
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index e5f66cd1d4..5e80dd3280 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -348,7 +348,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- __ Move(eax, Immediate(Smi::FromInt(0)));
+ __ Move(eax, Immediate(Smi::kZero));
}
@@ -960,8 +960,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, &exit);
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -992,7 +991,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(edx, eax);
- __ cmp(edx, Immediate(Smi::FromInt(0)));
+ __ cmp(edx, Immediate(Smi::kZero));
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(eax, ecx);
@@ -1003,7 +1002,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(ecx); // Enumeration cache.
__ push(edx); // Number of valid entries for the map in the enum cache.
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ push(Immediate(Smi::kZero)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
@@ -1018,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ push(Immediate(Smi::kZero)); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1050,10 +1049,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
// eax contains the key. The receiver in ebx is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub filter_stub(isolate());
- __ CallStub(&filter_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
@@ -1217,7 +1215,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1650,12 +1648,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1934,9 +1934,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2009,9 +2010,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
@@ -2025,7 +2026,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2135,14 +2136,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(eax);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2288,14 +2281,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ Move(eax, Immediate(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2394,11 +2385,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ Set(eax, arg_count);
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Move(eax, Immediate(arg_count));
+ __ call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2438,7 +2431,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
- __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2729,45 +2722,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, eax);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
-
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -2951,8 +2905,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ mov(ebx, eax);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(eax);
break;
}
@@ -2979,7 +2932,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- PushOperand(Smi::FromInt(0));
+ PushOperand(Smi::kZero);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -3131,12 +3084,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in eax.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(eax);
@@ -3148,13 +3102,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
// Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3322,8 +3277,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(edx);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 7f976866a1..10cdb54b40 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -368,7 +368,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
__ mov(v0, zero_reg);
}
@@ -1018,8 +1018,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex); // In delay slot.
__ Branch(&exit, eq, a0, Operand(at));
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ mov(a0, v0);
__ bind(&done_convert);
@@ -1058,14 +1057,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(a1, v0);
- __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+ __ Branch(&no_descriptors, eq, a1, Operand(Smi::kZero));
__ LoadInstanceDescriptors(v0, a2);
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ li(a0, Operand(Smi::FromInt(0)));
+ __ li(a0, Operand(Smi::kZero));
// Push map, enumeration cache, enumeration cache length (as smi) and zero.
__ Push(v0, a2, a1, a0);
__ jmp(&loop);
@@ -1082,7 +1081,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ li(a0, Operand(Smi::FromInt(0)));
+ __ li(a0, Operand(Smi::kZero));
__ Push(a0); // Initial index.
// Generate code for doing the condition check.
@@ -1119,10 +1118,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(a0, result_register());
// a0 contains the key. The receiver in a1 is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub filter_stub(isolate());
- __ CallStub(&filter_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1295,7 +1293,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1739,12 +1737,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1938,7 +1938,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Branch(&done, ne, v0, Operand(zero_reg));
__ Addu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
__ mov(v0, zero_reg);
break;
}
@@ -2033,9 +2033,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2110,9 +2111,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -2124,7 +2125,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, a1);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ lw(a3, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2241,14 +2242,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(v0);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId id) {
- ic_total_count_++;
- __ Call(code, RelocInfo::CODE_TARGET, id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2400,14 +2393,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ li(a0, Operand(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2510,11 +2501,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2554,7 +2547,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2850,42 +2843,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, v0);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
- __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(v0);
-
- __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset));
- __ IndexFromHash(v0, v0);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3063,8 +3020,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ mov(a3, v0);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(v0);
break;
}
@@ -3091,7 +3047,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ li(at, Operand(Smi::FromInt(0)));
+ __ li(at, Operand(Smi::kZero));
PushOperand(at);
}
switch (assign_type) {
@@ -3240,11 +3196,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in v0.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(v0);
@@ -3255,13 +3212,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
break;
+ }
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3433,8 +3391,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
__ mov(a0, result_register());
PopOperand(a1);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 660adb1aa6..7640c52031 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -367,7 +367,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
__ mov(v0, zero_reg);
}
@@ -1018,8 +1018,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex); // In delay slot.
__ Branch(&exit, eq, a0, Operand(at));
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ mov(a0, v0);
__ bind(&done_convert);
@@ -1058,14 +1057,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(a1, v0);
- __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+ __ Branch(&no_descriptors, eq, a1, Operand(Smi::kZero));
__ LoadInstanceDescriptors(v0, a2);
__ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
__ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ li(a0, Operand(Smi::FromInt(0)));
+ __ li(a0, Operand(Smi::kZero));
// Push map, enumeration cache, enumeration cache length (as smi) and zero.
__ Push(v0, a2, a1, a0);
__ jmp(&loop);
@@ -1082,7 +1081,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ li(a0, Operand(Smi::FromInt(0)));
+ __ li(a0, Operand(Smi::kZero));
__ Push(a0); // Initial index.
// Generate code for doing the condition check.
@@ -1120,10 +1119,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(a0, result_register());
// a0 contains the key. The receiver in a1 is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub filter_stub(isolate());
- __ CallStub(&filter_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1296,7 +1294,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1740,12 +1738,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(v0);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1938,7 +1938,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg));
__ Daddu(scratch2, right, left);
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
__ mov(v0, zero_reg);
break;
}
@@ -2033,9 +2033,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2110,9 +2111,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ValueRegister(), result_register());
@@ -2124,7 +2125,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, a1);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ ld(a3, location);
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -2240,14 +2241,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(v0);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId id) {
- ic_total_count_++;
- __ Call(code, RelocInfo::CODE_TARGET, id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2399,14 +2392,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ li(a0, Operand(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2509,11 +2500,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2553,7 +2546,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2849,42 +2842,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, v0);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ lwu(a0, FieldMemOperand(v0, String::kHashFieldOffset));
- __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(v0);
-
- __ lwu(v0, FieldMemOperand(v0, String::kHashFieldOffset));
- __ IndexFromHash(v0, v0);
-
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3063,8 +3020,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ mov(a3, v0);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(v0);
break;
}
@@ -3091,7 +3047,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ li(at, Operand(Smi::FromInt(0)));
+ __ li(at, Operand(Smi::kZero));
PushOperand(at);
}
switch (assign_type) {
@@ -3240,11 +3196,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in v0.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(v0);
@@ -3255,13 +3212,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(v0);
}
break;
+ }
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3433,8 +3391,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
SetExpressionPosition(expr);
__ mov(a0, result_register());
PopOperand(a1);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index de9a8f46cf..85d198da2f 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -368,7 +368,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ LoadSmiLiteral(r3, Smi::kZero);
}
@@ -982,8 +982,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
__ beq(&exit);
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1022,7 +1021,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(r4, r3);
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r4, Smi::kZero, r0);
__ beq(&no_descriptors);
__ LoadInstanceDescriptors(r3, r5);
@@ -1032,7 +1031,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up the four remaining stack slots.
__ push(r3); // Map.
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ LoadSmiLiteral(r3, Smi::kZero);
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r5, r4, r3);
__ b(&loop);
@@ -1049,7 +1048,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ Push(r4); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ LoadSmiLiteral(r3, Smi::kZero);
__ Push(r3); // Initial index.
// Generate code for doing the condition check.
@@ -1265,7 +1264,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1704,12 +1703,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r3);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1944,7 +1945,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ add(scratch2, right, left);
__ cmpi(scratch2, Operand::Zero());
__ blt(&stub_call);
- __ LoadSmiLiteral(right, Smi::FromInt(0));
+ __ LoadSmiLiteral(right, Smi::kZero);
break;
}
case Token::BIT_OR:
@@ -2037,9 +2038,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2114,9 +2116,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2127,7 +2129,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, r4);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ LoadP(r6, location);
__ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
@@ -2237,13 +2239,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(r3);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2394,14 +2389,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ mov(r3, Operand(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2506,11 +2499,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Record source position for debugger.
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2550,7 +2545,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2845,43 +2840,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, r3);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
- // PPC - assume ip is free
- __ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
- __ and_(r0, r3, ip, SetRC);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through, cr0);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(r3);
-
- __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
- __ IndexFromHash(r3, r3);
-
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3055,8 +3013,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ mr(r6, r3);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(r3);
break;
}
@@ -3083,7 +3040,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ __ LoadSmiLiteral(ip, Smi::kZero);
PushOperand(ip);
}
switch (assign_type) {
@@ -3233,12 +3190,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in r3.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
{
EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(r3);
@@ -3249,13 +3207,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(r3);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3426,8 +3385,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(r4);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index dfe652755a..91fa86de80 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -369,7 +369,7 @@ void FullCodeGenerator::Generate() {
}
void FullCodeGenerator::ClearAccumulator() {
- __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ LoadSmiLiteral(r2, Smi::kZero);
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
@@ -952,8 +952,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ beq(&exit);
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -991,7 +990,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(r3, r2);
- __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r3, Smi::kZero, r0);
__ beq(&no_descriptors, Label::kNear);
__ LoadInstanceDescriptors(r2, r4);
@@ -1001,7 +1000,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up the four remaining stack slots.
__ push(r2); // Map.
- __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ LoadSmiLiteral(r2, Smi::kZero);
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r4, r3, r2);
__ b(&loop);
@@ -1018,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
__ Push(r3); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ LoadSmiLiteral(r2, Smi::kZero);
__ Push(r2); // Initial index.
// Generate code for doing the condition check.
@@ -1229,7 +1228,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1664,12 +1663,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(r2);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1904,7 +1905,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ AddP(scratch2, right, left);
__ CmpP(scratch2, Operand::Zero());
__ blt(&stub_call);
- __ LoadSmiLiteral(right, Smi::FromInt(0));
+ __ LoadSmiLiteral(right, Smi::kZero);
break;
}
case Token::BIT_OR:
@@ -1994,9 +1995,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2071,7 +2073,8 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2083,7 +2086,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, r3);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
@@ -2188,11 +2191,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ Call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2339,14 +2337,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ mov(r2, Operand(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2448,11 +2444,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Record source position for debugger.
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r2, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2491,7 +2489,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2776,38 +2774,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, r2);
}
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
- __ AndP(r0, r2, Operand(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(r2);
-
- __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
- __ IndexFromHash(r2, r2);
-
- context()->Plug(r2);
-}
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -2977,8 +2943,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ LoadRR(r5, r2);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(r2);
break;
}
@@ -3004,7 +2969,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ LoadSmiLiteral(ip, Smi::FromInt(0));
+ __ LoadSmiLiteral(ip, Smi::kZero);
PushOperand(ip);
}
switch (assign_type) {
@@ -3154,12 +3119,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in r2.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
{
EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(r2);
@@ -3170,13 +3136,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(r2);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3342,8 +3309,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(r3);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 525319fe70..0720c3d083 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -976,8 +976,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -1016,7 +1015,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label no_descriptors;
__ EnumLength(rdx, rax);
- __ Cmp(rdx, Smi::FromInt(0));
+ __ Cmp(rdx, Smi::kZero);
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(rax, rcx);
@@ -1027,7 +1026,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(rax); // Map.
__ Push(rcx); // Enumeration cache.
__ Push(rdx); // Number of valid entries for the map in the enum cache.
- __ Push(Smi::FromInt(0)); // Initial index.
+ __ Push(Smi::kZero); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
@@ -1043,7 +1042,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ Push(Smi::FromInt(0)); // Initial index.
+ __ Push(Smi::kZero); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1076,11 +1075,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
- // rax contains the key. The receiver in rbx is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // rax contains the key. The receiver in rbx is the second argument to
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub has_stub(isolate());
- __ CallStub(&has_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
@@ -1246,7 +1244,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
: "[ Stack slot");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
DCHECK(IsLexicalVariableMode(var->mode()));
@@ -1677,12 +1675,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(rax);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1926,9 +1926,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2001,9 +2002,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ LoadGlobalObject(StoreDescriptor::ReceiverRegister());
@@ -2014,7 +2015,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2122,14 +2123,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(rax);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2277,14 +2270,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ Set(rax, arg_count);
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2384,11 +2375,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2428,7 +2421,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2720,45 +2713,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, rax);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ testl(FieldOperand(rax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ j(zero, if_true);
- __ jmp(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(rax);
-
- __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
- DCHECK(String::kHashShift >= kSmiTagSize);
- __ IndexFromHash(rax, rax);
-
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -2942,8 +2896,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ movp(rbx, rax);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(rax);
break;
}
@@ -2970,7 +2923,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- PushOperand(Smi::FromInt(0));
+ PushOperand(Smi::kZero);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -3120,12 +3073,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in rax.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(rax);
@@ -3137,13 +3091,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
// Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(rax);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3311,8 +3266,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(rdx);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index 47be8b0616..7cc7e2bc04 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -345,7 +345,7 @@ void FullCodeGenerator::Generate() {
void FullCodeGenerator::ClearAccumulator() {
- __ Move(eax, Immediate(Smi::FromInt(0)));
+ __ Move(eax, Immediate(Smi::kZero));
}
@@ -952,8 +952,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, &exit);
__ bind(&convert);
- ToObjectStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->ToObject(), RelocInfo::CODE_TARGET);
RestoreContext();
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
@@ -984,7 +983,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&use_cache);
__ EnumLength(edx, eax);
- __ cmp(edx, Immediate(Smi::FromInt(0)));
+ __ cmp(edx, Immediate(Smi::kZero));
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(eax, ecx);
@@ -995,7 +994,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(ecx); // Enumeration cache.
__ push(edx); // Number of valid entries for the map in the enum cache.
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ push(Immediate(Smi::kZero)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
@@ -1010,7 +1009,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
- __ push(Immediate(Smi::FromInt(0))); // Initial index.
+ __ push(Immediate(Smi::kZero)); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1042,10 +1041,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
// eax contains the key. The receiver in ebx is the second argument to the
- // ForInFilterStub. ForInFilter returns undefined if the receiver doesn't
+ // ForInFilter. ForInFilter returns undefined if the receiver doesn't
// have the key or returns the name-converted key.
- ForInFilterStub filter_stub(isolate());
- __ CallStub(&filter_stub);
+ __ Call(isolate()->builtins()->ForInFilter(), RelocInfo::CODE_TARGET);
RestoreContext();
PrepareForBailoutForId(stmt->FilterId(), BailoutState::TOS_REGISTER);
__ JumpIfRoot(result_register(), Heap::kUndefinedValueRootIndex,
@@ -1209,7 +1207,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (NeedsHoleCheckForLoad(proxy)) {
+ if (proxy->hole_check_mode() == HoleCheckMode::kRequired) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
Label done;
@@ -1642,12 +1640,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
- case VARIABLE:
- EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
- expr->op(), expr->AssignmentSlot());
+ case VARIABLE: {
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ EmitVariableAssignment(proxy->var(), expr->op(), expr->AssignmentSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
context()->Plug(eax);
break;
+ }
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
@@ -1926,9 +1926,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
- Variable* var = expr->AsVariableProxy()->var();
+ VariableProxy* proxy = expr->AsVariableProxy();
EffectContext context(this);
- EmitVariableAssignment(var, Token::ASSIGN, slot);
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2001,9 +2002,9 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::ReceiverRegister(), NativeContextOperand());
@@ -2017,7 +2018,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
// Perform an initialization check for lexically declared variables.
- if (var->binding_needs_init()) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2127,14 +2128,6 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
context()->Plug(eax);
}
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
- ic_total_count_++;
- __ call(code, RelocInfo::CODE_TARGET, ast_id);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2280,14 +2273,12 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// not return to this function.
EmitProfilingCounterHandlingForReturnSequence(true);
}
- Handle<Code> ic =
- CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
- .code();
+ Handle<Code> code =
+ CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- // Don't assign a type feedback id to the IC, since type feedback is provided
- // by the vector above.
- CallIC(ic);
+ __ Move(eax, Immediate(arg_count));
+ CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2386,11 +2377,13 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
SetCallPosition(expr);
+ Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
+ expr->tail_call_mode())
+ .code();
+ __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ Set(eax, arg_count);
- __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
- expr->tail_call_mode()),
- RelocInfo::CODE_TARGET);
+ __ Move(eax, Immediate(arg_count));
+ __ call(code, RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
RestoreContext();
@@ -2430,7 +2423,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate());
- __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ CallIC(stub.GetCode());
OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), BailoutState::TOS_REGISTER);
RestoreContext();
@@ -2721,45 +2714,6 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
context()->DropAndPlug(1, eax);
}
-
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ test(FieldOperand(eax, String::kHashFieldOffset),
- Immediate(String::kContainsCachedArrayIndexMask));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- __ AssertString(eax);
-
- __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
- __ IndexFromHash(eax, eax);
-
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -2943,8 +2897,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
VisitForTypeofValue(expr->expression());
}
__ mov(ebx, eax);
- TypeofStub typeof_stub(isolate());
- __ CallStub(&typeof_stub);
+ __ Call(isolate()->builtins()->Typeof(), RelocInfo::CODE_TARGET);
context()->Plug(eax);
break;
}
@@ -2971,7 +2924,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- PushOperand(Smi::FromInt(0));
+ PushOperand(Smi::kZero);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -3123,12 +3076,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Store the value returned in eax.
switch (assign_type) {
- case VARIABLE:
+ case VARIABLE: {
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (expr->is_postfix()) {
// Perform the assignment as if via '='.
{ EffectContext context(this);
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context.Plug(eax);
@@ -3140,13 +3094,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
} else {
// Perform the assignment as if via '='.
- EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
- Token::ASSIGN, expr->CountSlot());
+ EmitVariableAssignment(proxy->var(), Token::ASSIGN, expr->CountSlot(),
+ proxy->hole_check_mode());
PrepareForBailoutForId(expr->AssignmentId(),
BailoutState::TOS_REGISTER);
context()->Plug(eax);
}
break;
+ }
case NAMED_PROPERTY: {
PopOperand(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountSlot(), prop->key()->AsLiteral()->value());
@@ -3314,8 +3269,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
SetExpressionPosition(expr);
PopOperand(edx);
- InstanceOfStub stub(isolate());
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 4e73981593..cc5451f1b3 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -1942,7 +1942,7 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
#ifdef __MACH_O
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
MachO mach_o(&zone);
Writer w(&mach_o);
@@ -1954,7 +1954,7 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
mach_o.Write(&w, desc->CodeStart(), desc->CodeSize());
#else
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
ELF elf(&zone);
Writer w(&elf);
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index ea46344bd3..9ff16affe4 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -52,8 +52,6 @@ class GlobalHandles::Node {
STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
Internals::kNodeIsIndependentShift);
- STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
- Internals::kNodeIsPartiallyDependentShift);
STATIC_ASSERT(static_cast<int>(IsActive::kShift) ==
Internals::kNodeIsActiveShift);
}
@@ -66,11 +64,7 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
set_independent(false);
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- set_active(false);
- } else {
- set_partially_dependent(false);
- }
+ set_active(false);
set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
weak_callback_ = NULL;
@@ -92,11 +86,7 @@ class GlobalHandles::Node {
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- set_active(false);
- } else {
- set_partially_dependent(false);
- }
+ set_active(false);
set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
weak_callback_ = NULL;
@@ -116,11 +106,7 @@ class GlobalHandles::Node {
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- set_active(false);
- } else {
- set_partially_dependent(false);
- }
+ set_active(false);
weak_callback_ = NULL;
DecreaseBlockUses();
}
@@ -153,21 +139,10 @@ class GlobalHandles::Node {
flags_ = IsIndependent::update(flags_, v);
}
- bool is_partially_dependent() {
- CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
- return IsPartiallyDependent::decode(flags_);
- }
- void set_partially_dependent(bool v) {
- CHECK(!FLAG_scavenge_reclaim_unmodified_objects);
- flags_ = IsPartiallyDependent::update(flags_, v);
- }
-
bool is_active() {
- CHECK(FLAG_scavenge_reclaim_unmodified_objects);
return IsActive::decode(flags_);
}
void set_active(bool v) {
- CHECK(FLAG_scavenge_reclaim_unmodified_objects);
flags_ = IsActive::update(flags_, v);
}
@@ -227,14 +202,6 @@ class GlobalHandles::Node {
set_independent(true);
}
- void MarkPartiallyDependent() {
- DCHECK(IsInUse());
- if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
- set_partially_dependent(true);
- }
- }
- void clear_partially_dependent() { set_partially_dependent(false); }
-
// Callback accessor.
// TODO(svenpanne) Re-enable or nuke later.
// WeakReferenceCallback callback() { return callback_; }
@@ -398,7 +365,6 @@ class GlobalHandles::Node {
class IsIndependent : public BitField<bool, 3, 1> {};
// The following two fields are mutually exclusive
class IsActive : public BitField<bool, 4, 1> {};
- class IsPartiallyDependent : public BitField<bool, 4, 1> {};
class IsInNewSpaceList : public BitField<bool, 5, 1> {};
class NodeWeaknessType : public BitField<WeaknessType, 6, 2> {};
@@ -642,12 +608,6 @@ void GlobalHandles::MarkIndependent(Object** location) {
Node::FromLocation(location)->MarkIndependent();
}
-
-void GlobalHandles::MarkPartiallyDependent(Object** location) {
- Node::FromLocation(location)->MarkPartiallyDependent();
-}
-
-
bool GlobalHandles::IsIndependent(Object** location) {
return Node::FromLocation(location)->is_independent();
}
@@ -694,18 +654,10 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- node->is_active())) {
- v->VisitPointer(node->location());
- }
- } else {
- if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- !node->is_partially_dependent())) {
- v->VisitPointer(node->location());
- }
+ if (node->IsStrongRetainer() ||
+ (node->IsWeakRetainer() && !node->is_independent() &&
+ node->is_active())) {
+ v->VisitPointer(node->location());
}
}
}
@@ -716,8 +668,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
DCHECK(node->is_in_new_space_list());
- if ((node->is_independent() || node->is_partially_dependent()) &&
- node->IsWeak() && f(isolate_->heap(), node->location())) {
+ if (node->is_independent() && node->IsWeak() &&
+ f(isolate_->heap(), node->location())) {
node->MarkPending();
}
}
@@ -728,8 +680,7 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
DCHECK(node->is_in_new_space_list());
- if ((node->is_independent() || node->is_partially_dependent()) &&
- node->IsWeakRetainer()) {
+ if (node->is_independent() && node->IsWeakRetainer()) {
// Pending weak phantom handles die immediately. Everything else survives.
if (node->IsPendingPhantomResetHandle()) {
node->ResetPhantomHandle();
@@ -968,18 +919,11 @@ int GlobalHandles::PostScavengeProcessing(
// to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
- if (FLAG_scavenge_reclaim_unmodified_objects) {
if (!node->is_independent() && (node->is_active())) {
node->set_active(false);
continue;
}
node->set_active(false);
- } else {
- if (!node->is_independent() && !node->is_partially_dependent()) {
- continue;
- }
- node->clear_partially_dependent();
- }
if (node->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
@@ -1007,11 +951,7 @@ int GlobalHandles::PostMarkSweepProcessing(
// the freed_nodes.
continue;
}
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- it.node()->set_active(false);
- } else {
- it.node()->clear_partially_dependent();
- }
+ it.node()->set_active(false);
if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
@@ -1122,7 +1062,7 @@ int GlobalHandles::PostGarbageCollectionProcessing(
// PostScavengeProcessing.
return freed_nodes;
}
- if (collector == SCAVENGER) {
+ if (Heap::IsYoungGenerationCollector(collector)) {
freed_nodes += PostScavengeProcessing(initial_post_gc_processing_count);
} else {
freed_nodes += PostMarkSweepProcessing(initial_post_gc_processing_count);
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 24a2273e36..50e5ed6969 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -167,9 +167,6 @@ class GlobalHandles {
// Mark the reference to this object independent of any object group.
static void MarkIndependent(Object** location);
- // Mark the reference to this object externaly unreachable.
- static void MarkPartiallyDependent(Object** location);
-
static bool IsIndependent(Object** location);
// Tells whether global handle is near death.
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 03c5b1dc1a..f689c667b6 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -141,19 +141,20 @@ const int kMinUInt16 = 0;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
const int kMinUInt32 = 0;
-const int kCharSize = sizeof(char); // NOLINT
-const int kShortSize = sizeof(short); // NOLINT
-const int kIntSize = sizeof(int); // NOLINT
-const int kInt32Size = sizeof(int32_t); // NOLINT
-const int kInt64Size = sizeof(int64_t); // NOLINT
-const int kFloatSize = sizeof(float); // NOLINT
-const int kDoubleSize = sizeof(double); // NOLINT
-const int kIntptrSize = sizeof(intptr_t); // NOLINT
-const int kPointerSize = sizeof(void*); // NOLINT
+const int kCharSize = sizeof(char);
+const int kShortSize = sizeof(short); // NOLINT
+const int kIntSize = sizeof(int);
+const int kInt32Size = sizeof(int32_t);
+const int kInt64Size = sizeof(int64_t);
+const int kSizetSize = sizeof(size_t);
+const int kFloatSize = sizeof(float);
+const int kDoubleSize = sizeof(double);
+const int kIntptrSize = sizeof(intptr_t);
+const int kPointerSize = sizeof(void*);
#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
-const int kRegisterSize = kPointerSize + kPointerSize;
+const int kRegisterSize = kPointerSize + kPointerSize;
#else
-const int kRegisterSize = kPointerSize;
+const int kRegisterSize = kPointerSize;
#endif
const int kPCOnStackSize = kRegisterSize;
const int kFPOnStackSize = kRegisterSize;
@@ -576,7 +577,7 @@ enum MinimumCapacity {
USE_CUSTOM_MINIMUM_CAPACITY
};
-enum GarbageCollector { SCAVENGER, MARK_COMPACTOR };
+enum GarbageCollector { SCAVENGER, MARK_COMPACTOR, MINOR_MARK_COMPACTOR };
enum Executability { NOT_EXECUTABLE, EXECUTABLE };
@@ -602,6 +603,14 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
+// TODO(gsathya): Move this to JSPromise once we create it.
+// This should be in sync with the constants in promise.js
+enum PromiseStatus {
+ kPromisePending,
+ kPromiseFulfilled,
+ kPromiseRejected,
+};
+
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
@@ -1048,6 +1057,8 @@ enum VariableLocation : uint8_t {
// immediately initialized upon creation (kCreatedInitialized).
enum InitializationFlag : uint8_t { kNeedsInitialization, kCreatedInitialized };
+enum class HoleCheckMode { kRequired, kElided };
+
enum MaybeAssignedFlag : uint8_t { kNotAssigned, kMaybeAssigned };
// Serialized in PreparseData, so numeric values should not be changed.
@@ -1208,16 +1219,22 @@ inline uint32_t ObjectHash(Address address) {
// Type feedback is encoded in such a way that, we can combine the feedback
// at different points by performing an 'OR' operation. Type feedback moves
// to a more generic type when we combine feedback.
-// kSignedSmall -> kNumber -> kAny
-// kString -> kAny
+// kSignedSmall -> kNumber -> kNumberOrOddball -> kAny
+// kString -> kAny
+// TODO(mythria): Remove kNumber type when crankshaft can handle Oddballs
+// similar to Numbers. We don't need kNumber feedback for Turbofan. Extra
+// information about Number might reduce few instructions but causes more
+// deopts. We collect Number only because crankshaft does not handle all
+// cases of oddballs.
class BinaryOperationFeedback {
public:
enum {
kNone = 0x0,
kSignedSmall = 0x1,
kNumber = 0x3,
- kString = 0x4,
- kAny = 0xF
+ kNumberOrOddball = 0x7,
+ kString = 0x8,
+ kAny = 0x1F
};
};
@@ -1262,9 +1279,28 @@ inline std::ostream& operator<<(std::ostream& os, UnicodeEncoding encoding) {
return os;
}
+enum class IterationKind { kKeys, kValues, kEntries };
+
+inline std::ostream& operator<<(std::ostream& os, IterationKind kind) {
+ switch (kind) {
+ case IterationKind::kKeys:
+ return os << "IterationKind::kKeys";
+ case IterationKind::kValues:
+ return os << "IterationKind::kValues";
+ case IterationKind::kEntries:
+ return os << "IterationKind::kEntries";
+ }
+ UNREACHABLE();
+ return os;
+}
+
} // namespace internal
} // namespace v8
+// Used by js-builtin-reducer to identify whether ReduceArrayIterator() is
+// reducing a JSArray method, or a JSTypedArray method.
+enum class ArrayIteratorKind { kArray, kTypedArray };
+
namespace i = v8::internal;
#endif // V8_GLOBALS_H_
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 6331c79fac..3b1902e076 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -119,7 +119,7 @@ Address HandleScope::current_limit_address(Isolate* isolate) {
}
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
- : isolate_(isolate), zone_(isolate->allocator()) {
+ : isolate_(isolate), zone_(isolate->allocator(), ZONE_NAME) {
HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
prev_canonical_scope_ = handle_scope_data->canonical_scope;
handle_scope_data->canonical_scope = this;
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 3587d853c6..2c98209a1d 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -340,13 +340,13 @@ class RootIndexMap;
// This does not apply to nested inner HandleScopes unless a nested
// CanonicalHandleScope is introduced. Handles are only canonicalized within
// the same CanonicalHandleScope, but not across nested ones.
-class CanonicalHandleScope final {
+class V8_EXPORT_PRIVATE CanonicalHandleScope final {
public:
explicit CanonicalHandleScope(Isolate* isolate);
~CanonicalHandleScope();
private:
- V8_EXPORT_PRIVATE Object** Lookup(Object* object);
+ Object** Lookup(Object* object);
Isolate* isolate_;
Zone zone_;
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index c7b3370dce..cee900048e 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -12,6 +12,7 @@
V(Arguments_string, "Arguments") \
V(arguments_to_string, "[object Arguments]") \
V(Array_string, "Array") \
+ V(ArrayIterator_string, "Array Iterator") \
V(assign_string, "assign") \
V(array_to_string, "[object Array]") \
V(boolean_to_string, "[object Boolean]") \
@@ -53,11 +54,14 @@
V(default_string, "default") \
V(defineProperty_string, "defineProperty") \
V(deleteProperty_string, "deleteProperty") \
+ V(did_handle_string, "didHandle") \
V(display_name_string, "displayName") \
V(done_string, "done") \
V(dot_result_string, ".result") \
V(dot_string, ".") \
+ V(exec_string, "exec") \
V(entries_string, "entries") \
+ V(enqueue_string, "enqueue") \
V(enumerable_string, "enumerable") \
V(era_string, "era") \
V(Error_string, "Error") \
@@ -95,7 +99,8 @@
V(isView_string, "isView") \
V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(last_index_string, "lastIndex") \
+ V(keys_string, "keys") \
+ V(lastIndex_string, "lastIndex") \
V(length_string, "length") \
V(line_string, "line") \
V(literal_string, "literal") \
@@ -122,6 +127,7 @@
V(preventExtensions_string, "preventExtensions") \
V(private_api_string, "private_api") \
V(Promise_string, "Promise") \
+ V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
V(proto_string, "__proto__") \
V(prototype_string, "prototype") \
V(Proxy_string, "Proxy") \
@@ -140,6 +146,7 @@
V(source_url_string, "source_url") \
V(stack_string, "stack") \
V(stackTraceLimit_string, "stackTraceLimit") \
+ V(sticky_string, "sticky") \
V(strict_compare_ic_string, "===") \
V(string_string, "string") \
V(String_string, "String") \
@@ -155,6 +162,8 @@
V(true_string, "true") \
V(TypeError_string, "TypeError") \
V(type_string, "type") \
+ V(CompileError_string, "CompileError") \
+ V(RuntimeError_string, "RuntimeError") \
V(uint16x8_string, "uint16x8") \
V(Uint16x8_string, "Uint16x8") \
V(uint32x4_string, "uint32x4") \
@@ -163,6 +172,7 @@
V(Uint8x16_string, "Uint8x16") \
V(undefined_string, "undefined") \
V(undefined_to_string, "[object Undefined]") \
+ V(unicode_string, "unicode") \
V(URIError_string, "URIError") \
V(valueOf_string, "valueOf") \
V(values_string, "values") \
@@ -170,53 +180,52 @@
V(WeakMap_string, "WeakMap") \
V(WeakSet_string, "WeakSet") \
V(weekday_string, "weekday") \
+ V(will_handle_string, "willHandle") \
V(writable_string, "writable") \
V(year_string, "year")
-#define PRIVATE_SYMBOL_LIST(V) \
- V(array_iteration_kind_symbol) \
- V(array_iterator_next_symbol) \
- V(array_iterator_object_symbol) \
- V(call_site_frame_array_symbol) \
- V(call_site_frame_index_symbol) \
- V(class_end_position_symbol) \
- V(class_start_position_symbol) \
- V(detailed_stack_trace_symbol) \
- V(elements_transition_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(error_start_pos_symbol) \
- V(frozen_symbol) \
- V(hash_code_symbol) \
- V(home_object_symbol) \
- V(intl_impl_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_pattern_symbol) \
- V(intl_resolved_symbol) \
- V(megamorphic_symbol) \
- V(native_context_index_symbol) \
- V(nonexistent_symbol) \
- V(nonextensible_symbol) \
- V(normal_ic_symbol) \
- V(not_mapped_symbol) \
- V(premonomorphic_symbol) \
- V(promise_async_stack_id_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_deferred_reactions_symbol) \
- V(promise_forwarding_handler_symbol) \
- V(promise_fulfill_reactions_symbol) \
- V(promise_handled_by_symbol) \
- V(promise_handled_hint_symbol) \
- V(promise_has_handler_symbol) \
- V(promise_raw_symbol) \
- V(promise_reject_reactions_symbol) \
- V(promise_result_symbol) \
- V(promise_state_symbol) \
- V(sealed_symbol) \
- V(stack_trace_symbol) \
- V(strict_function_transition_symbol) \
- V(string_iterator_iterated_string_symbol) \
- V(string_iterator_next_index_symbol) \
+#define PRIVATE_SYMBOL_LIST(V) \
+ V(array_iteration_kind_symbol) \
+ V(array_iterator_next_symbol) \
+ V(array_iterator_object_symbol) \
+ V(call_site_frame_array_symbol) \
+ V(call_site_frame_index_symbol) \
+ V(class_end_position_symbol) \
+ V(class_start_position_symbol) \
+ V(detailed_stack_trace_symbol) \
+ V(elements_transition_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol) \
+ V(error_start_pos_symbol) \
+ V(frozen_symbol) \
+ V(hash_code_symbol) \
+ V(home_object_symbol) \
+ V(intl_impl_object_symbol) \
+ V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
+ V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
+ V(nonexistent_symbol) \
+ V(nonextensible_symbol) \
+ V(normal_ic_symbol) \
+ V(not_mapped_symbol) \
+ V(premonomorphic_symbol) \
+ V(promise_async_stack_id_symbol) \
+ V(promise_debug_marker_symbol) \
+ V(promise_deferred_reaction_symbol) \
+ V(promise_forwarding_handler_symbol) \
+ V(promise_fulfill_reactions_symbol) \
+ V(promise_handled_by_symbol) \
+ V(promise_handled_hint_symbol) \
+ V(promise_has_handler_symbol) \
+ V(promise_raw_symbol) \
+ V(promise_reject_reactions_symbol) \
+ V(promise_result_symbol) \
+ V(promise_state_symbol) \
+ V(sealed_symbol) \
+ V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
V(uninitialized_symbol)
#define PUBLIC_SYMBOL_LIST(V) \
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 39dea7e1ff..7ce0c1a2f6 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -68,7 +68,7 @@ class GCIdleTimeHeapState {
// The idle time handler makes decisions about which garbage collection
// operations are executing during IdleNotification.
-class GCIdleTimeHandler {
+class V8_EXPORT_PRIVATE GCIdleTimeHandler {
public:
// If we haven't recorded any incremental marking events yet, we carefully
// mark with a conservative lower bound for the marking speed.
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 8049ce498b..dcd319fdae 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -11,10 +11,11 @@
namespace v8 {
namespace internal {
-static intptr_t CountTotalHolesSize(Heap* heap) {
- intptr_t holes_size = 0;
+static size_t CountTotalHolesSize(Heap* heap) {
+ size_t holes_size = 0;
OldSpaces spaces(heap);
for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+ DCHECK_GE(holes_size + space->Waste() + space->Available(), holes_size);
holes_size += space->Waste() + space->Available();
}
return holes_size;
@@ -28,8 +29,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
STATIC_ASSERT(FIRST_INCREMENTAL_SCOPE == 0);
start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Enter(
tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_,
&RuntimeCallStats::GC);
@@ -40,8 +40,7 @@ GCTracer::Scope::~Scope() {
tracer_->AddScopeSample(
scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Leave(
tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_);
}
@@ -83,28 +82,17 @@ GCTracer::Event::Event(Type type, GarbageCollectionReason gc_reason,
}
}
-
const char* GCTracer::Event::TypeName(bool short_name) const {
switch (type) {
case SCAVENGER:
- if (short_name) {
- return "s";
- } else {
- return "Scavenge";
- }
+ return (short_name) ? "s" : "Scavenge";
case MARK_COMPACTOR:
case INCREMENTAL_MARK_COMPACTOR:
- if (short_name) {
- return "ms";
- } else {
- return "Mark-sweep";
- }
+ return (short_name) ? "ms" : "Mark-sweep";
+ case MINOR_MARK_COMPACTOR:
+ return (short_name) ? "mmc" : "Minor Mark-Compact";
case START:
- if (short_name) {
- return "st";
- } else {
- return "Start";
- }
+ return (short_name) ? "st" : "Start";
}
return "Unknown Event Type";
}
@@ -115,6 +103,7 @@ GCTracer::GCTracer(Heap* heap)
previous_(current_),
incremental_marking_bytes_(0),
incremental_marking_duration_(0.0),
+ incremental_marking_start_time_(0.0),
recorded_incremental_marking_speed_(0.0),
allocation_time_ms_(0.0),
new_space_allocation_counter_bytes_(0),
@@ -139,8 +128,8 @@ void GCTracer::ResetForTesting() {
new_space_allocation_in_bytes_since_gc_ = 0.0;
old_generation_allocation_in_bytes_since_gc_ = 0.0;
combined_mark_compact_speed_cache_ = 0.0;
- recorded_scavenges_total_.Reset();
- recorded_scavenges_survived_.Reset();
+ recorded_minor_gcs_total_.Reset();
+ recorded_minor_gcs_survived_.Reset();
recorded_compactions_.Reset();
recorded_mark_compacts_.Reset();
recorded_incremental_mark_compacts_.Reset();
@@ -162,15 +151,22 @@ void GCTracer::Start(GarbageCollector collector,
SampleAllocation(start_time, heap_->NewSpaceAllocationCounter(),
heap_->OldGenerationAllocationCounter());
- if (collector == SCAVENGER) {
- current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
- } else if (collector == MARK_COMPACTOR) {
- if (heap_->incremental_marking()->WasActivated()) {
+ switch (collector) {
+ case SCAVENGER:
+ current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+ break;
+ case MINOR_MARK_COMPACTOR:
current_ =
- Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason, collector_reason);
- } else {
- current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
- }
+ Event(Event::MINOR_MARK_COMPACTOR, gc_reason, collector_reason);
+ break;
+ case MARK_COMPACTOR:
+ if (heap_->incremental_marking()->WasActivated()) {
+ current_ = Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason,
+ collector_reason);
+ } else {
+ current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+ }
+ break;
}
current_.reduce_memory = heap_->ShouldReduceMemory();
@@ -188,12 +184,12 @@ void GCTracer::Start(GarbageCollector collector,
current_.scopes[i] = 0;
}
- int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
- int used_memory = static_cast<int>(current_.start_object_size / KB);
+ size_t committed_memory = heap_->CommittedMemory() / KB;
+ size_t used_memory = current_.start_object_size / KB;
Counters* counters = heap_->isolate()->counters();
- if (collector == SCAVENGER) {
+ if (Heap::IsYoungGenerationCollector(collector)) {
counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason));
} else {
counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason));
@@ -202,8 +198,7 @@ void GCTracer::Start(GarbageCollector collector,
committed_memory);
counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(),
&timer_, &RuntimeCallStats::GC);
}
@@ -220,15 +215,16 @@ void GCTracer::ResetIncrementalMarkingCounters() {
void GCTracer::Stop(GarbageCollector collector) {
start_counter_--;
if (start_counter_ != 0) {
- heap_->isolate()->PrintWithTimestamp(
- "[Finished reentrant %s during %s.]\n",
- collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
- current_.TypeName(false));
+ heap_->isolate()->PrintWithTimestamp("[Finished reentrant %s during %s.]\n",
+ Heap::CollectorName(collector),
+ current_.TypeName(false));
return;
}
DCHECK(start_counter_ >= 0);
DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
+ (collector == MINOR_MARK_COMPACTOR &&
+ current_.type == Event::MINOR_MARK_COMPACTOR) ||
(collector == MARK_COMPACTOR &&
(current_.type == Event::MARK_COMPACTOR ||
current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
@@ -241,8 +237,8 @@ void GCTracer::Stop(GarbageCollector collector) {
AddAllocation(current_.end_time);
- int committed_memory = static_cast<int>(heap_->CommittedMemory() / KB);
- int used_memory = static_cast<int>(current_.end_object_size / KB);
+ size_t committed_memory = heap_->CommittedMemory() / KB;
+ size_t used_memory = current_.end_object_size / KB;
heap_->isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
current_.end_time, committed_memory);
heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
@@ -250,36 +246,45 @@ void GCTracer::Stop(GarbageCollector collector) {
double duration = current_.end_time - current_.start_time;
- if (current_.type == Event::SCAVENGER) {
- recorded_scavenges_total_.Push(
- MakeBytesAndDuration(current_.new_space_object_size, duration));
- recorded_scavenges_survived_.Push(MakeBytesAndDuration(
- current_.survived_new_space_object_size, duration));
- } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
- current_.incremental_marking_bytes = incremental_marking_bytes_;
- current_.incremental_marking_duration = incremental_marking_duration_;
- for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
- current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
- current_.scopes[i] = incremental_marking_scopes_[i].duration;
- }
- RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
- current_.incremental_marking_duration);
- recorded_incremental_mark_compacts_.Push(
- MakeBytesAndDuration(current_.start_object_size, duration));
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
- } else {
- DCHECK_EQ(0, current_.incremental_marking_bytes);
- DCHECK_EQ(0, current_.incremental_marking_duration);
- recorded_mark_compacts_.Push(
- MakeBytesAndDuration(current_.start_object_size, duration));
- ResetIncrementalMarkingCounters();
- combined_mark_compact_speed_cache_ = 0.0;
+ switch (current_.type) {
+ case Event::SCAVENGER:
+ case Event::MINOR_MARK_COMPACTOR:
+ recorded_minor_gcs_total_.Push(
+ MakeBytesAndDuration(current_.new_space_object_size, duration));
+ recorded_minor_gcs_survived_.Push(MakeBytesAndDuration(
+ current_.survived_new_space_object_size, duration));
+ break;
+ case Event::INCREMENTAL_MARK_COMPACTOR:
+ current_.incremental_marking_bytes = incremental_marking_bytes_;
+ current_.incremental_marking_duration = incremental_marking_duration_;
+ for (int i = 0; i < Scope::NUMBER_OF_INCREMENTAL_SCOPES; i++) {
+ current_.incremental_marking_scopes[i] = incremental_marking_scopes_[i];
+ current_.scopes[i] = incremental_marking_scopes_[i].duration;
+ }
+ RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
+ current_.incremental_marking_duration);
+ recorded_incremental_mark_compacts_.Push(
+ MakeBytesAndDuration(current_.start_object_size, duration));
+ ResetIncrementalMarkingCounters();
+ combined_mark_compact_speed_cache_ = 0.0;
+ break;
+ case Event::MARK_COMPACTOR:
+ DCHECK_EQ(0u, current_.incremental_marking_bytes);
+ DCHECK_EQ(0, current_.incremental_marking_duration);
+ recorded_mark_compacts_.Push(
+ MakeBytesAndDuration(current_.start_object_size, duration));
+ ResetIncrementalMarkingCounters();
+ combined_mark_compact_speed_cache_ = 0.0;
+ break;
+ case Event::START:
+ UNREACHABLE();
}
heap_->UpdateTotalGCTime(duration);
- if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
+ if ((current_.type == Event::SCAVENGER ||
+ current_.type == Event::MINOR_MARK_COMPACTOR) &&
+ FLAG_trace_gc_ignore_scavenger)
return;
if (FLAG_trace_gc_nvp) {
@@ -293,8 +298,7 @@ void GCTracer::Stop(GarbageCollector collector) {
}
// TODO(cbruni): remove once we fully moved to a trace-based system.
- if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() ||
- FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(),
&timer_);
}
@@ -348,9 +352,8 @@ void GCTracer::AddContextDisposalTime(double time) {
recorded_context_disposal_times_.Push(time);
}
-
void GCTracer::AddCompactionEvent(double duration,
- intptr_t live_bytes_compacted) {
+ size_t live_bytes_compacted) {
recorded_compactions_.Push(
MakeBytesAndDuration(live_bytes_compacted, duration));
}
@@ -360,8 +363,7 @@ void GCTracer::AddSurvivalRatio(double promotion_ratio) {
recorded_survival_ratios_.Push(promotion_ratio);
}
-
-void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
+void GCTracer::AddIncrementalMarkingStep(double duration, size_t bytes) {
if (bytes > 0) {
incremental_marking_bytes_ += bytes;
incremental_marking_duration_ += duration;
@@ -426,7 +428,7 @@ void GCTracer::Print() const {
void GCTracer::PrintNVP() const {
double duration = current_.end_time - current_.start_time;
double spent_in_mutator = current_.start_time - previous_.end_time;
- intptr_t allocated_since_last_gc =
+ size_t allocated_since_last_gc =
current_.start_object_size - previous_.end_object_size;
double incremental_walltime_duration = 0;
@@ -449,26 +451,25 @@ void GCTracer::PrintNVP() const {
"roots=%.2f "
"code=%.2f "
"semispace=%.2f "
- "object_groups=%.2f "
- "external_prologue=%.2f "
- "external_epilogue=%.2f "
+ "external.prologue=%.2f "
+ "external.epilogue=%.2f "
"external_weak_global_handles=%.2f "
"steps_count=%d "
"steps_took=%.1f "
"scavenge_throughput=%.f "
- "total_size_before=%" V8PRIdPTR
+ "total_size_before=%" PRIuS
" "
- "total_size_after=%" V8PRIdPTR
+ "total_size_after=%" PRIuS
" "
- "holes_size_before=%" V8PRIdPTR
+ "holes_size_before=%" PRIuS
" "
- "holes_size_after=%" V8PRIdPTR
+ "holes_size_after=%" PRIuS
" "
- "allocated=%" V8PRIdPTR
+ "allocated=%" PRIuS
" "
- "promoted=%" V8PRIdPTR
+ "promoted=%" PRIuS
" "
- "semi_space_copied=%" V8PRIdPTR
+ "semi_space_copied=%" PRIuS
" "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
@@ -486,9 +487,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::SCAVENGER_ROOTS],
current_.scopes[Scope::SCAVENGER_CODE_FLUSH_CANDIDATES],
current_.scopes[Scope::SCAVENGER_SEMISPACE],
- current_.scopes[Scope::SCAVENGER_OBJECT_GROUPS],
- current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::EXTERNAL_EPILOGUE],
current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.incremental_marking_scopes[GCTracer::Scope::MC_INCREMENTAL]
.steps,
@@ -505,6 +505,15 @@ void GCTracer::PrintNVP() const {
NewSpaceAllocationThroughputInBytesPerMillisecond(),
ContextDisposalRateInMilliseconds());
break;
+ case Event::MINOR_MARK_COMPACTOR:
+ heap_->isolate()->PrintWithTimestamp(
+ "pause=%.1f "
+ "mutator=%.1f "
+ "gc=%s "
+ "reduce_memory=%d\n",
+ duration, spent_in_mutator, current_.TypeName(true),
+ current_.reduce_memory);
+ break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
heap_->isolate()->PrintWithTimestamp(
@@ -523,6 +532,7 @@ void GCTracer::PrintNVP() const {
"clear.weak_cells=%.1f "
"clear.weak_collections=%.1f "
"clear.weak_lists=%.1f "
+ "epilogue=%.1f "
"evacuate=%.1f "
"evacuate.candidates=%.1f "
"evacuate.clean_up=%.1f "
@@ -531,8 +541,8 @@ void GCTracer::PrintNVP() const {
"evacuate.update_pointers.to_evacuated=%.1f "
"evacuate.update_pointers.to_new=%.1f "
"evacuate.update_pointers.weak=%.1f "
- "external.mc_prologue=%.1f "
- "external.mc_epilogue=%.1f "
+ "external.prologue=%.1f "
+ "external.epilogue=%.1f "
"external.weak_global_handles=%.1f "
"finish=%.1f "
"mark=%.1f "
@@ -548,6 +558,7 @@ void GCTracer::PrintNVP() const {
"mark.wrapper_prologue=%.1f "
"mark.wrapper_epilogue=%.1f "
"mark.wrapper_tracing=%.1f "
+ "prologue=%.1f "
"sweep=%.1f "
"sweep.code=%.1f "
"sweep.map=%.1f "
@@ -568,19 +579,19 @@ void GCTracer::PrintNVP() const {
"incremental_steps_count=%d "
"incremental_marking_throughput=%.f "
"incremental_walltime_duration=%.f "
- "total_size_before=%" V8PRIdPTR
+ "total_size_before=%" PRIuS
" "
- "total_size_after=%" V8PRIdPTR
+ "total_size_after=%" PRIuS
" "
- "holes_size_before=%" V8PRIdPTR
+ "holes_size_before=%" PRIuS
" "
- "holes_size_after=%" V8PRIdPTR
+ "holes_size_after=%" PRIuS
" "
- "allocated=%" V8PRIdPTR
+ "allocated=%" PRIuS
" "
- "promoted=%" V8PRIdPTR
+ "promoted=%" PRIuS
" "
- "semi_space_copied=%" V8PRIdPTR
+ "semi_space_copied=%" PRIuS
" "
"nodes_died_in_new=%d "
"nodes_copied_in_new=%d "
@@ -604,6 +615,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_CLEAR_WEAK_CELLS],
current_.scopes[Scope::MC_CLEAR_WEAK_COLLECTIONS],
current_.scopes[Scope::MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MC_EPILOGUE],
current_.scopes[Scope::MC_EVACUATE],
current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
@@ -612,8 +624,8 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK],
- current_.scopes[Scope::MC_EXTERNAL_PROLOGUE],
- current_.scopes[Scope::MC_EXTERNAL_EPILOGUE],
+ current_.scopes[Scope::EXTERNAL_PROLOGUE],
+ current_.scopes[Scope::EXTERNAL_EPILOGUE],
current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES],
current_.scopes[Scope::MC_FINISH], current_.scopes[Scope::MC_MARK],
current_.scopes[Scope::MC_MARK_FINISH_INCREMENTAL],
@@ -628,7 +640,7 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_MARK_WRAPPER_PROLOGUE],
current_.scopes[Scope::MC_MARK_WRAPPER_EPILOGUE],
current_.scopes[Scope::MC_MARK_WRAPPER_TRACING],
- current_.scopes[Scope::MC_SWEEP],
+ current_.scopes[Scope::MC_PROLOGUE], current_.scopes[Scope::MC_SWEEP],
current_.scopes[Scope::MC_SWEEP_CODE],
current_.scopes[Scope::MC_SWEEP_MAP],
current_.scopes[Scope::MC_SWEEP_OLD],
@@ -674,7 +686,7 @@ void GCTracer::PrintNVP() const {
}
}
-double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+double GCTracer::AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
const BytesAndDuration& initial, double time_ms) {
BytesAndDuration sum = buffer.Sum(
[time_ms](BytesAndDuration a, BytesAndDuration b) {
@@ -693,11 +705,12 @@ double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
return speed;
}
-double GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+double GCTracer::AverageSpeed(
+ const base::RingBuffer<BytesAndDuration>& buffer) {
return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
}
-void GCTracer::RecordIncrementalMarkingSpeed(intptr_t bytes, double duration) {
+void GCTracer::RecordIncrementalMarkingSpeed(size_t bytes, double duration) {
if (duration == 0 || bytes == 0) return;
double current_speed = bytes / duration;
if (recorded_incremental_marking_speed_ == 0) {
@@ -722,9 +735,9 @@ double GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
double GCTracer::ScavengeSpeedInBytesPerMillisecond(
ScavengeSpeedMode mode) const {
if (mode == kForAllObjects) {
- return AverageSpeed(recorded_scavenges_total_);
+ return AverageSpeed(recorded_minor_gcs_total_);
} else {
- return AverageSpeed(recorded_scavenges_survived_);
+ return AverageSpeed(recorded_minor_gcs_survived_);
}
}
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index e8c72c1e2c..ed62dee5f1 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -7,6 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
+#include "src/base/ring-buffer.h"
#include "src/counters.h"
#include "src/globals.h"
#include "testing/gtest/include/gtest/gtest_prod.h"
@@ -14,44 +15,6 @@
namespace v8 {
namespace internal {
-template <typename T>
-class RingBuffer {
- public:
- RingBuffer() { Reset(); }
- static const int kSize = 10;
- void Push(const T& value) {
- if (count_ == kSize) {
- elements_[start_++] = value;
- if (start_ == kSize) start_ = 0;
- } else {
- DCHECK_EQ(start_, 0);
- elements_[count_++] = value;
- }
- }
-
- int Count() const { return count_; }
-
- template <typename Callback>
- T Sum(Callback callback, const T& initial) const {
- int j = start_ + count_ - 1;
- if (j >= kSize) j -= kSize;
- T result = initial;
- for (int i = 0; i < count_; i++) {
- result = callback(result, elements_[j]);
- if (--j == -1) j += kSize;
- }
- return result;
- }
-
- void Reset() { start_ = count_ = 0; }
-
- private:
- T elements_[kSize];
- int start_;
- int count_;
- DISALLOW_COPY_AND_ASSIGN(RingBuffer);
-};
-
typedef std::pair<uint64_t, double> BytesAndDuration;
inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
@@ -74,6 +37,8 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
#define TRACER_SCOPES(F) \
INCREMENTAL_SCOPES(F) \
+ F(EXTERNAL_EPILOGUE) \
+ F(EXTERNAL_PROLOGUE) \
F(EXTERNAL_WEAK_GLOBAL_HANDLES) \
F(MC_CLEAR) \
F(MC_CLEAR_CODE_FLUSH) \
@@ -86,6 +51,7 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_CLEAR_WEAK_CELLS) \
F(MC_CLEAR_WEAK_COLLECTIONS) \
F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EPILOGUE) \
F(MC_EVACUATE) \
F(MC_EVACUATE_CANDIDATES) \
F(MC_EVACUATE_CLEAN_UP) \
@@ -94,8 +60,6 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_EXTERNAL_EPILOGUE) \
- F(MC_EXTERNAL_PROLOGUE) \
F(MC_FINISH) \
F(MC_MARK) \
F(MC_MARK_FINISH_INCREMENTAL) \
@@ -110,14 +74,12 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_MARK_WRAPPER_PROLOGUE) \
F(MC_MARK_WRAPPER_TRACING) \
F(MC_MARK_OBJECT_GROUPING) \
+ F(MC_PROLOGUE) \
F(MC_SWEEP) \
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
- F(SCAVENGER_EXTERNAL_EPILOGUE) \
- F(SCAVENGER_EXTERNAL_PROLOGUE) \
- F(SCAVENGER_OBJECT_GROUPS) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
F(SCAVENGER_SCAVENGE) \
@@ -132,7 +94,7 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
-class GCTracer {
+class V8_EXPORT_PRIVATE GCTracer {
public:
struct IncrementalMarkingInfos {
IncrementalMarkingInfos() : duration(0), longest_step(0), steps(0) {}
@@ -190,7 +152,8 @@ class GCTracer {
SCAVENGER = 0,
MARK_COMPACTOR = 1,
INCREMENTAL_MARK_COMPACTOR = 2,
- START = 3
+ MINOR_MARK_COMPACTOR = 3,
+ START = 4
};
Event(Type type, GarbageCollectionReason gc_reason,
@@ -215,10 +178,10 @@ class GCTracer {
bool reduce_memory;
// Size of objects in heap set in constructor.
- intptr_t start_object_size;
+ size_t start_object_size;
// Size of objects in heap set in destructor.
- intptr_t end_object_size;
+ size_t end_object_size;
// Size of memory allocated from OS set in constructor.
size_t start_memory_size;
@@ -228,23 +191,20 @@ class GCTracer {
// Total amount of space either wasted or contained in one of free lists
// before the current GC.
- intptr_t start_holes_size;
+ size_t start_holes_size;
// Total amount of space either wasted or contained in one of free lists
// after the current GC.
- intptr_t end_holes_size;
+ size_t end_holes_size;
// Size of new space objects in constructor.
- intptr_t new_space_object_size;
+ size_t new_space_object_size;
// Size of survived new space objects in destructor.
- intptr_t survived_new_space_object_size;
-
- // Bytes marked since creation of tracer (value at start of event).
- intptr_t cumulative_incremental_marking_bytes;
+ size_t survived_new_space_object_size;
// Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR
- intptr_t incremental_marking_bytes;
+ size_t incremental_marking_bytes;
// Duration of incremental marking steps for INCREMENTAL_MARK_COMPACTOR.
double incremental_marking_duration;
@@ -277,12 +237,12 @@ class GCTracer {
void AddContextDisposalTime(double time);
- void AddCompactionEvent(double duration, intptr_t live_bytes_compacted);
+ void AddCompactionEvent(double duration, size_t live_bytes_compacted);
void AddSurvivalRatio(double survival_ratio);
// Log an incremental marking step.
- void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+ void AddIncrementalMarkingStep(double duration, size_t bytes);
// Compute the average incremental marking speed in bytes/millisecond.
// Returns 0 if no events have been recorded.
@@ -380,13 +340,13 @@ class GCTracer {
// Returns the average speed of the events in the buffer.
// If the buffer is empty, the result is 0.
// Otherwise, the result is between 1 byte/ms and 1 GB/ms.
- static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
- static double AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+ static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer);
+ static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
const BytesAndDuration& initial, double time_ms);
void ResetForTesting();
void ResetIncrementalMarkingCounters();
- void RecordIncrementalMarkingSpeed(intptr_t bytes, double duration);
+ void RecordIncrementalMarkingSpeed(size_t bytes, double duration);
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -402,12 +362,10 @@ class GCTracer {
double TotalExternalTime() const {
return current_.scopes[Scope::EXTERNAL_WEAK_GLOBAL_HANDLES] +
- current_.scopes[Scope::MC_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::MC_EXTERNAL_PROLOGUE] +
+ current_.scopes[Scope::EXTERNAL_EPILOGUE] +
+ current_.scopes[Scope::EXTERNAL_PROLOGUE] +
current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE] +
- current_.scopes[Scope::SCAVENGER_EXTERNAL_EPILOGUE] +
- current_.scopes[Scope::SCAVENGER_EXTERNAL_PROLOGUE];
+ current_.scopes[Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE];
}
// Pointer to the heap that owns this tracer.
@@ -422,7 +380,7 @@ class GCTracer {
// Size of incremental marking steps (in bytes) accumulated since the end of
// the last mark compact GC.
- intptr_t incremental_marking_bytes_;
+ size_t incremental_marking_bytes_;
// Duration of incremental marking steps since the end of the last mark-
// compact event.
@@ -456,15 +414,15 @@ class GCTracer {
// Separate timer used for --runtime_call_stats
RuntimeCallTimer timer_;
- RingBuffer<BytesAndDuration> recorded_scavenges_total_;
- RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
- RingBuffer<BytesAndDuration> recorded_compactions_;
- RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
- RingBuffer<BytesAndDuration> recorded_mark_compacts_;
- RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
- RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
- RingBuffer<double> recorded_context_disposal_times_;
- RingBuffer<double> recorded_survival_ratios_;
+ base::RingBuffer<BytesAndDuration> recorded_minor_gcs_total_;
+ base::RingBuffer<BytesAndDuration> recorded_minor_gcs_survived_;
+ base::RingBuffer<BytesAndDuration> recorded_compactions_;
+ base::RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+ base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
+ base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
+ base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+ base::RingBuffer<double> recorded_context_disposal_times_;
+ base::RingBuffer<double> recorded_survival_ratios_;
DISALLOW_COPY_AND_ASSIGN(GCTracer);
};
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 23e171232d..7d0d241289 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -12,6 +12,7 @@
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/object-stats.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
@@ -490,37 +491,18 @@ bool Heap::InOldSpaceSlow(Address address) {
return old_space_->ContainsSlow(address);
}
-template <PromotionMode promotion_mode>
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
Page* page = Page::FromAddress(old_address);
Address age_mark = new_space_->age_mark();
-
- if (promotion_mode == PROMOTE_MARKED) {
- MarkBit mark_bit = ObjectMarking::MarkBitFrom(old_address);
- if (!Marking::IsWhite(mark_bit)) {
- return true;
- }
- }
-
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
-PromotionMode Heap::CurrentPromotionMode() {
- if (incremental_marking()->IsMarking()) {
- return PROMOTE_MARKED;
- } else {
- return DEFAULT_PROMOTION;
- }
-}
-
void Heap::RecordWrite(Object* object, int offset, Object* o) {
if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
return;
}
- RememberedSet<OLD_TO_NEW>::Insert(
- Page::FromAddress(reinterpret_cast<Address>(object)),
- HeapObject::cast(object)->address() + offset);
+ store_buffer()->InsertEntry(HeapObject::cast(object)->address() + offset);
}
void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
@@ -531,11 +513,9 @@ void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
if (InNewSpace(array)) return;
- Page* page = Page::FromAddress(reinterpret_cast<Address>(array));
for (int i = 0; i < length; i++) {
if (!InNewSpace(array->get(offset + i))) continue;
- RememberedSet<OLD_TO_NEW>::Insert(
- page,
+ store_buffer()->InsertEntry(
reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
}
}
@@ -647,7 +627,13 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
base::HashMap* pretenuring_feedback) {
- DCHECK(InFromSpace(object));
+ DCHECK(InFromSpace(object) ||
+ (InToSpace(object) &&
+ Page::FromAddress(object->address())
+ ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
+ (!InNewSpace(object) &&
+ Page::FromAddress(object->address())
+ ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
return;
@@ -759,9 +745,7 @@ void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
#endif
}
-void Heap::ClearInstanceofCache() {
- set_instanceof_cache_function(Smi::FromInt(0));
-}
+void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }
Oddball* Heap::ToBoolean(bool condition) {
return condition ? true_value() : false_value();
@@ -769,8 +753,8 @@ Oddball* Heap::ToBoolean(bool condition) {
void Heap::CompletelyClearInstanceofCache() {
- set_instanceof_cache_map(Smi::FromInt(0));
- set_instanceof_cache_function(Smi::FromInt(0));
+ set_instanceof_cache_map(Smi::kZero);
+ set_instanceof_cache_function(Smi::kZero);
}
@@ -793,27 +777,27 @@ int Heap::NextScriptId() {
}
void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
- DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
- DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(construct_stub_deopt_pc_offset() == Smi::kZero);
set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
- DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+ DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
- DCHECK(interpreter_entry_return_pc_offset() == Smi::FromInt(0));
+ DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}
@@ -828,6 +812,16 @@ void Heap::SetSerializedTemplates(FixedArray* templates) {
set_serialized_templates(templates);
}
+void Heap::CreateObjectStats() {
+ if (V8_LIKELY(FLAG_gc_stats == 0)) return;
+ if (!live_object_stats_) {
+ live_object_stats_ = new ObjectStats(this);
+ }
+ if (!dead_object_stats_) {
+ dead_object_stats_ = new ObjectStats(this);
+ }
+}
+
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()) {
heap_->always_allocate_scope_count_.Increment(1);
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index d823232ac7..2059dae6b7 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -156,8 +156,8 @@ Heap::Heap()
strong_roots_list_(NULL),
heap_iterator_depth_(0),
embedder_heap_tracer_(nullptr),
- embedder_reference_reporter_(new TracePossibleWrapperReporter(this)),
- force_oom_(false) {
+ force_oom_(false),
+ delay_sweeper_tasks_for_testing_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -170,23 +170,22 @@ Heap::Heap()
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
- set_allocation_sites_list(Smi::FromInt(0));
- set_encountered_weak_collections(Smi::FromInt(0));
- set_encountered_weak_cells(Smi::FromInt(0));
- set_encountered_transition_arrays(Smi::FromInt(0));
+ set_allocation_sites_list(Smi::kZero);
+ set_encountered_weak_collections(Smi::kZero);
+ set_encountered_weak_cells(Smi::kZero);
+ set_encountered_transition_arrays(Smi::kZero);
// Put a dummy entry in the remembered pages so we can find the list the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
}
-
-intptr_t Heap::Capacity() {
+size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
return new_space_->Capacity() + OldGenerationCapacity();
}
-intptr_t Heap::OldGenerationCapacity() {
+size_t Heap::OldGenerationCapacity() {
if (!HasBeenSetUp()) return 0;
return old_space_->Capacity() + code_space_->Capacity() +
@@ -233,11 +232,10 @@ void Heap::UpdateMaximumCommitted() {
}
}
-
-intptr_t Heap::Available() {
+size_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
- intptr_t total = 0;
+ size_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->Available();
@@ -266,6 +264,12 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
+ if (incremental_marking()->NeedsFinalization() &&
+ AllocationLimitOvershotByLargeMargin()) {
+ *reason = "Incremental marking needs finalization";
+ return MARK_COMPACTOR;
+ }
+
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
@@ -275,8 +279,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
- if (static_cast<intptr_t>(memory_allocator()->MaxAvailable()) <=
- new_space_->Size()) {
+ if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
@@ -286,7 +289,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
// Default
*reason = NULL;
- return SCAVENGER;
+ return YoungGenerationCollector();
}
@@ -316,55 +319,55 @@ void Heap::ReportStatisticsBeforeGC() {
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
- PrintIsolate(isolate_,
- "Memory allocator, used: %6zu KB,"
- " available: %6zu KB\n",
+ PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS
+ " KB,"
+ " available: %6" PRIuS " KB\n",
memory_allocator()->Size() / KB,
memory_allocator()->Available() / KB);
- PrintIsolate(isolate_, "New space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "New space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
new_space_->Size() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Old space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Old space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
old_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Code space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Code space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS "KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Map space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Map space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "Large object space, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS " KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
- PrintIsolate(isolate_, "All spaces, used: %6" V8PRIdPTR
+ PrintIsolate(isolate_, "All spaces, used: %6" PRIuS
" KB"
- ", available: %6" V8PRIdPTR
+ ", available: %6" PRIuS
" KB"
- ", committed: %6zu KB\n",
+ ", committed: %6" PRIuS "KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
- PrintIsolate(isolate_, "External memory reported: %6" V8PRIdPTR " KB\n",
- static_cast<intptr_t>(external_memory_ / KB));
+ PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
+ external_memory_ / KB);
PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
total_gc_time_ms_);
}
@@ -439,12 +442,11 @@ void Heap::GarbageCollectionPrologue() {
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
- store_buffer()->MoveEntriesToRememberedSet();
+ store_buffer()->MoveAllEntriesToRememberedSet();
}
-
-intptr_t Heap::SizeOfObjects() {
- intptr_t total = 0;
+size_t Heap::SizeOfObjects() {
+ size_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->SizeOfObjects();
@@ -742,7 +744,7 @@ void Heap::PreprocessStackTraces() {
}
// We must not compact the weak fixed list here, as we may be in the middle
// of writing to it, when the GC triggered. Instead, we reset the root value.
- set_weak_stack_trace_list(Smi::FromInt(0));
+ set_weak_stack_trace_list(Smi::kZero);
}
@@ -822,7 +824,7 @@ void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
- if (collector == SCAVENGER) {
+ if (IsYoungGenerationCollector(collector)) {
return isolate_->counters()->gc_scavenger();
} else {
if (!incremental_marking()->IsStopped()) {
@@ -862,7 +864,8 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush();
+ isolate()->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
isolate()->ClearSerializerData();
set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
@@ -952,7 +955,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
EnsureFillerObjectAtTop();
- if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+ if (IsYoungGenerationCollector(collector) &&
+ !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scavenge during marking.\n");
@@ -963,6 +967,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
!ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
+ !incremental_marking()->NeedsFinalization() &&
!IsCloseToOutOfMemory(new_space_->Capacity())) {
if (!incremental_marking()->IsComplete() &&
!mark_compact_collector()->marking_deque()->IsEmpty() &&
@@ -971,13 +976,13 @@ bool Heap::CollectGarbage(GarbageCollector collector,
isolate()->PrintWithTimestamp(
"[IncrementalMarking] Delaying MarkSweep.\n");
}
- collector = SCAVENGER;
+ collector = YoungGenerationCollector();
collector_reason = "incremental marking delaying mark-sweep";
}
}
bool next_gc_likely_to_collect_more = false;
- intptr_t committed_memory_before = 0;
+ size_t committed_memory_before = 0;
if (collector == MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
@@ -1004,8 +1009,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
}
if (collector == MARK_COMPACTOR) {
- intptr_t committed_memory_after = CommittedOldGenerationMemory();
- intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
+ size_t committed_memory_after = CommittedOldGenerationMemory();
+ size_t used_memory_after = PromotedSpaceSizeOfObjects();
MemoryReducer::Event event;
event.type = MemoryReducer::kMarkCompact;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@@ -1014,7 +1019,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
// - there is high fragmentation,
// - there are live detached contexts.
event.next_gc_likely_to_collect_more =
- (committed_memory_before - committed_memory_after) > MB ||
+ (committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
if (deserialization_complete_) {
@@ -1036,7 +1041,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
// generator needs incremental marking to stay off after it aborted.
// We do this only for scavenger to avoid a loop where mark-compact
// causes another mark-compact.
- if (collector == SCAVENGER && !ShouldAbortIncrementalMarking()) {
+ if (IsYoungGenerationCollector(collector) &&
+ !ShouldAbortIncrementalMarking()) {
StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
kNoGCCallbackFlags);
}
@@ -1056,7 +1062,8 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
}
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
- isolate()->optimizing_compile_dispatcher()->Flush();
+ isolate()->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
AgeInlineCaches();
number_of_disposed_maps_ = retained_maps()->Length();
@@ -1172,8 +1179,9 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
for (auto& chunk : *reservation) {
AllocationResult allocation;
int size = chunk.size;
- DCHECK_LE(size, MemoryAllocator::PageAreaSize(
- static_cast<AllocationSpace>(space)));
+ DCHECK_LE(static_cast<size_t>(size),
+ MemoryAllocator::PageAreaSize(
+ static_cast<AllocationSpace>(space)));
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRawUnaligned(size);
} else {
@@ -1275,7 +1283,7 @@ bool Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
int freed_global_handles = 0;
- if (collector != SCAVENGER) {
+ if (!IsYoungGenerationCollector(collector)) {
PROFILE(isolate_, CodeMovingGCEvent());
}
@@ -1292,9 +1300,7 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), collector == MARK_COMPACTOR
- ? GCTracer::Scope::MC_EXTERNAL_PROLOGUE
- : GCTracer::Scope::SCAVENGER_EXTERNAL_PROLOGUE);
+ TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_PROLOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
@@ -1308,18 +1314,25 @@ bool Heap::PerformGarbageCollection(
{
Heap::PretenuringScope pretenuring_scope(this);
- if (collector == MARK_COMPACTOR) {
- UpdateOldGenerationAllocationCounter();
- // Perform mark-sweep with optional compaction.
- MarkCompact();
- old_generation_size_configured_ = true;
- // This should be updated before PostGarbageCollectionProcessing, which
- // can cause another GC. Take into account the objects promoted during GC.
- old_generation_allocation_counter_at_last_gc_ +=
- static_cast<size_t>(promoted_objects_size_);
- old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
- } else {
- Scavenge();
+ switch (collector) {
+ case MARK_COMPACTOR:
+ UpdateOldGenerationAllocationCounter();
+ // Perform mark-sweep with optional compaction.
+ MarkCompact();
+ old_generation_size_configured_ = true;
+ // This should be updated before PostGarbageCollectionProcessing, which
+ // can cause another GC. Take into account the objects promoted during
+ // GC.
+ old_generation_allocation_counter_at_last_gc_ +=
+ static_cast<size_t>(promoted_objects_size_);
+ old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
+ break;
+ case MINOR_MARK_COMPACTOR:
+ MinorMarkCompact();
+ break;
+ case SCAVENGER:
+ Scavenge();
+ break;
}
ProcessPretenuringFeedback();
@@ -1348,7 +1361,7 @@ bool Heap::PerformGarbageCollection(
double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
double mutator_speed =
tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
- intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
+ size_t old_gen_size = PromotedSpaceSizeOfObjects();
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
external_memory_at_last_mark_compact_ = external_memory_;
@@ -1363,9 +1376,7 @@ bool Heap::PerformGarbageCollection(
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
- TRACE_GC(tracer(), collector == MARK_COMPACTOR
- ? GCTracer::Scope::MC_EXTERNAL_EPILOGUE
- : GCTracer::Scope::SCAVENGER_EXTERNAL_EPILOGUE);
+ TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
@@ -1444,8 +1455,10 @@ void Heap::MarkCompact() {
}
}
+void Heap::MinorMarkCompact() { UNREACHABLE(); }
void Heap::MarkCompactEpilogue() {
+ TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
gc_state_ = NOT_IN_GC;
isolate_->counters()->objs_since_last_full()->Set(0);
@@ -1455,18 +1468,12 @@ void Heap::MarkCompactEpilogue() {
PreprocessStackTraces();
DCHECK(incremental_marking()->IsStopped());
- // We finished a marking cycle. We can uncommit the marking deque until
- // we start marking again.
- mark_compact_collector()->marking_deque()->Uninitialize();
- mark_compact_collector()->EnsureMarkingDequeIsCommitted(
- MarkCompactCollector::kMinMarkingDequeSize);
+ mark_compact_collector()->marking_deque()->StopUsing();
}
void Heap::MarkCompactPrologue() {
- // At any old GC clear the keyed lookup cache to enable collection of unused
- // maps.
- isolate_->keyed_lookup_cache()->Clear();
+ TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
@@ -1604,7 +1611,7 @@ void Heap::Scavenge() {
LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Used for updating survived_since_last_expansion_ at function end.
- intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+ size_t survived_watermark = PromotedSpaceSizeOfObjects();
scavenge_collector_->SelectScavengingVisitorsTable();
@@ -1640,13 +1647,10 @@ void Heap::Scavenge() {
Address new_space_front = new_space_->ToSpaceStart();
promotion_queue_.Initialize();
- PromotionMode promotion_mode = CurrentPromotionMode();
ScavengeVisitor scavenge_visitor(this);
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &IsUnmodifiedHeapObject);
- }
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &IsUnmodifiedHeapObject);
{
// Copy roots.
@@ -1678,8 +1682,6 @@ void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
// Copy objects reachable from the encountered weak collections list.
scavenge_visitor.VisitPointer(&encountered_weak_collections_);
- // Copy objects reachable from the encountered weak cells.
- scavenge_visitor.VisitPointer(&encountered_weak_cells_);
}
{
@@ -1693,36 +1695,15 @@ void Heap::Scavenge() {
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
- if (FLAG_scavenge_reclaim_unmodified_objects) {
- isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnscavengedHeapObject);
-
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &scavenge_visitor);
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
- } else {
- TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
- while (isolate()->global_handles()->IterateObjectGroups(
- &scavenge_visitor, &IsUnscavengedHeapObject)) {
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
- }
- isolate()->global_handles()->RemoveObjectGroups();
- isolate()->global_handles()->RemoveImplicitRefGroups();
-
- isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
- &IsUnscavengedHeapObject);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
- &scavenge_visitor);
- new_space_front =
- DoScavenge(&scavenge_visitor, new_space_front, promotion_mode);
- }
+ isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+ &scavenge_visitor);
+ new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1742,9 +1723,9 @@ void Heap::Scavenge() {
ArrayBufferTracker::FreeDeadInNewSpace(this);
// Update how much has survived scavenge.
- IncrementYoungSurvivorsCounter(
- static_cast<int>((PromotedSpaceSizeOfObjects() - survived_watermark) +
- new_space_->Size()));
+ DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
+ IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
+ new_space_->Size() - survived_watermark);
LOG(isolate_, ResourceEvent("scavenge", "end"));
@@ -1905,8 +1886,7 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front,
- PromotionMode promotion_mode) {
+ Address new_space_front) {
do {
SemiSpace::AssertValidRange(new_space_front, new_space_->top());
// The addresses new_space_front and new_space_.top() define a
@@ -1915,14 +1895,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
while (new_space_front != new_space_->top()) {
if (!Page::IsAlignedToPageSize(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
- if (promotion_mode == PROMOTE_MARKED) {
- new_space_front += StaticScavengeVisitor<PROMOTE_MARKED>::IterateBody(
- object->map(), object);
- } else {
- new_space_front +=
- StaticScavengeVisitor<DEFAULT_PROMOTION>::IterateBody(
- object->map(), object);
- }
+ new_space_front +=
+ StaticScavengeVisitor::IterateBody(object->map(), object);
} else {
new_space_front = Page::FromAllocationAreaAddress(new_space_front)
->next_page()
@@ -1944,8 +1918,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// to new space.
DCHECK(!target->IsMap());
- IteratePromotedObject(target, static_cast<int>(size), was_marked_black,
- &Scavenger::ScavengeObject);
+ IterateAndScavengePromotedObject(target, static_cast<int>(size),
+ was_marked_black);
}
}
@@ -2039,7 +2013,7 @@ void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
Max(MinimumAllocationLimitGrowingStep(),
- static_cast<intptr_t>(
+ static_cast<size_t>(
static_cast<double>(old_generation_allocation_limit_) *
(tracer()->AverageSurvivalRatio() / 100)));
}
@@ -2074,7 +2048,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::OwnsDescriptors::encode(true) |
Map::ConstructionCounter::encode(Map::kNoSlackTracking);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
- reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
+ reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
return result;
}
@@ -2098,8 +2072,8 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- map->set_weak_cell_cache(Smi::FromInt(0));
- map->set_raw_transitions(Smi::FromInt(0));
+ map->set_weak_cell_cache(Smi::kZero);
+ map->set_raw_transitions(Smi::kZero);
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
@@ -2171,7 +2145,7 @@ namespace {
void FinalizePartialMap(Heap* heap, Map* map) {
map->set_code_cache(heap->empty_fixed_array());
map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
- map->set_raw_transitions(Smi::FromInt(0));
+ map->set_raw_transitions(Smi::kZero);
map->set_instance_descriptors(heap->empty_descriptor_array());
if (FLAG_unbox_double_fields) {
map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2281,7 +2255,6 @@ bool Heap::CreateInitialMaps() {
DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
- ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info_entry)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
@@ -2506,7 +2479,7 @@ AllocationResult Heap::AllocatePropertyCell() {
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
+ cell->set_property_details(PropertyDetails(Smi::kZero));
cell->set_value(the_hole_value());
return result;
}
@@ -2553,16 +2526,6 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
void Heap::CreateApiObjects() {
HandleScope scope(isolate());
- Factory* factory = isolate()->factory();
- Handle<Map> new_neander_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-
- // Don't use Smi-only elements optimizations for objects with the neander
- // map. There are too many cases where element values are set directly with a
- // bottleneck to trap the Smi-only -> fast elements transition, and there
- // appears to be no benefit for optimize this case.
- new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
- set_neander_map(*new_neander_map);
set_message_listeners(*TemplateList::New(isolate(), 2));
}
@@ -2636,8 +2599,7 @@ void Heap::CreateInitialObjects() {
// Initialize the null_value.
Oddball::Initialize(isolate(), factory->null_value(), "null",
- handle(Smi::FromInt(0), isolate()), "object",
- Oddball::kNull);
+ handle(Smi::kZero, isolate()), "object", Oddball::kNull);
// Initialize the_hole_value.
Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
@@ -2651,7 +2613,7 @@ void Heap::CreateInitialObjects() {
// Initialize the false_value.
Oddball::Initialize(isolate(), factory->false_value(), "false",
- handle(Smi::FromInt(0), isolate()), "boolean",
+ handle(Smi::kZero, isolate()), "boolean",
Oddball::kFalse);
set_uninitialized_value(
@@ -2697,9 +2659,9 @@ void Heap::CreateInitialObjects() {
// expanding the dictionary during bootstrapping.
set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
- set_instanceof_cache_function(Smi::FromInt(0));
- set_instanceof_cache_map(Smi::FromInt(0));
- set_instanceof_cache_answer(Smi::FromInt(0));
+ set_instanceof_cache_function(Smi::kZero);
+ set_instanceof_cache_map(Smi::kZero);
+ set_instanceof_cache_answer(Smi::kZero);
{
HandleScope scope(isolate());
@@ -2768,7 +2730,7 @@ void Heap::CreateInitialObjects() {
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
// The symbol registry is initialized lazily.
- set_symbol_registry(Smi::FromInt(0));
+ set_symbol_registry(Smi::kZero);
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
@@ -2816,7 +2778,7 @@ void Heap::CreateInitialObjects() {
empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
empty_fixed_array());
empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
- Smi::FromInt(0));
+ Smi::kZero);
set_empty_type_feedback_vector(*empty_type_feedback_vector);
// We use a canonical empty LiteralsArray for all functions that neither
@@ -2839,14 +2801,6 @@ void Heap::CreateInitialObjects() {
Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
set_empty_weak_cell(*cell);
cell->clear();
-
- Handle<FixedArray> cleared_optimized_code_map =
- factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
- cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
- *cell);
- STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
- SharedFunctionInfo::kSharedCodeIndex == 0);
- set_cleared_optimized_code_map(*cleared_optimized_code_map);
}
set_detached_contexts(empty_fixed_array());
@@ -2860,7 +2814,7 @@ void Heap::CreateInitialObjects() {
ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
weak_new_space_object_to_code_list()->SetLength(0);
- set_script_list(Smi::FromInt(0));
+ set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
SeededNumberDictionary::New(isolate(), 0, TENURED);
@@ -2871,7 +2825,7 @@ void Heap::CreateInitialObjects() {
// Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
- set_next_template_serial_number(Smi::FromInt(0));
+ set_next_template_serial_number(Smi::kZero);
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
@@ -2879,7 +2833,7 @@ void Heap::CreateInitialObjects() {
set_empty_script(*script);
Handle<PropertyCell> cell = factory->NewPropertyCell();
- cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_array_protector(*cell);
cell = factory->NewPropertyCell();
@@ -2887,29 +2841,34 @@ void Heap::CreateInitialObjects() {
set_empty_property_cell(*cell);
cell = factory->NewPropertyCell();
- cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_has_instance_protector(*cell);
Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
Handle<Cell> species_cell = factory->NewCell(
- handle(Smi::FromInt(Isolate::kArrayProtectorValid), isolate()));
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_species_protector(*species_cell);
cell = factory->NewPropertyCell();
- cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
set_string_length_protector(*cell);
- set_serialized_templates(empty_fixed_array());
+ Handle<Cell> fast_array_iteration_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_fast_array_iteration_protector(*fast_array_iteration_cell);
- set_weak_stack_trace_list(Smi::FromInt(0));
+ Handle<Cell> array_iterator_cell = factory->NewCell(
+ handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
+ set_array_iterator_protector(*array_iterator_cell);
- set_noscript_shared_function_infos(Smi::FromInt(0));
+ set_serialized_templates(empty_fixed_array());
- // Initialize keyed lookup cache.
- isolate_->keyed_lookup_cache()->Clear();
+ set_weak_stack_trace_list(Smi::kZero);
+
+ set_noscript_shared_function_infos(Smi::kZero);
// Initialize context slot cache.
isolate_->context_slot_cache()->Clear();
@@ -2964,12 +2923,13 @@ int Heap::FullSizeNumberStringCacheLength() {
// Compute the size of the number string cache based on the max newspace size.
// The number string cache has a minimum size based on twice the initial cache
// size to ensure that it is bigger after being made 'full size'.
- int number_string_cache_size = max_semi_space_size_ / 512;
- number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
- Min(0x4000, number_string_cache_size));
+ size_t number_string_cache_size = max_semi_space_size_ / 512;
+ number_string_cache_size =
+ Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
+ Min<size_t>(0x4000u, number_string_cache_size));
// There is a string and a number per entry so the length is twice the number
// of entries.
- return number_string_cache_size * 2;
+ return static_cast<int>(number_string_cache_size * 2);
}
@@ -3308,7 +3268,7 @@ AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
- elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
elements->set_length(length);
return elements;
@@ -3392,7 +3352,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(!memory_allocator()->code_range()->valid() ||
memory_allocator()->code_range()->contains(code->address()) ||
object_size <= code_space()->AreaSize());
- code->set_gc_metadata(Smi::FromInt(0));
+ code->set_gc_metadata(Smi::kZero);
code->set_ic_age(global_ic_age_);
return code;
}
@@ -3489,7 +3449,7 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// TODO(1240798): Initialize the object's body using valid initial values
// according to the object's initial map. For example, if the map's
// instance type is JS_ARRAY_TYPE, the length field should be initialized
- // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+ // to a number (e.g. Smi::kZero) and the elements initialized to a
// fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
// verification code has to cope with (temporarily) invalid objects. See
// for example, JSArray::JSArrayVerify).
@@ -4036,13 +3996,7 @@ AllocationResult Heap::AllocateSymbol() {
result->set_map_no_write_barrier(symbol_map());
// Generate a random hash value.
- int hash;
- int attempts = 0;
- do {
- hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
- attempts++;
- } while (hash == 0 && attempts < 30);
- if (hash == 0) hash = 1; // never return 0
+ int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
Symbol::cast(result)
->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
@@ -4165,16 +4119,16 @@ bool Heap::HasLowAllocationRate() {
bool Heap::HasHighFragmentation() {
- intptr_t used = PromotedSpaceSizeOfObjects();
- intptr_t committed = CommittedOldGenerationMemory();
+ size_t used = PromotedSpaceSizeOfObjects();
+ size_t committed = CommittedOldGenerationMemory();
return HasHighFragmentation(used, committed);
}
-
-bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
- const intptr_t kSlack = 16 * MB;
+bool Heap::HasHighFragmentation(size_t used, size_t committed) {
+ const size_t kSlack = 16 * MB;
// Fragmentation is high if committed > 2 * used + kSlack.
// Rewrite the exression to avoid overflow.
+ DCHECK_GE(committed, used);
return committed - used > used + kSlack;
}
@@ -4229,8 +4183,7 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
(!incremental_marking()->finalize_marking_completed() &&
MarkingDequesAreEmpty()))) {
FinalizeIncrementalMarking(gc_reason);
- } else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector()->marking_deque()->IsEmpty())) {
+ } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
CollectAllGarbage(current_gc_flags_, gc_reason);
}
}
@@ -4457,7 +4410,8 @@ void Heap::CheckMemoryPressure() {
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
- isolate()->optimizing_compile_dispatcher()->Flush();
+ isolate()->optimizing_compile_dispatcher()->Flush(
+ OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
}
}
if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
@@ -4784,51 +4738,44 @@ void Heap::ZapFromSpace() {
}
}
-void Heap::IteratePromotedObjectPointers(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback) {
- Address slot_address = start;
- Page* page = Page::FromAddress(start);
-
- while (slot_address < end) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* target = *slot;
- if (target->IsHeapObject()) {
- if (Heap::InFromSpace(target)) {
- callback(reinterpret_cast<HeapObject**>(slot),
- HeapObject::cast(target));
- Object* new_target = *slot;
- if (InNewSpace(new_target)) {
- SLOW_DCHECK(Heap::InToSpace(new_target));
- SLOW_DCHECK(new_target->IsHeapObject());
- RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
+class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
+ public:
+ IterateAndScavengePromotedObjectsVisitor(Heap* heap, HeapObject* target,
+ bool record_slots)
+ : heap_(heap), target_(target), record_slots_(record_slots) {}
+
+ inline void VisitPointers(Object** start, Object** end) override {
+ Address slot_address = reinterpret_cast<Address>(start);
+ Page* page = Page::FromAddress(slot_address);
+
+ while (slot_address < reinterpret_cast<Address>(end)) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* target = *slot;
+
+ if (target->IsHeapObject()) {
+ if (heap_->InFromSpace(target)) {
+ Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
+ HeapObject::cast(target));
+ target = *slot;
+ if (heap_->InNewSpace(target)) {
+ SLOW_DCHECK(heap_->InToSpace(target));
+ SLOW_DCHECK(target->IsHeapObject());
+ RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
+ }
+ SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target)));
+ } else if (record_slots_ &&
+ MarkCompactCollector::IsOnEvacuationCandidate(
+ HeapObject::cast(target))) {
+ heap_->mark_compact_collector()->RecordSlot(target_, slot, target);
}
- SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
- } else if (record_slots &&
- MarkCompactCollector::IsOnEvacuationCandidate(target)) {
- mark_compact_collector()->RecordSlot(object, slot, target);
}
- }
- slot_address += kPointerSize;
- }
-}
-
-class IteratePromotedObjectsVisitor final : public ObjectVisitor {
- public:
- IteratePromotedObjectsVisitor(Heap* heap, HeapObject* target,
- bool record_slots, ObjectSlotCallback callback)
- : heap_(heap),
- target_(target),
- record_slots_(record_slots),
- callback_(callback) {}
- V8_INLINE void VisitPointers(Object** start, Object** end) override {
- heap_->IteratePromotedObjectPointers(
- target_, reinterpret_cast<Address>(start),
- reinterpret_cast<Address>(end), record_slots_, callback_);
+ slot_address += kPointerSize;
+ }
}
- V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ inline void VisitCodeEntry(Address code_entry_slot) override {
// Black allocation requires us to process objects referenced by
// promoted objects.
if (heap_->incremental_marking()->black_allocation()) {
@@ -4841,12 +4788,10 @@ class IteratePromotedObjectsVisitor final : public ObjectVisitor {
Heap* heap_;
HeapObject* target_;
bool record_slots_;
- ObjectSlotCallback callback_;
};
-void Heap::IteratePromotedObject(HeapObject* target, int size,
- bool was_marked_black,
- ObjectSlotCallback callback) {
+void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
+ bool was_marked_black) {
// We are not collecting slots on new space objects during mutation
// thus we have to scan for pointers to evacuation candidates when we
// promote objects. But we should not record any slots in non-black
@@ -4859,8 +4804,14 @@ void Heap::IteratePromotedObject(HeapObject* target, int size,
record_slots = Marking::IsBlack(mark_bit);
}
- IteratePromotedObjectsVisitor visitor(this, target, record_slots, callback);
- target->IterateBody(target->map()->instance_type(), size, &visitor);
+ IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
+ if (target->IsJSFunction()) {
+ // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
+ // this links are recorded during processing of weak lists.
+ JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
+ } else {
+ target->IterateBody(target->map()->instance_type(), size, &visitor);
+ }
// When black allocations is on, we have to visit not already marked black
// objects (in new space) promoted to black pages to keep their references
@@ -5032,31 +4983,31 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
- int max_executable_size, size_t code_range_size) {
+bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+ size_t max_executable_size, size_t code_range_size) {
if (HasBeenSetUp()) return false;
// Overwrite default configuration.
- if (max_semi_space_size > 0) {
+ if (max_semi_space_size != 0) {
max_semi_space_size_ = max_semi_space_size * MB;
}
- if (max_old_space_size > 0) {
- max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
+ if (max_old_space_size != 0) {
+ max_old_generation_size_ = max_old_space_size * MB;
}
- if (max_executable_size > 0) {
- max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
+ if (max_executable_size != 0) {
+ max_executable_size_ = max_executable_size * MB;
}
// If max space size flags are specified overwrite the configuration.
if (FLAG_max_semi_space_size > 0) {
- max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+ max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
}
if (FLAG_max_old_space_size > 0) {
max_old_generation_size_ =
- static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
+ static_cast<size_t>(FLAG_max_old_space_size) * MB;
}
if (FLAG_max_executable_size > 0) {
- max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
+ max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
}
if (Page::kPageSize > MB) {
@@ -5073,17 +5024,18 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// The new space size must be a power of two to support single-bit testing
// for containment.
- max_semi_space_size_ =
- base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+ max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(max_semi_space_size_));
if (FLAG_min_semi_space_size > 0) {
- int initial_semispace_size = FLAG_min_semi_space_size * MB;
+ size_t initial_semispace_size =
+ static_cast<size_t>(FLAG_min_semi_space_size) * MB;
if (initial_semispace_size > max_semi_space_size_) {
initial_semispace_size_ = max_semi_space_size_;
if (FLAG_trace_gc) {
PrintIsolate(isolate_,
"Min semi-space size cannot be more than the maximum "
- "semi-space size of %d MB\n",
+ "semi-space size of %" PRIuS " MB\n",
max_semi_space_size_ / MB);
}
} else {
@@ -5101,7 +5053,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
max_old_generation_size_ =
- Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+ Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
// The max executable size must be less than or equal to the max old
@@ -5200,16 +5152,15 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
}
}
-
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
+size_t Heap::PromotedSpaceSizeOfObjects() {
return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
}
-
-int64_t Heap::PromotedExternalMemorySize() {
+uint64_t Heap::PromotedExternalMemorySize() {
if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
- return external_memory_ - external_memory_at_last_mark_compact_;
+ return static_cast<uint64_t>(external_memory_ -
+ external_memory_at_last_mark_compact_);
}
@@ -5277,29 +5228,29 @@ double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
return factor;
}
-
-intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
- intptr_t old_gen_size) {
+size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+ size_t old_gen_size) {
CHECK(factor > 1.0);
CHECK(old_gen_size > 0);
- intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
- limit = Max(limit, old_gen_size + MinimumAllocationLimitGrowingStep());
+ uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
+ limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
+ MinimumAllocationLimitGrowingStep());
limit += new_space_->Capacity();
- intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
- return Min(limit, halfway_to_the_max);
+ uint64_t halfway_to_the_max =
+ (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
+ return static_cast<size_t>(Min(limit, halfway_to_the_max));
}
-intptr_t Heap::MinimumAllocationLimitGrowingStep() {
- const double kRegularAllocationLimitGrowingStep = 8;
- const double kLowMemoryAllocationLimitGrowingStep = 2;
- intptr_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
+size_t Heap::MinimumAllocationLimitGrowingStep() {
+ const size_t kRegularAllocationLimitGrowingStep = 8;
+ const size_t kLowMemoryAllocationLimitGrowingStep = 2;
+ size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
return limit * (ShouldOptimizeForMemoryUsage()
? kLowMemoryAllocationLimitGrowingStep
: kRegularAllocationLimitGrowingStep);
}
-void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
- double gc_speed,
+void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed) {
double factor = HeapGrowingFactor(gc_speed, mutator_speed);
@@ -5332,24 +5283,23 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (FLAG_trace_gc_verbose) {
- isolate_->PrintWithTimestamp("Grow: old size: %" V8PRIdPTR
- " KB, new limit: %" V8PRIdPTR " KB (%.1f)\n",
- old_gen_size / KB,
- old_generation_allocation_limit_ / KB, factor);
+ isolate_->PrintWithTimestamp(
+ "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
+ old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
}
}
-void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
double gc_speed,
double mutator_speed) {
double factor = HeapGrowingFactor(gc_speed, mutator_speed);
- intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+ size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
if (limit < old_generation_allocation_limit_) {
if (FLAG_trace_gc_verbose) {
isolate_->PrintWithTimestamp(
- "Dampen: old size: %" V8PRIdPTR " KB, old limit: %" V8PRIdPTR
+ "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
" KB, "
- "new limit: %" V8PRIdPTR " KB (%.1f)\n",
+ "new limit: %" PRIuS " KB (%.1f)\n",
old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
factor);
}
@@ -5362,12 +5312,16 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
// major GC. It happens when the old generation allocation limit is reached and
// - either we need to optimize for memory usage,
// - or the incremental marking is not in progress and we cannot start it.
-bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
// We reached the old generation allocation limit.
if (ShouldOptimizeForMemoryUsage()) return false;
+ if (incremental_marking()->NeedsFinalization()) {
+ return !AllocationLimitOvershotByLargeMargin();
+ }
+
if (incremental_marking()->IsStopped() &&
IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
// We cannot start incremental marking.
@@ -5383,7 +5337,8 @@ bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
// The kHardLimit means that incremental marking should be started immediately.
Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (!incremental_marking()->CanBeActivated() ||
- PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+ PromotedSpaceSizeOfObjects() <=
+ IncrementalMarking::kActivationThreshold) {
// Incremental marking is disabled or it is too early to start.
return IncrementalMarkingLimit::kNoLimit;
}
@@ -5393,13 +5348,13 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
// start marking immediately.
return IncrementalMarkingLimit::kHardLimit;
}
- intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+ size_t old_generation_space_available = OldGenerationSpaceAvailable();
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
// We are close to the allocation limit.
// Choose between the hard and the soft limits.
- if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+ if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
@@ -5434,8 +5389,7 @@ V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
Scavenger::Initialize();
- StaticScavengeVisitor<DEFAULT_PROMOTION>::Initialize();
- StaticScavengeVisitor<PROMOTE_MARKED>::Initialize();
+ StaticScavengeVisitor::Initialize();
MarkCompactCollector::Initialize();
}
@@ -5518,7 +5472,7 @@ bool Heap::SetUp() {
mark_compact_collector_ = new MarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
- if (FLAG_track_gc_object_stats) {
+ if (V8_UNLIKELY(FLAG_gc_stats)) {
live_object_stats_ = new ObjectStats(this);
dead_object_stats_ = new ObjectStats(this);
}
@@ -5570,8 +5524,8 @@ void Heap::SetStackLimits() {
}
void Heap::ClearStackLimits() {
- roots_[kStackLimitRootIndex] = Smi::FromInt(0);
- roots_[kRealStackLimitRootIndex] = Smi::FromInt(0);
+ roots_[kStackLimitRootIndex] = Smi::kZero;
+ roots_[kRealStackLimitRootIndex] = Smi::kZero;
}
void Heap::PrintAlloctionsHash() {
@@ -5597,8 +5551,7 @@ void Heap::NotifyDeserializationComplete() {
}
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
- DCHECK_NOT_NULL(tracer);
- CHECK_NULL(embedder_heap_tracer_);
+ DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
embedder_heap_tracer_ = tracer;
}
@@ -5756,9 +5709,6 @@ void Heap::TearDown() {
delete memory_allocator_;
memory_allocator_ = nullptr;
-
- delete embedder_reference_reporter_;
- embedder_reference_reporter_ = nullptr;
}
@@ -5951,11 +5901,10 @@ void Heap::CheckHandleCount() {
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
if (!InNewSpace(object)) {
- store_buffer()->MoveEntriesToRememberedSet();
Address slot_addr = reinterpret_cast<Address>(slot);
Page* page = Page::FromAddress(slot_addr);
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+ store_buffer()->DeleteEntry(slot_addr);
RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
}
}
@@ -5963,10 +5912,10 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
if (!page->InNewSpace()) {
- store_buffer()->MoveEntriesToRememberedSet();
DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end);
- RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end);
+ store_buffer()->DeleteEntry(start, end);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
+ SlotSet::FREE_EMPTY_BUCKETS);
}
}
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index ba89686672..013cd9a8fe 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -48,8 +48,6 @@ using v8::MemoryPressureLevel;
V(Map, one_byte_string_map, OneByteStringMap) \
V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, scope_info_map, ScopeInfoMap) \
- V(Map, module_info_entry_map, ModuleInfoEntryMap) \
- V(Map, module_info_map, ModuleInfoMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, code_map, CodeMap) \
V(Map, function_context_map, FunctionContextMap) \
@@ -62,13 +60,13 @@ using v8::MemoryPressureLevel;
V(FixedArray, empty_literals_array, EmptyLiteralsArray) \
V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
- V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
- V(FixedArray, cleared_optimized_code_map, ClearedOptimizedCodeMap) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
/* Entries beyond the first 32 */ \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
/* being compacted. */ \
+ /* Empty scope info */ \
+ V(ScopeInfo, empty_scope_info, EmptyScopeInfo) \
/* Oddballs */ \
V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Oddball, arguments_marker, ArgumentsMarker) \
@@ -92,9 +90,9 @@ using v8::MemoryPressureLevel;
V(Map, unseeded_number_dictionary_map, UnseededNumberDictionaryMap) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
V(Map, message_object_map, JSMessageObjectMap) \
- V(Map, neander_map, NeanderMap) \
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
+ V(Map, module_info_map, ModuleInfoMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -168,6 +166,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, has_instance_protector, HasInstanceProtector) \
V(Cell, species_protector, SpeciesProtector) \
V(PropertyCell, string_length_protector, StringLengthProtector) \
+ V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
+ V(Cell, array_iterator_protector, ArrayIteratorProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -278,7 +278,6 @@ using v8::MemoryPressureLevel;
V(FixedArrayMap) \
V(CodeMap) \
V(ScopeInfoMap) \
- V(ModuleInfoEntryMap) \
V(ModuleInfoMap) \
V(FixedCOWArrayMap) \
V(FixedDoubleArrayMap) \
@@ -307,7 +306,6 @@ using v8::MemoryPressureLevel;
V(ArgumentsMarkerMap) \
V(JSMessageObjectMap) \
V(ForeignMap) \
- V(NeanderMap) \
V(NanValue) \
V(InfinityValue) \
V(MinusZeroValue) \
@@ -342,8 +340,6 @@ class WeakObjectRetainer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-enum PromotionMode { PROMOTE_MARKED, DEFAULT_PROMOTION };
-
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -626,8 +622,8 @@ class Heap {
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
- static const double kMinHeapGrowingFactor;
- static const double kMaxHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static const double kMinHeapGrowingFactor;
+ V8_EXPORT_PRIVATE static const double kMaxHeapGrowingFactor;
static const double kMaxHeapGrowingFactorMemoryConstrained;
static const double kMaxHeapGrowingFactorIdle;
static const double kConservativeHeapGrowingFactor;
@@ -690,7 +686,28 @@ class Heap {
#endif
}
- static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+ static inline bool IsYoungGenerationCollector(GarbageCollector collector) {
+ return collector == SCAVENGER || collector == MINOR_MARK_COMPACTOR;
+ }
+
+ static inline GarbageCollector YoungGenerationCollector() {
+ return (FLAG_minor_mc) ? MINOR_MARK_COMPACTOR : SCAVENGER;
+ }
+
+ static inline const char* CollectorName(GarbageCollector collector) {
+ switch (collector) {
+ case SCAVENGER:
+ return "Scavenger";
+ case MARK_COMPACTOR:
+ return "Mark-Compact";
+ case MINOR_MARK_COMPACTOR:
+ return "Minor Mark-Compact";
+ }
+ return "Unknown collector";
+ }
+
+ V8_EXPORT_PRIVATE static double HeapGrowingFactor(double gc_speed,
+ double mutator_speed);
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
@@ -835,11 +852,8 @@ class Heap {
// An object should be promoted if the object has survived a
// scavenge operation.
- template <PromotionMode promotion_mode>
inline bool ShouldBePromoted(Address old_address, int object_size);
- inline PromotionMode CurrentPromotionMode();
-
void ClearNormalizedMapCaches();
void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
@@ -916,7 +930,7 @@ class Heap {
bool HasLowAllocationRate();
bool HasHighFragmentation();
- bool HasHighFragmentation(intptr_t used, intptr_t committed);
+ bool HasHighFragmentation(size_t used, size_t committed);
void ActivateMemoryReducerIfNeeded();
@@ -940,8 +954,8 @@ class Heap {
// Configure heap size in MB before setup. Return false if the heap has been
// set up already.
- bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
- int max_executable_size, size_t code_range_size);
+ bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
+ size_t max_executable_size, size_t code_range_size);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
@@ -952,6 +966,9 @@ class Heap {
// Returns whether it succeeded.
bool CreateHeapObjects();
+ // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
+ V8_INLINE void CreateObjectStats();
+
// Destroys all memory allocated by the heap.
void TearDown();
@@ -1023,6 +1040,14 @@ class Heap {
Handle<Object> root_handle(RootListIndex index) {
return Handle<Object>(&roots_[index]);
}
+ template <typename T>
+ bool IsRootHandle(Handle<T> handle, RootListIndex* index) const {
+ Object** const handle_location = bit_cast<Object**>(handle.address());
+ if (handle_location >= &roots_[kRootListLength]) return false;
+ if (handle_location < &roots_[0]) return false;
+ *index = static_cast<RootListIndex>(handle_location - &roots_[0]);
+ return true;
+ }
// Generated code can embed this address to get access to the roots.
Object** roots_array_start() { return roots_; }
@@ -1127,13 +1152,8 @@ class Heap {
void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterate pointers of promoted objects.
- void IteratePromotedObject(HeapObject* target, int size,
- bool was_marked_black,
- ObjectSlotCallback callback);
-
- void IteratePromotedObjectPointers(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback);
+ void IterateAndScavengePromotedObject(HeapObject* target, int size,
+ bool was_marked_black);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1199,10 +1219,6 @@ class Heap {
EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
- EmbedderReachableReferenceReporter* embedder_reachable_reference_reporter() {
- return embedder_reference_reporter_;
- }
-
size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
// ===========================================================================
@@ -1274,20 +1290,20 @@ class Heap {
// ===========================================================================
// Returns the maximum amount of memory reserved for the heap.
- intptr_t MaxReserved() {
+ size_t MaxReserved() {
return 2 * max_semi_space_size_ + max_old_generation_size_;
}
- int MaxSemiSpaceSize() { return max_semi_space_size_; }
- int InitialSemiSpaceSize() { return initial_semispace_size_; }
- intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- intptr_t MaxExecutableSize() { return max_executable_size_; }
+ size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
+ size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
+ size_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ size_t MaxExecutableSize() { return max_executable_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
- intptr_t Capacity();
+ size_t Capacity();
// Returns the capacity of the old generation.
- intptr_t OldGenerationCapacity();
+ size_t OldGenerationCapacity();
// Returns the amount of memory currently committed for the heap.
size_t CommittedMemory();
@@ -1311,28 +1327,26 @@ class Heap {
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
- intptr_t Available();
+ size_t Available();
// Returns of size of all objects residing in the heap.
- intptr_t SizeOfObjects();
+ size_t SizeOfObjects();
void UpdateSurvivalStatistics(int start_new_space_size);
- inline void IncrementPromotedObjectsSize(intptr_t object_size) {
- DCHECK_GE(object_size, 0);
+ inline void IncrementPromotedObjectsSize(size_t object_size) {
promoted_objects_size_ += object_size;
}
- inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
+ inline size_t promoted_objects_size() { return promoted_objects_size_; }
- inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
- DCHECK_GE(object_size, 0);
+ inline void IncrementSemiSpaceCopiedObjectSize(size_t object_size) {
semi_space_copied_object_size_ += object_size;
}
- inline intptr_t semi_space_copied_object_size() {
+ inline size_t semi_space_copied_object_size() {
return semi_space_copied_object_size_;
}
- inline intptr_t SurvivedNewSpaceObjectSize() {
+ inline size_t SurvivedNewSpaceObjectSize() {
return promoted_objects_size_ + semi_space_copied_object_size_;
}
@@ -1342,20 +1356,13 @@ class Heap {
inline void IncrementNodesPromoted() { nodes_promoted_++; }
- inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
- DCHECK_GE(survived, 0);
+ inline void IncrementYoungSurvivorsCounter(size_t survived) {
survived_last_scavenge_ = survived;
survived_since_last_expansion_ += survived;
}
- inline intptr_t PromotedTotalSize() {
- int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
- if (total > std::numeric_limits<intptr_t>::max()) {
- // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
- return std::numeric_limits<intptr_t>::max();
- }
- if (total < 0) return 0;
- return static_cast<intptr_t>(total);
+ inline uint64_t PromotedTotalSize() {
+ return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
inline void UpdateNewSpaceAllocationCounter();
@@ -1389,7 +1396,7 @@ class Heap {
int gc_count() const { return gc_count_; }
// Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSizeOfObjects();
+ size_t PromotedSpaceSizeOfObjects();
double total_regexp_code_generated() { return total_regexp_code_generated_; }
void IncreaseTotalRegexpCodeGenerated(int size) {
@@ -1439,6 +1446,10 @@ class Heap {
// ArrayBuffer tracking. =====================================================
// ===========================================================================
+ // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
+ // in the registration/unregistration APIs. Consider dropping the "New" from
+ // "RegisterNewArrayBuffer" because one can re-register a previously
+ // unregistered buffer, too, and the name is confusing.
void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
void UnregisterArrayBuffer(JSArrayBuffer* buffer);
@@ -1770,6 +1781,8 @@ class Heap {
// Performs a major collection in the whole heap.
void MarkCompact();
+ // Performs a minor collection of just the young generation.
+ void MinorMarkCompact();
// Code to be run before and after mark-compact.
void MarkCompactPrologue();
@@ -1778,8 +1791,7 @@ class Heap {
// Performs a minor collection in new generation.
void Scavenge();
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front,
- PromotionMode promotion_mode);
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -1797,8 +1809,27 @@ class Heap {
// GC statistics. ============================================================
// ===========================================================================
- inline intptr_t OldGenerationSpaceAvailable() {
- return old_generation_allocation_limit_ - PromotedTotalSize();
+ inline size_t OldGenerationSpaceAvailable() {
+ if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
+ return old_generation_allocation_limit_ -
+ static_cast<size_t>(PromotedTotalSize());
+ }
+
+ // We allow incremental marking to overshoot the allocation limit for
+ // performance reasons. If the overshoot is too large then we are more
+ // eager to finalize incremental marking.
+ inline bool AllocationLimitOvershotByLargeMargin() {
+ // This guards against too eager finalization in small heaps.
+ // The number is chosen based on v8.browsing_mobile on Nexus 7v2.
+ size_t kMarginForSmallHeaps = 32u * MB;
+ if (old_generation_allocation_limit_ >= PromotedTotalSize()) return false;
+ uint64_t overshoot = PromotedTotalSize() - old_generation_allocation_limit_;
+ // Overshoot margin is 50% of allocation limit or half-way to the max heap
+ // with special handling of small heaps.
+ uint64_t margin =
+ Min(Max(old_generation_allocation_limit_ / 2, kMarginForSmallHeaps),
+ (max_old_generation_size_ - old_generation_allocation_limit_) / 2);
+ return overshoot >= margin;
}
void UpdateTotalGCTime(double duration);
@@ -1811,23 +1842,21 @@ class Heap {
// Decrease the allocation limit if the new limit based on the given
// parameters is lower than the current limit.
- void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
- double gc_speed,
+ void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed);
-
// Calculates the allocation limit based on a given growing factor and a
// given old generation size.
- intptr_t CalculateOldGenerationAllocationLimit(double factor,
- intptr_t old_gen_size);
+ size_t CalculateOldGenerationAllocationLimit(double factor,
+ size_t old_gen_size);
// Sets the allocation limit to trigger the next full garbage collection.
- void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
+ void SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
double mutator_speed);
- intptr_t MinimumAllocationLimitGrowingStep();
+ size_t MinimumAllocationLimitGrowingStep();
- intptr_t old_generation_allocation_limit() const {
+ size_t old_generation_allocation_limit() const {
return old_generation_allocation_limit_;
}
@@ -1842,7 +1871,7 @@ class Heap {
return OldGenerationCapacity() + slack >= MaxOldGenerationSize();
}
- bool ShouldExpandOldGenerationOnAllocationFailure();
+ bool ShouldExpandOldGenerationOnSlowAllocation();
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
IncrementalMarkingLimit IncrementalMarkingLimitReached();
@@ -2096,20 +2125,20 @@ class Heap {
Object* roots_[kRootListLength];
size_t code_range_size_;
- int max_semi_space_size_;
- int initial_semispace_size_;
- intptr_t max_old_generation_size_;
- intptr_t initial_old_generation_size_;
+ size_t max_semi_space_size_;
+ size_t initial_semispace_size_;
+ size_t max_old_generation_size_;
+ size_t initial_old_generation_size_;
bool old_generation_size_configured_;
- intptr_t max_executable_size_;
+ size_t max_executable_size_;
size_t maximum_committed_;
// For keeping track of how much data has survived
// scavenge since last new space expansion.
- intptr_t survived_since_last_expansion_;
+ size_t survived_since_last_expansion_;
// ... and since the last scavenge.
- intptr_t survived_last_scavenge_;
+ size_t survived_last_scavenge_;
// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
@@ -2141,7 +2170,7 @@ class Heap {
Address new_space_top_after_last_gc_;
// Returns the amount of external memory registered since last global gc.
- int64_t PromotedExternalMemorySize();
+ uint64_t PromotedExternalMemorySize();
// How many "runtime allocations" happened.
uint32_t allocations_count_;
@@ -2170,7 +2199,7 @@ class Heap {
// is checked when we have already decided to do a GC to help determine
// which collector to invoke, before expanding a paged space in the old
// generation and on every allocation in large object space.
- intptr_t old_generation_allocation_limit_;
+ size_t old_generation_allocation_limit_;
// Indicates that inline bump-pointer allocation has been globally disabled
// for all spaces. This is used to disable allocations in generated code.
@@ -2200,11 +2229,11 @@ class Heap {
GCTracer* tracer_;
- intptr_t promoted_objects_size_;
+ size_t promoted_objects_size_;
double promotion_ratio_;
double promotion_rate_;
- intptr_t semi_space_copied_object_size_;
- intptr_t previous_semi_space_copied_object_size_;
+ size_t semi_space_copied_object_size_;
+ size_t previous_semi_space_copied_object_size_;
double semi_space_copied_rate_;
int nodes_died_in_new_space_;
int nodes_copied_in_new_space_;
@@ -2310,11 +2339,11 @@ class Heap {
int heap_iterator_depth_;
EmbedderHeapTracer* embedder_heap_tracer_;
- EmbedderReachableReferenceReporter* embedder_reference_reporter_;
std::vector<std::pair<void*, void*>> wrappers_to_trace_;
// Used for testing purposes.
bool force_oom_;
+ bool delay_sweeper_tasks_for_testing_;
// Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
@@ -2324,7 +2353,6 @@ class Heap {
friend class IdleScavengeObserver;
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
- friend class IteratePromotedObjectsVisitor;
friend class LargeObjectSpace;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
@@ -2633,18 +2661,6 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
-class TracePossibleWrapperReporter : public EmbedderReachableReferenceReporter {
- public:
- explicit TracePossibleWrapperReporter(Heap* heap) : heap_(heap) {}
- void ReportExternalReference(Value* object) override {
- heap_->RegisterExternallyReferencedObject(
- reinterpret_cast<Object**>(object));
- }
-
- private:
- Heap* heap_;
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 99be9d0123..4b1d7712a7 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -519,17 +519,15 @@ void IncrementalMarking::StartMarking() {
"[IncrementalMarking] Start marking\n");
}
- is_compacting_ = !FLAG_never_compact &&
- heap_->mark_compact_collector()->StartCompaction(
- MarkCompactCollector::INCREMENTAL_COMPACTION);
+ is_compacting_ =
+ !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
state_ = MARKING;
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue(
- heap_->embedder_reachable_reference_reporter());
+ heap_->embedder_heap_tracer()->TracePrologue();
}
RecordWriteStub::Mode mode = is_compacting_
@@ -538,8 +536,7 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
- MarkCompactCollector::kMaxMarkingDequeSize);
+ heap_->mark_compact_collector()->marking_deque()->StartUsing();
ActivateIncrementalWriteBarrier();
@@ -587,9 +584,6 @@ void IncrementalMarking::FinishBlackAllocation() {
}
void IncrementalMarking::AbortBlackAllocation() {
- for (Page* page : *heap()->old_space()) {
- page->ReleaseBlackAreaEndMarkerMap();
- }
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Black allocation aborted\n");
@@ -628,9 +622,9 @@ void IncrementalMarking::ProcessWeakCells() {
Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
- Object* weak_cell_head = Smi::FromInt(0);
+ Object* weak_cell_head = Smi::kZero;
WeakCell* prev_weak_cell_obj = NULL;
- while (weak_cell_obj != Smi::FromInt(0)) {
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
@@ -648,7 +642,7 @@ void IncrementalMarking::ProcessWeakCells() {
weak_cell_obj = weak_cell->next();
weak_cell->clear_next(the_hole_value);
} else {
- if (weak_cell_head == Smi::FromInt(0)) {
+ if (weak_cell_head == Smi::kZero) {
weak_cell_head = weak_cell;
}
prev_weak_cell_obj = weak_cell;
@@ -1053,7 +1047,7 @@ void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(!FLAG_concurrent_sweeping ||
- heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
+ !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index c2290c4d82..7ce0ae2379 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -66,6 +66,11 @@ class IncrementalMarking {
return request_type_ == FINALIZATION && !finalize_marking_completed_;
}
+ inline bool NeedsFinalization() {
+ return IsMarking() &&
+ (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
+ }
+
GCRequestType request_type() const { return request_type_; }
void reset_request_type() { request_type_ = NONE; }
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index fe71fb1177..784a76f8bd 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -163,12 +163,14 @@ HeapObject* LiveObjectIterator<T>::Next() {
current_cell_ = *it_.CurrentCell();
}
+ Map* map = nullptr;
if (current_cell_ & second_bit_index) {
// We found a black object. If the black object is within a black area,
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject* black_object = HeapObject::FromAddress(addr);
- Address end = addr + black_object->Size() - kPointerSize;
+ map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
+ Address end = addr + black_object->SizeFromMap(map) - kPointerSize;
// One word filler objects do not borrow the second mark bit. We have
// to jump over the advancing and clearing part.
// Note that we know that we are at a one word filler when
@@ -198,9 +200,9 @@ HeapObject* LiveObjectIterator<T>::Next() {
// We found a live object.
if (object != nullptr) {
- if (object->IsFiller()) {
- // Black areas together with slack tracking may result in black filler
- // objects. We filter these objects out in the iterator.
+ if (map != nullptr && map == heap()->one_pointer_filler_map()) {
+ // Black areas together with slack tracking may result in black one
+ // word filler objects. We filter these objects out in the iterator.
object = nullptr;
} else {
break;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 7e5ef96fc9..88e6983035 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -25,6 +25,7 @@
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -58,8 +59,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false),
black_allocation_(false),
have_code_to_deoptimize_(false),
- marking_deque_memory_(NULL),
- marking_deque_memory_committed_(0),
+ marking_deque_(heap),
code_flusher_(nullptr),
sweeper_(heap) {
}
@@ -240,9 +240,7 @@ void MarkCompactCollector::SetUp() {
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- EnsureMarkingDequeIsReserved();
- EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
+ marking_deque()->SetUp();
if (FLAG_flush_code) {
code_flusher_ = new CodeFlusher(isolate());
@@ -255,7 +253,7 @@ void MarkCompactCollector::SetUp() {
void MarkCompactCollector::TearDown() {
AbortCompaction();
- delete marking_deque_memory_;
+ marking_deque()->TearDown();
delete code_flusher_;
}
@@ -276,8 +274,7 @@ static void TraceFragmentation(PagedSpace* space) {
static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}
-
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+bool MarkCompactCollector::StartCompaction() {
if (!compacting_) {
DCHECK(evacuation_candidates_.length() == 0);
@@ -293,33 +290,12 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
TraceFragmentation(heap()->map_space());
}
- heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
- heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
-
compacting_ = evacuation_candidates_.length() > 0;
}
return compacting_;
}
-void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
- RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
- }
-// There is not need to filter the old to old set because
-// it is completely cleared after the mark-compact GC.
-// The slots that become invalid due to runtime transitions are
-// cleared eagerly immediately after the transition.
-
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
- RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
- }
-#endif
-}
-
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -339,7 +315,7 @@ void MarkCompactCollector::CollectGarbage() {
}
#endif
- SweepSpaces();
+ StartSweepSpaces();
EvacuateNewSpaceAndCandidates();
@@ -469,22 +445,20 @@ void MarkCompactCollector::Sweeper::StartSweeping() {
std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
[](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
});
- if (FLAG_concurrent_sweeping) {
+}
+
+void MarkCompactCollector::Sweeper::StartSweeperTasks() {
+ if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
ForAllSweepingSpaces([this](AllocationSpace space) {
if (space == NEW_SPACE) return;
- StartSweepingHelper(space);
+ num_sweeping_tasks_.Increment(1);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
+ v8::Platform::kShortRunningTask);
});
}
}
-void MarkCompactCollector::Sweeper::StartSweepingHelper(
- AllocationSpace space_to_start) {
- num_sweeping_tasks_.Increment(1);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
- v8::Platform::kShortRunningTask);
-}
-
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
if (!page->SweepingDone()) {
@@ -499,7 +473,8 @@ void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
}
void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
- if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
+ if (FLAG_concurrent_sweeping &&
+ !sweeper().IsSweepingCompleted(space->identity())) {
sweeper().ParallelSweepSpace(space->identity(), 0);
space->RefillFreeList();
}
@@ -519,10 +494,11 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
- if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
- }
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
+ ParallelSweepSpace(space, 0);
+ }
+ });
if (FLAG_concurrent_sweeping) {
while (num_sweeping_tasks_.Value() > 0) {
@@ -537,13 +513,12 @@ void MarkCompactCollector::Sweeper::EnsureCompleted() {
}
DCHECK(sweeping_list_[space].empty());
});
- late_pages_ = false;
sweeping_in_progress_ = false;
}
void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
if (!sweeping_in_progress_) return;
- if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+ if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
for (Page* p : *heap_->new_space()) {
SweepOrWaitUntilSweepingCompleted(p);
}
@@ -565,13 +540,20 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
#endif
}
-bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
+bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
DCHECK(FLAG_concurrent_sweeping);
while (pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
num_sweeping_tasks_.Increment(-1);
}
- return num_sweeping_tasks_.Value() == 0;
+ return num_sweeping_tasks_.Value() != 0;
+}
+
+bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
+ DCHECK(FLAG_concurrent_sweeping);
+ if (AreSweeperTasksRunning()) return false;
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return sweeping_list_[space].empty();
}
const char* AllocationSpaceName(AllocationSpace space) {
@@ -593,22 +575,21 @@ const char* AllocationSpaceName(AllocationSpace space) {
return NULL;
}
-
void MarkCompactCollector::ComputeEvacuationHeuristics(
- int area_size, int* target_fragmentation_percent,
- int* max_evacuated_bytes) {
+ size_t area_size, int* target_fragmentation_percent,
+ size_t* max_evacuated_bytes) {
// For memory reducing and optimize for memory mode we directly define both
// constants.
const int kTargetFragmentationPercentForReduceMemory = 20;
- const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+ const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
const int kTargetFragmentationPercentForOptimizeMemory = 20;
- const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
+ const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
// For regular mode (which is latency critical) we define less aggressive
// defaults to start and switch to a trace-based (using compaction speed)
// approach as soon as we have enough samples.
const int kTargetFragmentationPercent = 70;
- const int kMaxEvacuatedBytes = 4 * MB;
+ const size_t kMaxEvacuatedBytes = 4 * MB;
// Time to take for a single area (=payload of page). Used as soon as there
// exist enough compaction speed samples.
const float kTargetMsPerArea = .5;
@@ -647,15 +628,22 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
int number_of_pages = space->CountTotalPages();
- int area_size = space->AreaSize();
+ size_t area_size = space->AreaSize();
// Pairs of (live_bytes_in_page, page).
- typedef std::pair<int, Page*> LiveBytesPagePair;
+ typedef std::pair<size_t, Page*> LiveBytesPagePair;
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
+ DCHECK(!sweeping_in_progress());
+ DCHECK(!FLAG_concurrent_sweeping ||
+ sweeper().IsSweepingCompleted(space->identity()));
+ Page* owner_of_linear_allocation_area =
+ space->top() == space->limit()
+ ? nullptr
+ : Page::FromAllocationAreaAddress(space->top());
for (Page* p : *space) {
- if (p->NeverEvacuate()) continue;
+ if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
@@ -669,7 +657,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
int candidate_count = 0;
- int total_live_bytes = 0;
+ size_t total_live_bytes = 0;
const bool reduce_memory = heap()->ShouldReduceMemory();
if (FLAG_manual_evacuation_candidates_selection) {
@@ -705,12 +693,12 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
// them starting with the page with the most free memory, adding them to the
// set of evacuation candidates as long as both conditions (fragmentation
// and quota) hold.
- int max_evacuated_bytes;
+ size_t max_evacuated_bytes;
int target_fragmentation_percent;
ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
&max_evacuated_bytes);
- const intptr_t free_bytes_threshold =
+ const size_t free_bytes_threshold =
target_fragmentation_percent * (area_size / 100);
// Sort pages from the most free to the least free, then select
@@ -723,8 +711,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
return a.first < b.first;
});
for (size_t i = 0; i < pages.size(); i++) {
- int live_bytes = pages[i].first;
- int free_bytes = area_size - live_bytes;
+ size_t live_bytes = pages[i].first;
+ DCHECK_GE(area_size, live_bytes);
+ size_t free_bytes = area_size - live_bytes;
if (FLAG_always_compact ||
((free_bytes >= free_bytes_threshold) &&
((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
@@ -733,10 +722,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
if (FLAG_trace_fragmentation_verbose) {
PrintIsolate(isolate(),
- "compaction-selection-page: space=%s free_bytes_page=%d "
- "fragmentation_limit_kb=%" V8PRIdPTR
- " fragmentation_limit_percent=%d sum_compaction_kb=%d "
- "compaction_limit_kb=%d\n",
+ "compaction-selection-page: space=%s free_bytes_page=%zu "
+ "fragmentation_limit_kb=%" PRIuS
+ " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+ "compaction_limit_kb=%zu\n",
AllocationSpaceName(space->identity()), free_bytes / KB,
free_bytes_threshold / KB, target_fragmentation_percent,
total_live_bytes / KB, max_evacuated_bytes / KB);
@@ -744,7 +733,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
// How many pages we will allocated for the evacuated objects
// in the worst case: ceil(total_live_bytes / area_size)
- int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+ int estimated_new_pages =
+ static_cast<int>((total_live_bytes + area_size - 1) / area_size);
DCHECK_LE(estimated_new_pages, candidate_count);
int estimated_released_pages = candidate_count - estimated_new_pages;
// Avoid (compact -> expand) cycles.
@@ -759,7 +749,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (FLAG_trace_fragmentation) {
PrintIsolate(isolate(),
"compaction-selection: space=%s reduce_memory=%d pages=%d "
- "total_live_bytes=%d\n",
+ "total_live_bytes=%zu\n",
AllocationSpaceName(space->identity()), reduce_memory,
candidate_count, total_live_bytes / KB);
}
@@ -794,6 +784,10 @@ void MarkCompactCollector::Prepare() {
EnsureSweepingCompleted();
}
+ if (heap()->incremental_marking()->IsSweeping()) {
+ heap()->incremental_marking()->Stop();
+ }
+
// If concurrent unmapping tasks are still running, we should wait for
// them here.
heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
@@ -810,14 +804,14 @@ void MarkCompactCollector::Prepare() {
if (heap_->UsingEmbedderHeapTracer()) {
heap_->embedder_heap_tracer()->AbortTracing();
}
+ marking_deque()->Clear();
was_marked_incrementally_ = false;
}
if (!was_marked_incrementally_) {
if (heap_->UsingEmbedderHeapTracer()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue(
- heap_->embedder_reachable_reference_reporter());
+ heap_->embedder_heap_tracer()->TracePrologue();
}
}
@@ -828,7 +822,7 @@ void MarkCompactCollector::Prepare() {
// Don't start compaction if we are in the middle of incremental
// marking cycle. We did not collect any slots.
if (!FLAG_never_compact && !was_marked_incrementally_) {
- StartCompaction(NON_INCREMENTAL_COMPACTION);
+ StartCompaction();
}
PagedSpaces spaces(heap());
@@ -849,10 +843,8 @@ void MarkCompactCollector::Prepare() {
void MarkCompactCollector::Finish() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
- if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
- // If we added some more pages during MC, we need to start at least one
- // more task as all other tasks might already be finished.
- sweeper().StartSweepingHelper(OLD_SPACE);
+ if (!heap()->delay_sweeper_tasks_for_testing_) {
+ sweeper().StartSweeperTasks();
}
// The hashing of weak_object_to_code_table is no longer valid.
@@ -1647,7 +1639,7 @@ class MarkCompactCollector::EvacuateVisitorBase
DCHECK_OBJECT_SIZE(size);
DCHECK(IsAligned(size, kPointerSize));
heap_->CopyBlock(dst_addr, src_addr, size);
- if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
+ if ((mode == kProfiled) && dst->IsBytecodeArray()) {
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), dst_addr));
}
@@ -1720,7 +1712,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
local_pretenuring_feedback_);
int size = object->Size();
HeapObject* target_object = nullptr;
- if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
+ if (heap_->ShouldBePromoted(object->address(), size) &&
TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
&target_object)) {
promoted_size_ += size;
@@ -1841,41 +1833,48 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
base::HashMap* local_pretenuring_feedback_;
};
+template <PageEvacuationMode mode>
class MarkCompactCollector::EvacuateNewSpacePageVisitor final
: public MarkCompactCollector::HeapObjectVisitor {
public:
- explicit EvacuateNewSpacePageVisitor(Heap* heap)
- : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
-
- static void MoveToOldSpace(Page* page, PagedSpace* owner) {
- page->Unlink();
- Page* new_page = Page::ConvertNewToOld(page, owner);
- new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- }
+ explicit EvacuateNewSpacePageVisitor(
+ Heap* heap, base::HashMap* local_pretenuring_feedback)
+ : heap_(heap),
+ moved_bytes_(0),
+ local_pretenuring_feedback_(local_pretenuring_feedback) {}
- static void MoveToToSpace(Page* page) {
- page->heap()->new_space()->MovePageFromSpaceToSpace(page);
- page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ static void Move(Page* page) {
+ switch (mode) {
+ case NEW_TO_NEW:
+ page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+ page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ break;
+ case NEW_TO_OLD: {
+ page->Unlink();
+ Page* new_page = Page::ConvertNewToOld(page);
+ new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ break;
+ }
+ }
}
inline bool Visit(HeapObject* object) {
- RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
- object->IterateBodyFast(&visitor);
- promoted_size_ += object->Size();
+ heap_->UpdateAllocationSite<Heap::kCached>(object,
+ local_pretenuring_feedback_);
+ if (mode == NEW_TO_OLD) {
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ object->IterateBodyFast(&visitor);
+ }
return true;
}
- intptr_t promoted_size() { return promoted_size_; }
- intptr_t semispace_copied_size() { return semispace_copied_size_; }
-
- void account_semispace_copied(intptr_t copied) {
- semispace_copied_size_ += copied;
- }
+ intptr_t moved_bytes() { return moved_bytes_; }
+ void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
private:
Heap* heap_;
- intptr_t promoted_size_;
- intptr_t semispace_copied_size_;
+ intptr_t moved_bytes_;
+ base::HashMap* local_pretenuring_feedback_;
};
class MarkCompactCollector::EvacuateOldSpaceVisitor final
@@ -2121,85 +2120,87 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
}
-
-void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
- DCHECK(!marking_deque()->in_use());
- if (marking_deque_memory_ == NULL) {
- marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
- marking_deque_memory_committed_ = 0;
- }
- if (marking_deque_memory_ == NULL) {
- V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+void MarkingDeque::SetUp() {
+ backing_store_ = new base::VirtualMemory(kMaxSize);
+ backing_store_committed_size_ = 0;
+ if (backing_store_ == nullptr) {
+ V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
}
}
+void MarkingDeque::TearDown() {
+ delete backing_store_;
+}
-void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
- // If the marking deque is too small, we try to allocate a bigger one.
- // If that fails, make do with a smaller one.
- CHECK(!marking_deque()->in_use());
- for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
- base::VirtualMemory* memory = marking_deque_memory_;
- size_t currently_committed = marking_deque_memory_committed_;
-
- if (currently_committed == size) return;
-
- if (currently_committed > size) {
- bool success = marking_deque_memory_->Uncommit(
- reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
- currently_committed - size);
- if (success) {
- marking_deque_memory_committed_ = size;
- return;
- }
- UNREACHABLE();
- }
-
- bool success = memory->Commit(
- reinterpret_cast<Address>(memory->address()) + currently_committed,
- size - currently_committed,
- false); // Not executable.
- if (success) {
- marking_deque_memory_committed_ = size;
- return;
- }
+void MarkingDeque::StartUsing() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (in_use_) {
+ // This can happen in mark-compact GC if the incremental marker already
+ // started using the marking deque.
+ return;
}
- V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
+ in_use_ = true;
+ EnsureCommitted();
+ array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
+ size_t size = FLAG_force_marking_deque_overflows
+ ? 64 * kPointerSize
+ : backing_store_committed_size_;
+ DCHECK(
+ base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+ mask_ = static_cast<int>((size / kPointerSize) - 1);
+ top_ = bottom_ = 0;
+ overflowed_ = false;
}
-
-void MarkCompactCollector::InitializeMarkingDeque() {
- DCHECK(!marking_deque()->in_use());
- DCHECK(marking_deque_memory_committed_ > 0);
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_committed_;
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque()->Initialize(addr, addr + size);
+void MarkingDeque::StopUsing() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ DCHECK(IsEmpty());
+ DCHECK(!overflowed_);
+ top_ = bottom_ = mask_ = 0;
+ in_use_ = false;
+ if (FLAG_concurrent_sweeping) {
+ StartUncommitTask();
+ } else {
+ Uncommit();
+ }
}
-
-void MarkingDeque::Initialize(Address low, Address high) {
- DCHECK(!in_use_);
- HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
- HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
- array_ = obj_low;
- mask_ = base::bits::RoundDownToPowerOfTwo32(
- static_cast<uint32_t>(obj_high - obj_low)) -
- 1;
+void MarkingDeque::Clear() {
+ DCHECK(in_use_);
top_ = bottom_ = 0;
overflowed_ = false;
- in_use_ = true;
}
+void MarkingDeque::Uncommit() {
+ DCHECK(!in_use_);
+ bool success = backing_store_->Uncommit(backing_store_->address(),
+ backing_store_committed_size_);
+ backing_store_committed_size_ = 0;
+ CHECK(success);
+}
-void MarkingDeque::Uninitialize(bool aborting) {
- if (!aborting) {
- DCHECK(IsEmpty());
- DCHECK(!overflowed_);
- }
+void MarkingDeque::EnsureCommitted() {
DCHECK(in_use_);
- top_ = bottom_ = 0xdecbad;
- in_use_ = false;
+ if (backing_store_committed_size_ > 0) return;
+
+ for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
+ if (backing_store_->Commit(backing_store_->address(), size, false)) {
+ backing_store_committed_size_ = size;
+ break;
+ }
+ }
+ if (backing_store_committed_size_ == 0) {
+ V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
+ }
+}
+
+void MarkingDeque::StartUncommitTask() {
+ if (!uncommit_task_pending_) {
+ uncommit_task_pending_ = true;
+ UncommitTask* task = new UncommitTask(heap_->isolate(), this);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
}
class MarkCompactCollector::ObjectStatsVisitor
@@ -2242,17 +2243,21 @@ void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
}
void MarkCompactCollector::RecordObjectStats() {
- if (FLAG_track_gc_object_stats) {
+ if (V8_UNLIKELY(FLAG_gc_stats)) {
+ heap()->CreateObjectStats();
ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
heap()->dead_object_stats_);
VisitAllObjects(&visitor);
- std::stringstream live, dead;
- heap()->live_object_stats_->Dump(live);
- heap()->dead_object_stats_->Dump(dead);
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
- "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
- "live", TRACE_STR_COPY(live.str().c_str()), "dead",
- TRACE_STR_COPY(dead.str().c_str()));
+ if (V8_UNLIKELY(FLAG_gc_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ std::stringstream live, dead;
+ heap()->live_object_stats_->Dump(live);
+ heap()->dead_object_stats_->Dump(dead);
+ TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+ "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
+ "live", TRACE_STR_COPY(live.str().c_str()), "dead",
+ TRACE_STR_COPY(dead.str().c_str()));
+ }
if (FLAG_trace_gc_object_stats) {
heap()->live_object_stats_->PrintJSON("live");
heap()->dead_object_stats_->PrintJSON("dead");
@@ -2275,11 +2280,7 @@ void MarkCompactCollector::MarkLiveObjects() {
if (was_marked_incrementally_) {
incremental_marking->Finalize();
} else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Stop();
- if (marking_deque()->in_use()) {
- marking_deque()->Uninitialize(true);
- }
+ CHECK(incremental_marking->IsStopped());
}
}
@@ -2288,8 +2289,7 @@ void MarkCompactCollector::MarkLiveObjects() {
state_ = MARK_LIVE_OBJECTS;
#endif
- EnsureMarkingDequeIsCommittedAndInitialize(
- MarkCompactCollector::kMaxMarkingDequeSize);
+ marking_deque()->StartUsing();
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
@@ -2410,8 +2410,6 @@ void MarkCompactCollector::ClearNonLiveReferences() {
MarkDependentCodeForDeoptimization(dependent_code_list);
ClearWeakCollections();
-
- ClearInvalidRememberedSetSlots();
}
@@ -2480,7 +2478,7 @@ void MarkCompactCollector::ClearSimpleMapTransitions(
Object* non_live_map_list) {
Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = non_live_map_list;
- while (weak_cell_obj != Smi::FromInt(0)) {
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
Map* map = Map::cast(weak_cell->value());
DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
@@ -2504,7 +2502,7 @@ void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
// A previously existing simple transition (stored in a WeakCell) is going
// to be cleared. Clear the useless cell pointer, and take ownership
// of the descriptor array.
- map->set_raw_transitions(Smi::FromInt(0));
+ map->set_raw_transitions(Smi::kZero);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
if (descriptors == dead_transition->instance_descriptors() &&
@@ -2519,7 +2517,7 @@ void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
void MarkCompactCollector::ClearFullMapTransitions() {
HeapObject* undefined = heap()->undefined_value();
Object* obj = heap()->encountered_transition_arrays();
- while (obj != Smi::FromInt(0)) {
+ while (obj != Smi::kZero) {
TransitionArray* array = TransitionArray::cast(obj);
int num_transitions = array->number_of_entries();
DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
@@ -2539,7 +2537,7 @@ void MarkCompactCollector::ClearFullMapTransitions() {
obj = array->next_link();
array->set_next_link(undefined, SKIP_WRITE_BARRIER);
}
- heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+ heap()->set_encountered_transition_arrays(Smi::kZero);
}
@@ -2643,7 +2641,7 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
void MarkCompactCollector::ProcessWeakCollections() {
Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::FromInt(0)) {
+ while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
DCHECK(MarkCompactCollector::IsMarked(weak_collection));
@@ -2669,7 +2667,7 @@ void MarkCompactCollector::ProcessWeakCollections() {
void MarkCompactCollector::ClearWeakCollections() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::FromInt(0)) {
+ while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
DCHECK(MarkCompactCollector::IsMarked(weak_collection));
@@ -2685,19 +2683,19 @@ void MarkCompactCollector::ClearWeakCollections() {
weak_collection_obj = weak_collection->next();
weak_collection->set_next(heap()->undefined_value());
}
- heap()->set_encountered_weak_collections(Smi::FromInt(0));
+ heap()->set_encountered_weak_collections(Smi::kZero);
}
void MarkCompactCollector::AbortWeakCollections() {
Object* weak_collection_obj = heap()->encountered_weak_collections();
- while (weak_collection_obj != Smi::FromInt(0)) {
+ while (weak_collection_obj != Smi::kZero) {
JSWeakCollection* weak_collection =
reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
weak_collection_obj = weak_collection->next();
weak_collection->set_next(heap()->undefined_value());
}
- heap()->set_encountered_weak_collections(Smi::FromInt(0));
+ heap()->set_encountered_weak_collections(Smi::kZero);
}
@@ -2709,8 +2707,8 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
Object* the_hole_value = heap->the_hole_value();
DependentCode* dependent_code_head =
DependentCode::cast(heap->empty_fixed_array());
- Object* non_live_map_head = Smi::FromInt(0);
- while (weak_cell_obj != Smi::FromInt(0)) {
+ Object* non_live_map_head = Smi::kZero;
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
Object* next_weak_cell = weak_cell->next();
bool clear_value = true;
@@ -2770,7 +2768,7 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
}
weak_cell_obj = next_weak_cell;
}
- heap->set_encountered_weak_cells(Smi::FromInt(0));
+ heap->set_encountered_weak_cells(Smi::kZero);
*non_live_map_list = non_live_map_head;
*dependent_code_list = dependent_code_head;
}
@@ -2779,24 +2777,24 @@ void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
void MarkCompactCollector::AbortWeakCells() {
Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
- while (weak_cell_obj != Smi::FromInt(0)) {
+ while (weak_cell_obj != Smi::kZero) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
weak_cell->clear_next(the_hole_value);
}
- heap()->set_encountered_weak_cells(Smi::FromInt(0));
+ heap()->set_encountered_weak_cells(Smi::kZero);
}
void MarkCompactCollector::AbortTransitionArrays() {
HeapObject* undefined = heap()->undefined_value();
Object* obj = heap()->encountered_transition_arrays();
- while (obj != Smi::FromInt(0)) {
+ while (obj != Smi::kZero) {
TransitionArray* array = TransitionArray::cast(obj);
obj = array->next_link();
array->set_next_link(undefined, SKIP_WRITE_BARRIER);
}
- heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+ heap()->set_encountered_transition_arrays(Smi::kZero);
}
void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
@@ -2889,128 +2887,6 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
return String::cast(*p);
}
-bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
- Space* owner = p->owner();
- DCHECK(owner != heap_->lo_space() && owner != nullptr);
- USE(owner);
-
- // We may be part of a black area.
- if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(slot))) {
- return true;
- }
-
- uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
- unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
- MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
- MarkBit::CellType* cells = p->markbits()->cells();
- Address base_address = p->area_start();
- unsigned int base_address_cell_index = Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
-
- // Check if the slot points to the start of an object. This can happen e.g.
- // when we left trim a fixed array. Such slots are invalid and we can remove
- // them.
- if (index_mask > 1) {
- if ((cells[cell_index] & index_mask) != 0 &&
- (cells[cell_index] & (index_mask >> 1)) == 0) {
- return false;
- }
- } else {
- // Left trimming moves the mark bits so we cannot be in the very first cell.
- DCHECK(cell_index != base_address_cell_index);
- if ((cells[cell_index] & index_mask) != 0 &&
- (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
- return false;
- }
- }
-
- // Check if the object is in the current cell.
- MarkBit::CellType slot_mask;
- if ((cells[cell_index] == 0) ||
- (base::bits::CountTrailingZeros32(cells[cell_index]) >
- base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
- // If we are already in the first cell, there is no live object.
- if (cell_index == base_address_cell_index) return false;
-
- // If not, find a cell in a preceding cell slot that has a mark bit set.
- do {
- cell_index--;
- } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
-
- // The slot must be in a dead object if there are no preceding cells that
- // have mark bits set.
- if (cells[cell_index] == 0) {
- return false;
- }
-
- // The object is in a preceding cell. Set the mask to find any object.
- slot_mask = ~0u;
- } else {
- // We are interested in object mark bits right before the slot.
- slot_mask = index_mask + (index_mask - 1);
- }
-
- MarkBit::CellType current_cell = cells[cell_index];
- CHECK(current_cell != 0);
-
- // Find the last live object in the cell.
- unsigned int leading_zeros =
- base::bits::CountLeadingZeros32(current_cell & slot_mask);
- CHECK(leading_zeros != Bitmap::kBitsPerCell);
- int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
-
- base_address += (cell_index - base_address_cell_index) *
- Bitmap::kBitsPerCell * kPointerSize;
- Address address = base_address + offset * kPointerSize;
-
- // If the found mark bit is part of a black area, the slot cannot be part
- // of a live object since it is not marked.
- if (p->IsBlackAreaEndMarker(address + kPointerSize)) return false;
-
- HeapObject* object = HeapObject::FromAddress(address);
- CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
- CHECK(object->address() < reinterpret_cast<Address>(slot));
- if ((object->address() + kPointerSize) <= slot &&
- (object->address() + object->Size()) > slot) {
- // If the slot is within the last found object in the cell, the slot is
- // in a live object.
- // Slots pointing to the first word of an object are invalid and removed.
- // This can happen when we move the object header while left trimming.
- return true;
- }
- return false;
-}
-
-HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
- Page* p = Page::FromAddress(slot);
- Space* owner = p->owner();
- if (owner == heap_->lo_space() || owner == nullptr) {
- Object* large_object = heap_->lo_space()->FindObject(slot);
- // This object has to exist, otherwise we would not have recorded a slot
- // for it.
- CHECK(large_object->IsHeapObject());
- HeapObject* large_heap_object = HeapObject::cast(large_object);
-
- if (IsMarked(large_heap_object)) {
- return large_heap_object;
- }
- return nullptr;
- }
-
- LiveObjectIterator<kBlackObjects> it(p);
- HeapObject* object = nullptr;
- while ((object = it.Next()) != nullptr) {
- int size = object->Size();
- if (object->address() > slot) return nullptr;
- if (object->address() <= slot && slot < (object->address() + size)) {
- return object;
- }
- }
-
- return nullptr;
-}
-
-
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
@@ -3055,7 +2931,11 @@ class MarkCompactCollector::Evacuator : public Malloced {
local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
- new_space_page_visitor(collector->heap()),
+ new_to_new_page_visitor_(collector->heap(),
+ &local_pretenuring_feedback_),
+ new_to_old_page_visitor_(collector->heap(),
+ &local_pretenuring_feedback_),
+
old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3086,7 +2966,10 @@ class MarkCompactCollector::Evacuator : public Malloced {
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
- EvacuateNewSpacePageVisitor new_space_page_visitor;
+ EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
+ new_to_new_page_visitor_;
+ EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
+ new_to_old_page_visitor_;
EvacuateOldSpaceVisitor old_space_visitor_;
// Book keeping info.
@@ -3107,20 +2990,23 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
case kObjectsNewToOld:
success = collector_->VisitLiveObjects(page, &new_space_visitor_,
kClearMarkbits);
+ DCHECK(success);
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
- DCHECK(success);
break;
case kPageNewToOld:
- success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+ success = collector_->VisitLiveObjects(page, &new_to_old_page_visitor_,
kKeepMarking);
- // ArrayBufferTracker will be updated during sweeping.
DCHECK(success);
+ new_to_old_page_visitor_.account_moved_bytes(page->LiveBytes());
+ // ArrayBufferTracker will be updated during sweeping.
break;
case kPageNewToNew:
- new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+ success = collector_->VisitLiveObjects(page, &new_to_new_page_visitor_,
+ kKeepMarking);
+ DCHECK(success);
+ new_to_new_page_visitor_.account_moved_bytes(page->LiveBytes());
// ArrayBufferTracker will be updated during sweeping.
- success = true;
break;
case kObjectsOldToOld:
success = collector_->VisitLiveObjects(page, &old_space_visitor_,
@@ -3145,8 +3031,6 @@ bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
}
break;
- default:
- UNREACHABLE();
}
}
ReportCompactionProgress(evacuation_time, saved_live_bytes);
@@ -3172,15 +3056,15 @@ void MarkCompactCollector::Evacuator::Finalize() {
compaction_spaces_.Get(CODE_SPACE));
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
- new_space_page_visitor.promoted_size());
+ new_to_old_page_visitor_.moved_bytes());
heap()->IncrementSemiSpaceCopiedObjectSize(
new_space_visitor_.semispace_copied_size() +
- new_space_page_visitor.semispace_copied_size());
+ new_to_new_page_visitor_.moved_bytes());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size() +
- new_space_page_visitor.promoted_size() +
- new_space_page_visitor.semispace_copied_size());
+ new_to_old_page_visitor_.moved_bytes() +
+ new_to_new_page_visitor_.moved_bytes());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
@@ -3192,17 +3076,15 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
//
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
- // - (#cores - 1)
+ // - #cores
const double kTargetCompactionTimeInMs = .5;
- const int kNumSweepingTasks = 3;
double compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
const int available_cores = Max(
1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
- kNumSweepingTasks - 1);
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
int tasks;
if (compaction_speed > 0) {
tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
@@ -3279,9 +3161,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
!page->Contains(age_mark)) {
if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
- EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
} else {
- EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
}
}
@@ -3335,6 +3217,18 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
}
};
+MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
+MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
+ AllocationSpace identity = p->owner()->identity();
+ if (p->old_to_new_slots() &&
+ (identity == OLD_SPACE || identity == MAP_SPACE)) {
+ return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
+ } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
+ return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
+ }
+ return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
+}
+
int MarkCompactCollector::Sweeper::RawSweep(
Page* p, FreeListRebuildingMode free_list_mode,
FreeSpaceTreatmentMode free_space_mode) {
@@ -3344,13 +3238,17 @@ int MarkCompactCollector::Sweeper::RawSweep(
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+ // If there are old-to-new slots in that page, we have to filter out slots
+ // that are in dead memory which is freed by the sweeper.
+ ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
+
+ // The free ranges map is used for filtering typed slots.
+ std::map<uint32_t, uint32_t> free_ranges;
+
// Before we sweep objects on the page, we free dead array buffers which
// requires valid mark bits.
ArrayBufferTracker::FreeDead(p);
- // We also release the black area markers here.
- p->ReleaseBlackAreaEndMarkerMap();
-
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3370,11 +3268,13 @@ int MarkCompactCollector::Sweeper::RawSweep(
LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = NULL;
+
while ((object = it.Next()) != NULL) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
Address free_end = object->address();
if (free_end != free_start) {
- int size = static_cast<int>(free_end - free_start);
+ CHECK_GT(free_end, free_start);
+ size_t size = static_cast<size_t>(free_end - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
@@ -3383,9 +3283,18 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
- p->heap()->CreateFillerObjectAt(free_start, size,
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
+
+ if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+ free_ranges.insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - p->address()),
+ static_cast<uint32_t>(free_end - p->address())));
+ }
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
@@ -3401,11 +3310,9 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start = free_end + size;
}
- // Clear the mark bits of that page and reset live bytes count.
- p->ClearLiveness();
-
if (free_start != p->area_end()) {
- int size = static_cast<int>(p->area_end() - free_start);
+ CHECK_GT(p->area_end(), free_start);
+ size_t size = static_cast<size_t>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
@@ -3414,13 +3321,31 @@ int MarkCompactCollector::Sweeper::RawSweep(
free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
} else {
- p->heap()->CreateFillerObjectAt(free_start, size,
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
+
+ if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
+ SlotSet::KEEP_EMPTY_BUCKETS);
+ } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+ free_ranges.insert(std::pair<uint32_t, uint32_t>(
+ static_cast<uint32_t>(free_start - p->address()),
+ static_cast<uint32_t>(p->area_end() - p->address())));
+ }
}
+
+ // Clear invalid typed slots after collecting all free ranges.
+ if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
+ p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
+ }
+
+ // Clear the mark bits of that page and reset live bytes count.
+ p->ClearLiveness();
+
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
- return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
+ return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
}
void MarkCompactCollector::InvalidateCode(Code* code) {
@@ -3480,7 +3405,8 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
page->AddressToMarkbitIndex(object->address()));
if (page->old_to_new_slots() != nullptr) {
page->old_to_new_slots()->RemoveRange(
- 0, static_cast<int>(object->address() - page->address()));
+ 0, static_cast<int>(object->address() - page->address()),
+ SlotSet::PREFREE_EMPTY_BUCKETS);
}
if (page->typed_old_to_new_slots() != nullptr) {
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
@@ -3545,12 +3471,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
for (Page* p : newspace_evacuation_candidates_) {
if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
- sweeper().AddLatePage(p->owner()->identity(), p);
+ sweeper().AddPage(p->owner()->identity(), p);
} else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- sweeper().AddLatePage(p->owner()->identity(), p);
+ sweeper().AddPage(p->owner()->identity(), p);
}
}
newspace_evacuation_candidates_.Rewind(0);
@@ -3562,7 +3488,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- sweeper().AddLatePage(p->owner()->identity(), p);
+ sweeper().AddPage(p->owner()->identity(), p);
p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
}
}
@@ -3631,34 +3557,48 @@ class PointerUpdateJobTraits {
static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
Address slot_address) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- if (heap->InFromSpace(*slot)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+ // There may be concurrent action on slots in dead objects. Concurrent
+ // sweeper threads may overwrite the slot content with a free space object.
+ // Moreover, the pointed-to object may also get concurrently overwritten
+ // with a free space object. The sweeper always gets priority performing
+ // these writes.
+ base::NoBarrierAtomicValue<Object*>* slot =
+ base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
+ Object* slot_reference = slot->Value();
+ if (heap->InFromSpace(slot_reference)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
DCHECK(heap_object->IsHeapObject());
MapWord map_word = heap_object->map_word();
// There could still be stale pointers in large object space, map space,
// and old space for pages that have been promoted.
if (map_word.IsForwardingAddress()) {
- // Update the corresponding slot.
- *slot = map_word.ToForwardingAddress();
+ // A sweeper thread may concurrently write a size value which looks like
+ // a forwarding pointer. We have to ignore these values.
+ if (map_word.ToRawValue() < Page::kPageSize) {
+ return REMOVE_SLOT;
+ }
+ // Update the corresponding slot only if the slot content did not
+ // change in the meantime. This may happen when a concurrent sweeper
+ // thread stored a free space object at that memory location.
+ slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
}
// If the object was in from space before and is after executing the
// callback in to space, the object is still live.
// Unfortunately, we do not know about the slot. It could be in a
// just freed free space object.
- if (heap->InToSpace(*slot)) {
+ if (heap->InToSpace(slot->Value())) {
return KEEP_SLOT;
}
- } else if (heap->InToSpace(*slot)) {
+ } else if (heap->InToSpace(slot_reference)) {
// Slots can point to "to" space if the page has been moved, or if the
// slot has been recorded multiple times in the remembered set. Since
// there is no forwarding information present we need to check the
// markbits to determine liveness.
- if (Marking::IsBlack(
- ObjectMarking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(
+ reinterpret_cast<HeapObject*>(slot_reference))))
return KEEP_SLOT;
} else {
- DCHECK(!heap->InNewSpace(*slot));
+ DCHECK(!heap->InNewSpace(slot_reference));
}
return REMOVE_SLOT;
}
@@ -3666,9 +3606,11 @@ class PointerUpdateJobTraits {
int NumberOfPointerUpdateTasks(int pages) {
if (!FLAG_parallel_pointer_update) return 1;
- const int kMaxTasks = 4;
+ const int available_cores = Max(
+ 1, static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
const int kPagesPerTask = 4;
- return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
+ return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
}
template <PointerDirection direction>
@@ -3813,24 +3755,21 @@ int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
AllocationSpace identity) {
int max_freed = 0;
- if (page->mutex()->TryLock()) {
+ {
+ base::LockGuard<base::Mutex> guard(page->mutex());
// If this page was already swept in the meantime, we can return here.
- if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
- page->mutex()->Unlock();
- return 0;
- }
+ if (page->SweepingDone()) return 0;
+ DCHECK_EQ(Page::kSweepingPending,
+ page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
const Sweeper::FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
- } else if (identity == OLD_SPACE) {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
- } else if (identity == CODE_SPACE) {
- max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
} else {
max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
}
+ DCHECK(page->SweepingDone());
// After finishing sweeping of a page we clean up its remembered set.
if (page->typed_old_to_new_slots()) {
@@ -3839,35 +3778,26 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
if (page->old_to_new_slots()) {
page->old_to_new_slots()->FreeToBeFreedBuckets();
}
+ }
- {
- base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[identity].Add(page);
- }
- page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
- page->mutex()->Unlock();
+ {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ swept_list_[identity].Add(page);
}
return max_freed;
}
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
- DCHECK(!sweeping_in_progress_);
+ DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
PrepareToBeSweptPage(space, page);
sweeping_list_[space].push_back(page);
}
-void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
- Page* page) {
- DCHECK(sweeping_in_progress_);
- PrepareToBeSweptPage(space, page);
- late_pages_ = true;
- AddSweepingPageSafe(space, page);
-}
-
void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
- int to_sweep = page->area_size() - page->LiveBytes();
+ DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
+ size_t to_sweep = page->area_size() - page->LiveBytes();
if (space != NEW_SPACE)
heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}
@@ -3903,7 +3833,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (p->IsEvacuationCandidate()) {
// Will be processed in EvacuateNewSpaceAndCandidates.
DCHECK(evacuation_candidates_.length() > 0);
- DCHECK(!p->HasBlackAreas());
continue;
}
@@ -3943,8 +3872,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
}
}
-
-void MarkCompactCollector::SweepSpaces() {
+void MarkCompactCollector::StartSweepSpaces() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
state_ = SWEEP_SPACES;
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 2cbb369f76..de182073ea 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -8,6 +8,8 @@
#include <deque>
#include "src/base/bits.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/cancelable-task.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
@@ -52,16 +54,26 @@ class ObjectMarking : public AllStatic {
// Marking deque for tracing live objects.
class MarkingDeque {
public:
- MarkingDeque()
- : array_(NULL),
+ explicit MarkingDeque(Heap* heap)
+ : backing_store_(nullptr),
+ backing_store_committed_size_(0),
+ array_(nullptr),
top_(0),
bottom_(0),
mask_(0),
overflowed_(false),
- in_use_(false) {}
+ in_use_(false),
+ uncommit_task_pending_(false),
+ heap_(heap) {}
- void Initialize(Address low, Address high);
- void Uninitialize(bool aborting = false);
+ void SetUp();
+ void TearDown();
+
+ // Ensures that the marking deque is committed and will stay committed until
+ // StopUsing() is called.
+ void StartUsing();
+ void StopUsing();
+ void Clear();
inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
@@ -69,8 +81,6 @@ class MarkingDeque {
bool overflowed() const { return overflowed_; }
- bool in_use() const { return in_use_; }
-
void ClearOverflowed() { overflowed_ = false; }
void SetOverflowed() { overflowed_ = true; }
@@ -118,6 +128,43 @@ class MarkingDeque {
void set_top(int top) { top_ = top; }
private:
+ // This task uncommits the marking_deque backing store if
+ // marking_deque->in_use_ is false.
+ class UncommitTask : public CancelableTask {
+ public:
+ explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
+ : CancelableTask(isolate), marking_deque_(marking_deque) {}
+
+ private:
+ // CancelableTask override.
+ void RunInternal() override {
+ base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
+ if (!marking_deque_->in_use_) {
+ marking_deque_->Uncommit();
+ }
+ marking_deque_->uncommit_task_pending_ = false;
+ }
+
+ MarkingDeque* marking_deque_;
+ DISALLOW_COPY_AND_ASSIGN(UncommitTask);
+ };
+
+ static const size_t kMaxSize = 4 * MB;
+ static const size_t kMinSize = 256 * KB;
+
+ // Must be called with mutex lock.
+ void EnsureCommitted();
+
+ // Must be called with mutex lock.
+ void Uncommit();
+
+ // Must be called with mutex lock.
+ void StartUncommitTask();
+
+ base::Mutex mutex_;
+
+ base::VirtualMemory* backing_store_;
+ size_t backing_store_committed_size_;
HeapObject** array_;
// array_[(top - 1) & mask_] is the top element in the deque. The Deque is
// empty when top_ == bottom_. It is full when top_ + 1 == bottom
@@ -126,7 +173,11 @@ class MarkingDeque {
int bottom_;
int mask_;
bool overflowed_;
+ // in_use_ == true after taking mutex lock implies that the marking deque is
+ // committed and will stay committed at least until in_use_ == false.
bool in_use_;
+ bool uncommit_task_pending_;
+ Heap* heap_;
DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
@@ -270,12 +321,16 @@ class LiveObjectIterator BASE_EMBEDDED {
HeapObject* Next();
private:
+ inline Heap* heap() { return chunk_->heap(); }
+
MemoryChunk* chunk_;
MarkBitCellIterator it_;
Address cell_base_;
MarkBit::CellType current_cell_;
};
+enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
+
// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
@@ -288,6 +343,11 @@ class MarkCompactCollector {
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+ enum ClearOldToNewSlotsMode {
+ DO_NOT_CLEAR,
+ CLEAR_REGULAR_SLOTS,
+ CLEAR_TYPED_SLOTS
+ };
typedef std::deque<Page*> SweepingList;
typedef List<Page*> SweptList;
@@ -299,24 +359,25 @@ class MarkCompactCollector {
: heap_(heap),
pending_sweeper_tasks_semaphore_(0),
sweeping_in_progress_(false),
- late_pages_(false),
num_sweeping_tasks_(0) {}
bool sweeping_in_progress() { return sweeping_in_progress_; }
- bool contains_late_pages() { return late_pages_; }
void AddPage(AllocationSpace space, Page* page);
- void AddLatePage(AllocationSpace space, Page* page);
int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
int max_pages = 0);
int ParallelSweepPage(Page* page, AllocationSpace identity);
+ // After calling this function sweeping is considered to be in progress
+ // and the main thread can sweep lazily, but the background sweeper tasks
+ // are not running yet.
void StartSweeping();
- void StartSweepingHelper(AllocationSpace space_to_start);
+ void StartSweeperTasks();
void EnsureCompleted();
void EnsureNewSpaceCompleted();
- bool IsSweepingCompleted();
+ bool AreSweeperTasksRunning();
+ bool IsSweepingCompleted(AllocationSpace space);
void SweepOrWaitUntilSweepingCompleted(Page* page);
void AddSweptPageSafe(PagedSpace* space, Page* page);
@@ -325,6 +386,8 @@ class MarkCompactCollector {
private:
static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;
+ static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);
+
template <typename Callback>
void ForAllSweepingSpaces(Callback callback) {
for (int i = 0; i < kAllocationSpaces; i++) {
@@ -343,7 +406,6 @@ class MarkCompactCollector {
SweptList swept_list_[kAllocationSpaces];
SweepingList sweeping_list_[kAllocationSpaces];
bool sweeping_in_progress_;
- bool late_pages_;
base::AtomicNumber<intptr_t> num_sweeping_tasks_;
};
@@ -369,9 +431,7 @@ class MarkCompactCollector {
// Performs a global garbage collection.
void CollectGarbage();
- enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
-
- bool StartCompaction(CompactionMode mode);
+ bool StartCompaction();
void AbortCompaction();
@@ -412,7 +472,7 @@ class MarkCompactCollector {
->ShouldSkipEvacuationSlotRecording();
}
- INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+ static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
return Page::FromAddress(reinterpret_cast<Address>(obj))
->IsEvacuationCandidate();
}
@@ -463,34 +523,10 @@ class MarkCompactCollector {
MarkingDeque* marking_deque() { return &marking_deque_; }
- static const size_t kMaxMarkingDequeSize = 4 * MB;
- static const size_t kMinMarkingDequeSize = 256 * KB;
-
- void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
- if (!marking_deque()->in_use()) {
- EnsureMarkingDequeIsCommitted(max_size);
- InitializeMarkingDeque();
- }
- }
-
- void EnsureMarkingDequeIsCommitted(size_t max_size);
- void EnsureMarkingDequeIsReserved();
-
- void InitializeMarkingDeque();
-
- // The following two methods can just be called after marking, when the
- // whole transitive closure is known. They must be called before sweeping
- // when mark bits are still intact.
- bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
- HeapObject* FindBlackObjectBySlotSlow(Address slot);
-
- // Removes all the slots in the slot buffers that are within the given
- // address range.
- void RemoveObjectSlots(Address start_slot, Address end_slot);
-
Sweeper& sweeper() { return sweeper_; }
private:
+ template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor;
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
@@ -502,11 +538,10 @@ class MarkCompactCollector {
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
- void ClearInvalidRememberedSetSlots();
- void ComputeEvacuationHeuristics(int area_size,
+ void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
- int* max_evacuated_bytes);
+ size_t* max_evacuated_bytes);
void VisitAllObjects(HeapObjectVisitor* visitor);
@@ -644,21 +679,10 @@ class MarkCompactCollector {
void AbortTransitionArrays();
- // -----------------------------------------------------------------------
- // Phase 2: Sweeping to clear mark bits and free non-live objects for
- // a non-compacting collection.
- //
- // Before: Live objects are marked and non-live objects are unmarked.
- //
- // After: Live objects are unmarked, non-live regions have been added to
- // their space's free list. Active eden semispace is compacted by
- // evacuation.
- //
-
- // If we are not compacting the heap, we simply sweep the spaces except
- // for the large object space, clearing mark bits and adding unmarked
- // regions to each space's free list.
- void SweepSpaces();
+ // Starts sweeping of spaces by contributing on the main thread and setting
+ // up other pages for sweeping. Does not start sweeper tasks.
+ void StartSweepSpaces();
+ void StartSweepSpace(PagedSpace* space);
void EvacuateNewSpacePrologue();
@@ -681,9 +705,6 @@ class MarkCompactCollector {
void ReleaseEvacuationCandidates();
- // Starts sweeping of a space by contributing on the main thread and setting
- // up other pages for sweeping.
- void StartSweepSpace(PagedSpace* space);
#ifdef DEBUG
friend class MarkObjectVisitor;
@@ -726,8 +747,6 @@ class MarkCompactCollector {
bool have_code_to_deoptimize_;
- base::VirtualMemory* marking_deque_memory_;
- size_t marking_deque_memory_committed_;
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index ba9010e7bc..2aed4c714a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -24,27 +24,26 @@ MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
void MemoryReducer::TimerTask::RunInternal() {
- const double kJsCallsPerMsThreshold = 0.5;
Heap* heap = memory_reducer_->heap();
Event event;
double time_ms = heap->MonotonicallyIncreasingTimeInMs();
heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
heap->OldGenerationAllocationCounter());
- double js_call_rate = memory_reducer_->SampleAndGetJsCallsPerMs(time_ms);
bool low_allocation_rate = heap->HasLowAllocationRate();
- bool is_idle = js_call_rate < kJsCallsPerMsThreshold && low_allocation_rate;
bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap->isolate(), "Memory reducer: call rate %.3lf, %s, %s\n",
- js_call_rate, low_allocation_rate ? "low alloc" : "high alloc",
- optimize_for_memory ? "background" : "foreground");
+ heap->isolate()->PrintWithTimestamp(
+ "Memory reducer: %s, %s\n",
+ low_allocation_rate ? "low alloc" : "high alloc",
+ optimize_for_memory ? "background" : "foreground");
}
event.type = kTimer;
event.time_ms = time_ms;
// The memory reducer will start incremental markig if
// 1) mutator is likely idle: js call rate is low and allocation rate is low.
// 2) mutator is in background: optimize for memory flag is set.
- event.should_start_incremental_gc = is_idle || optimize_for_memory;
+ event.should_start_incremental_gc =
+ low_allocation_rate || optimize_for_memory;
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
(heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
@@ -52,16 +51,6 @@ void MemoryReducer::TimerTask::RunInternal() {
}
-double MemoryReducer::SampleAndGetJsCallsPerMs(double time_ms) {
- unsigned int counter = heap()->isolate()->js_calls_from_api_counter();
- unsigned int call_delta = counter - js_calls_counter_;
- double time_delta_ms = time_ms - js_calls_sample_time_ms_;
- js_calls_counter_ = counter;
- js_calls_sample_time_ms_ = time_ms;
- return time_delta_ms > 0 ? call_delta / time_delta_ms : 0;
-}
-
-
void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK_EQ(kTimer, event.type);
DCHECK_EQ(kWait, state_.action);
@@ -70,8 +59,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
DCHECK(heap()->incremental_marking()->IsStopped());
DCHECK(FLAG_incremental_marking);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
- state_.started_gcs);
+ heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
+ state_.started_gcs);
}
heap()->StartIdleIncrementalMarking(
GarbageCollectionReason::kMemoryReducer);
@@ -93,8 +82,9 @@ void MemoryReducer::NotifyTimer(const Event& event) {
// Re-schedule the timer.
ScheduleTimer(event.time_ms, state_.next_gc_start_ms - event.time_ms);
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(), "Memory reducer: waiting for %.f ms\n",
- state_.next_gc_start_ms - event.time_ms);
+ heap()->isolate()->PrintWithTimestamp(
+ "Memory reducer: waiting for %.f ms\n",
+ state_.next_gc_start_ms - event.time_ms);
}
}
}
@@ -110,9 +100,9 @@ void MemoryReducer::NotifyMarkCompact(const Event& event) {
}
if (old_action == kRun) {
if (FLAG_trace_gc_verbose) {
- PrintIsolate(heap()->isolate(), "Memory reducer: finished GC #%d (%s)\n",
- state_.started_gcs,
- state_.action == kWait ? "will do more" : "done");
+ heap()->isolate()->PrintWithTimestamp(
+ "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
+ state_.action == kWait ? "will do more" : "done");
}
}
}
@@ -194,8 +184,6 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
DCHECK(delay_ms > 0);
- // Record the time and the js call counter.
- SampleAndGetJsCallsPerMs(time_ms);
// Leave some room for precision error in task scheduler.
const double kSlackMs = 100;
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap()->isolate());
@@ -204,7 +192,6 @@ void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}
-
void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
} // namespace internal
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0fe53e5fea..0421987a3c 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -8,6 +8,7 @@
#include "include/v8-platform.h"
#include "src/base/macros.h"
#include "src/cancelable-task.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -79,7 +80,7 @@ class Heap;
// now_ms is the current time,
// t' is t if the current event is not a GC event and is now_ms otherwise,
// long_delay_ms, short_delay_ms, and watchdog_delay_ms are constants.
-class MemoryReducer {
+class V8_EXPORT_PRIVATE MemoryReducer {
public:
enum Action { kDone, kWait, kRun };
@@ -148,9 +149,6 @@ class MemoryReducer {
static bool WatchdogGC(const State& state, const Event& event);
- // Returns the rate of JS calls initiated from the API.
- double SampleAndGetJsCallsPerMs(double time_ms);
-
Heap* heap_;
State state_;
unsigned int js_calls_counter_;
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index 6e4b50ec24..ef5f65734e 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -52,55 +52,59 @@ V8_NOINLINE static void DumpJSONArray(std::stringstream& stream, size_t* array,
stream << "]";
}
+void ObjectStats::PrintKeyAndId(const char* key, int gc_count) {
+ PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ",
+ reinterpret_cast<void*>(isolate()), gc_count, key);
+}
+
+void ObjectStats::PrintInstanceTypeJSON(const char* key, int gc_count,
+ const char* name, int index) {
+ PrintF("{ ");
+ PrintKeyAndId(key, gc_count);
+ PrintF("\"type\": \"instance_type_data\", ");
+ PrintF("\"instance_type\": %d, ", index);
+ PrintF("\"instance_type_name\": \"%s\", ", name);
+ PrintF("\"overall\": %zu, ", object_sizes_[index]);
+ PrintF("\"count\": %zu, ", object_counts_[index]);
+ PrintF("\"over_allocated\": %zu, ", over_allocated_[index]);
+ PrintF("\"histogram\": ");
+ PrintJSONArray(size_histogram_[index], kNumberOfBuckets);
+ PrintF(",");
+ PrintF("\"over_allocated_histogram\": ");
+ PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets);
+ PrintF(" }\n");
+}
+
void ObjectStats::PrintJSON(const char* key) {
double time = isolate()->time_millis_since_init();
int gc_count = heap()->gc_count();
-#define PRINT_KEY_AND_ID() \
- PrintF("\"isolate\": \"%p\", \"id\": %d, \"key\": \"%s\", ", \
- reinterpret_cast<void*>(isolate()), gc_count, key);
-
// gc_descriptor
PrintF("{ ");
- PRINT_KEY_AND_ID();
+ PrintKeyAndId(key, gc_count);
PrintF("\"type\": \"gc_descriptor\", \"time\": %f }\n", time);
// bucket_sizes
PrintF("{ ");
- PRINT_KEY_AND_ID();
+ PrintKeyAndId(key, gc_count);
PrintF("\"type\": \"bucket_sizes\", \"sizes\": [ ");
for (int i = 0; i < kNumberOfBuckets; i++) {
PrintF("%d", 1 << (kFirstBucketShift + i));
if (i != (kNumberOfBuckets - 1)) PrintF(", ");
}
PrintF(" ] }\n");
-// instance_type_data
-#define PRINT_INSTANCE_TYPE_DATA(name, index) \
- PrintF("{ "); \
- PRINT_KEY_AND_ID(); \
- PrintF("\"type\": \"instance_type_data\", "); \
- PrintF("\"instance_type\": %d, ", index); \
- PrintF("\"instance_type_name\": \"%s\", ", name); \
- PrintF("\"overall\": %zu, ", object_sizes_[index]); \
- PrintF("\"count\": %zu, ", object_counts_[index]); \
- PrintF("\"over_allocated\": %zu, ", over_allocated_[index]); \
- PrintF("\"histogram\": "); \
- PrintJSONArray(size_histogram_[index], kNumberOfBuckets); \
- PrintF(","); \
- PrintF("\"over_allocated_histogram\": "); \
- PrintJSONArray(over_allocated_histogram_[index], kNumberOfBuckets); \
- PrintF(" }\n");
-#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
-#define CODE_KIND_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name)
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name)
-#define CODE_AGE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA( \
- "*CODE_AGE_" #name, \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+#define INSTANCE_TYPE_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, #name, name);
+#define CODE_KIND_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, "*CODE_" #name, \
+ FIRST_CODE_KIND_SUB_TYPE + Code::name);
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+ PrintInstanceTypeJSON(key, gc_count, "*FIXED_ARRAY_" #name, \
+ FIRST_FIXED_ARRAY_SUB_TYPE + name);
+#define CODE_AGE_WRAPPER(name) \
+ PrintInstanceTypeJSON( \
+ key, gc_count, "*CODE_AGE_" #name, \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER)
CODE_KIND_LIST(CODE_KIND_WRAPPER)
@@ -115,6 +119,20 @@ void ObjectStats::PrintJSON(const char* key) {
#undef PRINT_KEY_AND_ID
}
+void ObjectStats::DumpInstanceTypeData(std::stringstream& stream,
+ const char* name, int index) {
+ stream << "\"" << name << "\":{";
+ stream << "\"type\":" << static_cast<int>(index) << ",";
+ stream << "\"overall\":" << object_sizes_[index] << ",";
+ stream << "\"count\":" << object_counts_[index] << ",";
+ stream << "\"over_allocated\":" << over_allocated_[index] << ",";
+ stream << "\"histogram\":";
+ DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets);
+ stream << ",\"over_allocated_histogram\":";
+ DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets);
+ stream << "},";
+}
+
void ObjectStats::Dump(std::stringstream& stream) {
double time = isolate()->time_millis_since_init();
int gc_count = heap()->gc_count();
@@ -131,29 +149,19 @@ void ObjectStats::Dump(std::stringstream& stream) {
stream << "],";
stream << "\"type_data\":{";
-#define PRINT_INSTANCE_TYPE_DATA(name, index) \
- stream << "\"" << name << "\":{"; \
- stream << "\"type\":" << static_cast<int>(index) << ","; \
- stream << "\"overall\":" << object_sizes_[index] << ","; \
- stream << "\"count\":" << object_counts_[index] << ","; \
- stream << "\"over_allocated\":" << over_allocated_[index] << ","; \
- stream << "\"histogram\":"; \
- DumpJSONArray(stream, size_histogram_[index], kNumberOfBuckets); \
- stream << ",\"over_allocated_histogram\":"; \
- DumpJSONArray(stream, over_allocated_histogram_[index], kNumberOfBuckets); \
- stream << "},";
+#define INSTANCE_TYPE_WRAPPER(name) DumpInstanceTypeData(stream, #name, name);
+#define CODE_KIND_WRAPPER(name) \
+ DumpInstanceTypeData(stream, "*CODE_" #name, \
+ FIRST_CODE_KIND_SUB_TYPE + Code::name);
+
+#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
+ DumpInstanceTypeData(stream, "*FIXED_ARRAY_" #name, \
+ FIRST_FIXED_ARRAY_SUB_TYPE + name);
-#define INSTANCE_TYPE_WRAPPER(name) PRINT_INSTANCE_TYPE_DATA(#name, name)
-#define CODE_KIND_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*CODE_" #name, \
- FIRST_CODE_KIND_SUB_TYPE + Code::name)
-#define FIXED_ARRAY_SUB_INSTANCE_TYPE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA("*FIXED_ARRAY_" #name, \
- FIRST_FIXED_ARRAY_SUB_TYPE + name)
-#define CODE_AGE_WRAPPER(name) \
- PRINT_INSTANCE_TYPE_DATA( \
- "*CODE_AGE_" #name, \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge)
+#define CODE_AGE_WRAPPER(name) \
+ DumpInstanceTypeData( \
+ stream, "*CODE_AGE_" #name, \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge);
INSTANCE_TYPE_LIST(INSTANCE_TYPE_WRAPPER);
CODE_KIND_LIST(CODE_KIND_WRAPPER);
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
index add5a12b04..7d0cfb5a69 100644
--- a/deps/v8/src/heap/object-stats.h
+++ b/deps/v8/src/heap/object-stats.h
@@ -75,6 +75,9 @@ class ObjectStats {
over_allocated;
over_allocated_histogram_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]
[HistogramIndexFromSize(over_allocated)]++;
+ over_allocated_[InstanceType::FIXED_ARRAY_TYPE] += over_allocated;
+ over_allocated_histogram_[InstanceType::FIXED_ARRAY_TYPE]
+ [HistogramIndexFromSize(over_allocated)]++;
}
return true;
}
@@ -97,6 +100,14 @@ class ObjectStats {
static const int kLastBucket = 1 << kLastBucketShift;
static const int kNumberOfBuckets = kLastBucketShift - kFirstBucketShift + 1;
+ void PrintKeyAndId(const char* key, int gc_count);
+ // The following functions are excluded from inline to reduce the overall
+ // binary size of V8. On x64 this saves around 80KB.
+ V8_NOINLINE void PrintInstanceTypeJSON(const char* key, int gc_count,
+ const char* name, int index);
+ V8_NOINLINE void DumpInstanceTypeData(std::stringstream& stream,
+ const char* name, int index);
+
int HistogramIndexFromSize(size_t size) {
if (size == 0) return 0;
int idx = static_cast<int>(base::ieee754::log2(static_cast<double>(size))) -
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 252b2fe5e2..f3502568d6 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -84,7 +84,10 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFreeSpace, &VisitFreeSpace);
- table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
+ table_.Register(
+ kVisitJSWeakCollection,
+ &FlexibleBodyVisitor<StaticVisitor, JSWeakCollection::BodyDescriptor,
+ int>::Visit);
table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 9393fcc615..d4aa8b2f00 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -107,6 +107,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARGUMENTS_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
@@ -120,6 +122,43 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
index 440c440b7e..ad1d9b3e30 100644
--- a/deps/v8/src/heap/page-parallel-job.h
+++ b/deps/v8/src/heap/page-parallel-job.h
@@ -103,7 +103,8 @@ class PageParallelJob {
delete main_task;
// Wait for background tasks.
for (int i = 0; i < num_tasks_; i++) {
- if (!cancelable_task_manager_->TryAbort(task_ids[i])) {
+ if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
pending_tasks_->Wait();
}
}
diff --git a/deps/v8/src/heap/remembered-set.cc b/deps/v8/src/heap/remembered-set.cc
deleted file mode 100644
index c5dab90515..0000000000
--- a/deps/v8/src/heap/remembered-set.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/heap/remembered-set.h"
-#include "src/heap/heap-inl.h"
-#include "src/heap/heap.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/slot-set.h"
-#include "src/heap/spaces.h"
-#include "src/heap/store-buffer.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-template <PointerDirection direction>
-void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
- STATIC_ASSERT(direction == OLD_TO_NEW);
- for (MemoryChunk* chunk : *heap->old_space()) {
- SlotSet* slots = GetSlotSet(chunk);
- if (slots != nullptr) {
- slots->Iterate(
- [heap, chunk](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
- }
- for (MemoryChunk* chunk : *heap->code_space()) {
- TypedSlotSet* slots = GetTypedSlotSet(chunk);
- if (slots != nullptr) {
- slots->Iterate(
- [heap, chunk](SlotType type, Address host_addr, Address addr) {
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(host_addr))) {
- return KEEP_SLOT;
- } else {
- return REMOVE_SLOT;
- }
- },
- TypedSlotSet::PREFREE_EMPTY_CHUNKS);
- }
- }
- for (MemoryChunk* chunk : *heap->map_space()) {
- SlotSet* slots = GetSlotSet(chunk);
- if (slots != nullptr) {
- slots->Iterate(
- [heap, chunk](Address addr) {
- Object** slot = reinterpret_cast<Object**>(addr);
- // TODO(mlippautz): In map space all allocations would ideally be
- // map
- // aligned. After establishing this invariant IsValidSlot could just
- // refer to the containing object using alignment and check the mark
- // bits.
- return IsValidSlot(heap, chunk, slot) ? KEEP_SLOT : REMOVE_SLOT;
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
- }
- }
-}
-
-template <PointerDirection direction>
-void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
- Iterate(heap, [heap](Address addr) {
- HeapObject* obj =
- heap->mark_compact_collector()->FindBlackObjectBySlotSlow(addr);
- if (obj == nullptr) {
- // The slot is in dead object.
- MemoryChunk* chunk = MemoryChunk::FromAnyPointerAddress(heap, addr);
- AllocationSpace owner = chunk->owner()->identity();
- // The old to old remembered set should not have dead slots.
- CHECK_NE(direction, OLD_TO_OLD);
- // The old to new remembered set is allowed to have slots in dead
- // objects only in map and large object space because these space
- // cannot have raw untagged pointers.
- CHECK(owner == MAP_SPACE || owner == LO_SPACE);
- } else {
- int offset = static_cast<int>(addr - obj->address());
- CHECK(obj->IsValidSlot(offset));
- }
- return KEEP_SLOT;
- });
-}
-
-template <PointerDirection direction>
-bool RememberedSet<direction>::IsValidSlot(Heap* heap, MemoryChunk* chunk,
- Object** slot) {
- STATIC_ASSERT(direction == OLD_TO_NEW);
- Object* object = *slot;
- if (!heap->InNewSpace(object)) {
- return false;
- }
- HeapObject* heap_object = HeapObject::cast(object);
- // If the target object is not black, the source slot must be part
- // of a non-black (dead) object.
- return Marking::IsBlack(ObjectMarking::MarkBitFrom(heap_object)) &&
- heap->mark_compact_collector()->IsSlotInBlackObject(
- chunk, reinterpret_cast<Address>(slot));
-}
-
-template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
-template void RememberedSet<OLD_TO_OLD>::VerifyValidSlots(Heap* heap);
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index 74791b926b..a625b13dbf 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -45,7 +45,8 @@ class RememberedSet {
// Given a page and a range of slots in that page, this function removes the
// slots from the remembered set.
- static void RemoveRange(MemoryChunk* chunk, Address start, Address end) {
+ static void RemoveRange(MemoryChunk* chunk, Address start, Address end,
+ SlotSet::EmptyBucketMode mode) {
SlotSet* slot_set = GetSlotSet(chunk);
if (slot_set != nullptr) {
uintptr_t start_offset = start - chunk->address();
@@ -53,7 +54,7 @@ class RememberedSet {
DCHECK_LT(start_offset, end_offset);
if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
slot_set->RemoveRange(static_cast<int>(start_offset),
- static_cast<int>(end_offset));
+ static_cast<int>(end_offset), mode);
} else {
// The large page has multiple slot sets.
// Compute slot set indicies for the range [start_offset, end_offset).
@@ -67,17 +68,17 @@ class RememberedSet {
end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
if (start_chunk == end_chunk) {
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
- offset_in_end_chunk);
+ offset_in_end_chunk, mode);
} else {
// Clear all slots from start_offset to the end of first chunk.
slot_set[start_chunk].RemoveRange(offset_in_start_chunk,
- Page::kPageSize);
+ Page::kPageSize, mode);
// Clear all slots in intermediate chunks.
for (int i = start_chunk + 1; i < end_chunk; i++) {
- slot_set[i].RemoveRange(0, Page::kPageSize);
+ slot_set[i].RemoveRange(0, Page::kPageSize, mode);
}
// Clear slots from the beginning of the last page to end_offset.
- slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk);
+ slot_set[end_chunk].RemoveRange(0, offset_in_end_chunk, mode);
}
}
}
@@ -201,9 +202,7 @@ class RememberedSet {
// slots that are not part of live objects anymore. This method must be
// called after marking, when the whole transitive closure is known and
// must be called before sweeping when mark bits are still intact.
- static void ClearInvalidSlots(Heap* heap);
-
- static void VerifyValidSlots(Heap* heap);
+ static void ClearInvalidTypedSlots(Heap* heap, MemoryChunk* chunk);
private:
static SlotSet* GetSlotSet(MemoryChunk* chunk) {
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
index fadfccdcc4..f7fbfc1480 100644
--- a/deps/v8/src/heap/scavenge-job.h
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SCAVENGE_JOB_H_
#include "src/cancelable-task.h"
+#include "src/globals.h"
#include "src/heap/gc-tracer.h"
namespace v8 {
@@ -16,7 +17,7 @@ class Isolate;
// This class posts idle tasks and performs scavenges in the idle tasks.
-class ScavengeJob {
+class V8_EXPORT_PRIVATE ScavengeJob {
public:
class IdleTask : public CancelableIdleTask {
public:
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index 9671f3615f..4cc215a83e 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -62,10 +62,8 @@ SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
}
// static
-template <PromotionMode promotion_mode>
-void StaticScavengeVisitor<promotion_mode>::VisitPointer(Heap* heap,
- HeapObject* obj,
- Object** p) {
+void StaticScavengeVisitor::VisitPointer(Heap* heap, HeapObject* obj,
+ Object** p) {
Object* object = *p;
if (!heap->InNewSpace(object)) return;
Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 59d04300e6..cad0e8af25 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -22,7 +22,7 @@ enum LoggingAndProfiling {
enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-template <MarksHandling marks_handling, PromotionMode promotion_mode,
+template <MarksHandling marks_handling,
LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
@@ -185,8 +185,12 @@ class ScavengingVisitor : public StaticVisitorBase {
if (allocation.To(&target)) {
MigrateObject(heap, object, target, object_size);
- // Update slot to new target.
- *slot = target;
+ // Update slot to new target using CAS. A concurrent sweeper thread may
+ // filter the slot concurrently.
+ HeapObject* old = *slot;
+ base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot),
+ reinterpret_cast<base::AtomicWord>(old),
+ reinterpret_cast<base::AtomicWord>(target));
if (object_contents == POINTER_OBJECT) {
heap->promotion_queue()->insert(
@@ -206,8 +210,7 @@ class ScavengingVisitor : public StaticVisitorBase {
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
- if (!heap->ShouldBePromoted<promotion_mode>(object->address(),
- object_size)) {
+ if (!heap->ShouldBePromoted(object->address(), object_size)) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
@@ -219,9 +222,7 @@ class ScavengingVisitor : public StaticVisitorBase {
object_size)) {
return;
}
- if (promotion_mode == PROMOTE_MARKED) {
- FatalProcessOutOfMemory("Scavenger: promoting marked\n");
- }
+
// If promotion failed, we try to copy the object to the other semi-space
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
@@ -358,21 +359,19 @@ class ScavengingVisitor : public StaticVisitorBase {
static VisitorDispatchTable<ScavengingCallback> table_;
};
-template <MarksHandling marks_handling, PromotionMode promotion_mode,
+template <MarksHandling marks_handling,
LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback> ScavengingVisitor<
- marks_handling, promotion_mode, logging_and_profiling_mode>::table_;
+VisitorDispatchTable<ScavengingCallback>
+ ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
// static
void Scavenger::Initialize() {
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}
@@ -397,21 +396,21 @@ void Scavenger::SelectScavengingVisitorsTable() {
if (!heap()->incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<IGNORE_MARKS,
LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<IGNORE_MARKS, DEFAULT_PROMOTION,
+ ScavengingVisitor<IGNORE_MARKS,
LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
} else {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(
- ScavengingVisitor<TRANSFER_MARKS, PROMOTE_MARKED,
+ ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index f2213b8a36..54fe6ffdf9 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -63,9 +63,8 @@ class ScavengeVisitor : public ObjectVisitor {
// Helper class for turning the scavenger into an object visitor that is also
// filtering out non-HeapObjects and objects which do not reside in new space.
-template <PromotionMode promotion_mode>
class StaticScavengeVisitor
- : public StaticNewSpaceVisitor<StaticScavengeVisitor<promotion_mode>> {
+ : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
public:
static inline void VisitPointer(Heap* heap, HeapObject* object, Object** p);
};
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index 017667b482..da61052b8a 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -5,6 +5,7 @@
#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H
+#include <map>
#include <stack>
#include "src/allocation.h"
@@ -25,7 +26,13 @@ enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
public:
- enum IterationMode { PREFREE_EMPTY_BUCKETS, KEEP_EMPTY_BUCKETS };
+ enum EmptyBucketMode {
+ FREE_EMPTY_BUCKETS, // An empty bucket will be deallocated immediately.
+ PREFREE_EMPTY_BUCKETS, // An empty bucket will be unlinked from the slot
+ // set, but deallocated on demand by a sweeper
+ // thread.
+ KEEP_EMPTY_BUCKETS // An empty bucket will be kept.
+ };
SlotSet() {
for (int i = 0; i < kBuckets; i++) {
@@ -76,7 +83,7 @@ class SlotSet : public Malloced {
// The slot offsets specify a range of slots at addresses:
// [page_start_ + start_offset ... page_start_ + end_offset).
- void RemoveRange(int start_offset, int end_offset) {
+ void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
CHECK_LE(end_offset, 1 << kPageSizeBits);
DCHECK_LE(start_offset, end_offset);
int start_bucket, start_cell, start_bit;
@@ -93,12 +100,10 @@ class SlotSet : public Malloced {
int current_cell = start_cell;
ClearCell(current_bucket, current_cell, ~start_mask);
current_cell++;
+ base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
if (current_bucket < end_bucket) {
- if (bucket[current_bucket].Value() != nullptr) {
- while (current_cell < kCellsPerBucket) {
- bucket[current_bucket].Value()[current_cell].SetValue(0);
- current_cell++;
- }
+ if (bucket_ptr != nullptr) {
+ ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
}
// The rest of the current bucket is cleared.
// Move on to the next bucket.
@@ -108,17 +113,27 @@ class SlotSet : public Malloced {
DCHECK(current_bucket == end_bucket ||
(current_bucket < end_bucket && current_cell == 0));
while (current_bucket < end_bucket) {
- ReleaseBucket(current_bucket);
+ if (mode == PREFREE_EMPTY_BUCKETS) {
+ PreFreeEmptyBucket(current_bucket);
+ } else if (mode == FREE_EMPTY_BUCKETS) {
+ ReleaseBucket(current_bucket);
+ } else {
+ DCHECK(mode == KEEP_EMPTY_BUCKETS);
+ bucket_ptr = bucket[current_bucket].Value();
+ if (bucket_ptr) {
+ ClearBucket(bucket_ptr, 0, kCellsPerBucket);
+ }
+ }
current_bucket++;
}
// All buckets between start_bucket and end_bucket are cleared.
+ bucket_ptr = bucket[current_bucket].Value();
DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
- if (current_bucket == kBuckets ||
- bucket[current_bucket].Value() == nullptr) {
+ if (current_bucket == kBuckets || bucket_ptr == nullptr) {
return;
}
while (current_cell < end_cell) {
- bucket[current_bucket].Value()[current_cell].SetValue(0);
+ bucket_ptr[current_cell].SetValue(0);
current_cell++;
}
// All cells between start_cell and end_cell are cleared.
@@ -148,19 +163,19 @@ class SlotSet : public Malloced {
// else return REMOVE_SLOT;
// });
template <typename Callback>
- int Iterate(Callback callback, IterationMode mode) {
+ int Iterate(Callback callback, EmptyBucketMode mode) {
int new_count = 0;
for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
- if (bucket[bucket_index].Value() != nullptr) {
+ base::AtomicValue<uint32_t>* current_bucket =
+ bucket[bucket_index].Value();
+ if (current_bucket != nullptr) {
int in_bucket_count = 0;
- base::AtomicValue<uint32_t>* current_bucket =
- bucket[bucket_index].Value();
int cell_offset = bucket_index * kBitsPerBucket;
for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
if (current_bucket[i].Value()) {
uint32_t cell = current_bucket[i].Value();
uint32_t old_cell = cell;
- uint32_t new_cell = cell;
+ uint32_t mask = 0;
while (cell) {
int bit_offset = base::bits::CountTrailingZeros32(cell);
uint32_t bit_mask = 1u << bit_offset;
@@ -168,10 +183,11 @@ class SlotSet : public Malloced {
if (callback(page_start_ + slot) == KEEP_SLOT) {
++in_bucket_count;
} else {
- new_cell ^= bit_mask;
+ mask |= bit_mask;
}
cell ^= bit_mask;
}
+ uint32_t new_cell = old_cell & ~mask;
if (old_cell != new_cell) {
while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
// If TrySetValue fails, the cell must have changed. We just
@@ -180,17 +196,13 @@ class SlotSet : public Malloced {
// method will only be called on the main thread and filtering
// threads will only remove slots.
old_cell = current_bucket[i].Value();
- new_cell &= old_cell;
+ new_cell = old_cell & ~mask;
}
}
}
}
if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
- base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
- base::AtomicValue<uint32_t>* bucket_ptr =
- bucket[bucket_index].Value();
- to_be_freed_buckets_.push(bucket_ptr);
- bucket[bucket_index].SetValue(nullptr);
+ PreFreeEmptyBucket(bucket_index);
}
new_count += in_bucket_count;
}
@@ -226,6 +238,26 @@ class SlotSet : public Malloced {
return result;
}
+ void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
+ int end_cell) {
+ DCHECK_GE(start_cell, 0);
+ DCHECK_LE(end_cell, kCellsPerBucket);
+ int current_cell = start_cell;
+ while (current_cell < kCellsPerBucket) {
+ bucket[current_cell].SetValue(0);
+ current_cell++;
+ }
+ }
+
+ void PreFreeEmptyBucket(int bucket_index) {
+ base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
+ if (bucket_ptr != nullptr) {
+ base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
+ to_be_freed_buckets_.push(bucket_ptr);
+ bucket[bucket_index].SetValue(nullptr);
+ }
+ }
+
void ReleaseBucket(int bucket_index) {
DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
bucket[bucket_index].SetValue(nullptr);
@@ -429,6 +461,28 @@ class TypedSlotSet {
}
}
+ void RemoveInvaldSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
+ Chunk* chunk = chunk_.Value();
+ while (chunk != nullptr) {
+ TypedSlot* buffer = chunk->buffer.Value();
+ int count = chunk->count.Value();
+ for (int i = 0; i < count; i++) {
+ uint32_t host_offset = buffer[i].host_offset();
+ std::map<uint32_t, uint32_t>::iterator upper_bound =
+ invalid_ranges.upper_bound(host_offset);
+ if (upper_bound == invalid_ranges.begin()) continue;
+ // upper_bound points to the invalid range after the given slot. Hence,
+ // we have to go to the previous element.
+ upper_bound--;
+ DCHECK_LE(upper_bound->first, host_offset);
+ if (upper_bound->second > host_offset) {
+ buffer[i].Clear();
+ }
+ }
+ chunk = chunk->next.Value();
+ }
+ }
+
private:
static const int kInitialBufferSize = 100;
static const int kMaxBufferSize = 16 * KB;
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 314d22f9a6..f3f9215f3d 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -203,14 +203,15 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
return page;
}
-Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
+Page* Page::ConvertNewToOld(Page* old_page) {
+ OldSpace* old_space = old_page->heap()->old_space();
DCHECK(old_page->InNewSpace());
- old_page->set_owner(new_owner);
+ old_page->set_owner(old_space);
old_page->SetFlags(0, ~0);
- new_owner->AccountCommitted(old_page->size());
+ old_space->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
- old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
- new_page->InsertAfter(new_owner->anchor()->prev_page());
+ old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
+ new_page->InsertAfter(old_space->anchor()->prev_page());
return new_page;
}
@@ -279,6 +280,7 @@ intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
added += category->available();
category->Relink();
});
+ DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
return added;
}
@@ -597,8 +599,7 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
return static_cast<LargePage*>(chunk);
}
-
-intptr_t LargeObjectSpace::Available() {
+size_t LargeObjectSpace::Available() {
return ObjectSizeFor(heap()->memory_allocator()->Available());
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index c2043ed902..e0e6d12fda 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -107,7 +107,7 @@ bool CodeRange::SetUp(size_t requested) {
}
const size_t reserved_area =
- kReservedCodeRangePages * base::OS::CommitPageSize();
+ kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
if (requested < (kMaximalCodeRangeSize - reserved_area))
requested += reserved_area;
@@ -294,8 +294,8 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
highest_ever_allocated_(reinterpret_cast<void*>(0)),
unmapper_(this) {}
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
- intptr_t code_range_size) {
+bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
+ size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
DCHECK_GE(capacity_, capacity_executable_);
@@ -304,23 +304,17 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
size_executable_ = 0;
code_range_ = new CodeRange(isolate_);
- if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
+ if (!code_range_->SetUp(code_range_size)) return false;
return true;
}
void MemoryAllocator::TearDown() {
- unmapper()->WaitUntilCompleted();
-
- MemoryChunk* chunk = nullptr;
- while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
- FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
- NOT_EXECUTABLE);
- }
+ unmapper()->TearDown();
// Check that spaces were torn down before MemoryAllocator.
- DCHECK_EQ(size_.Value(), 0);
+ DCHECK_EQ(size_.Value(), 0u);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
@@ -384,6 +378,13 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
}
}
+void MemoryAllocator::Unmapper::TearDown() {
+ WaitUntilCompleted();
+ ReconsiderDelayedChunks();
+ CHECK(delayed_regular_chunks_.empty());
+ PerformFreeMemoryOnQueuedChunks();
+}
+
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
// Move constructed, so the permanent list should be empty.
@@ -395,11 +396,12 @@ void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
- // We cannot free memory chunks in new space while the sweeper is running
- // since a sweeper thread might be stuck right before trying to lock the
- // corresponding page.
- return !chunk->InNewSpace() || (mc == nullptr) || !FLAG_concurrent_sweeping ||
- mc->sweeper().IsSweepingCompleted();
+ // We cannot free a memory chunk in new space while the sweeper is running
+ // because the memory chunk can be in the queue of a sweeper task.
+ // Chunks in old generation are unmapped if they are empty.
+ DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
+ return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
+ mc->sweeper().IsSweepingCompleted(NEW_SPACE);
}
bool MemoryAllocator::CommitMemory(Address base, size_t size,
@@ -478,6 +480,7 @@ Address MemoryAllocator::AllocateAlignedMemory(
// Failed to commit the body. Release the mapping and any partially
// commited regions inside it.
reservation.Release();
+ size_.Decrement(reserve_size);
return NULL;
}
@@ -513,7 +516,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->typed_old_to_new_slots_.SetValue(nullptr);
chunk->typed_old_to_old_slots_ = nullptr;
chunk->skip_list_ = nullptr;
- chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
@@ -525,7 +527,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
chunk->local_tracker_ = nullptr;
- chunk->black_area_end_marker_map_ = nullptr;
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
@@ -547,9 +548,9 @@ bool MemoryChunk::CommitArea(size_t requested) {
IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
size_t header_size = area_start() - address() - guard_size;
size_t commit_size =
- RoundUp(header_size + requested, base::OS::CommitPageSize());
+ RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
- base::OS::CommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
if (commit_size > committed_size) {
// Commit size should be less or equal than the reserved size.
@@ -617,8 +618,8 @@ void MemoryChunk::Unlink() {
}
void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
- DCHECK_GE(bytes_to_shrink, static_cast<size_t>(base::OS::CommitPageSize()));
- DCHECK_EQ(0, bytes_to_shrink % base::OS::CommitPageSize());
+ DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
+ DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
Address free_start = chunk->area_end_ - bytes_to_shrink;
// Don't adjust the size of the page. The area is just uncomitted but not
// released.
@@ -628,22 +629,22 @@ void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
if (chunk->reservation_.IsReserved())
chunk->reservation_.Guard(chunk->area_end_);
else
- base::OS::Guard(chunk->area_end_, base::OS::CommitPageSize());
+ base::OS::Guard(chunk->area_end_, GetCommitPageSize());
}
}
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
+ size_t commit_area_size,
Executability executable,
Space* owner) {
- DCHECK(commit_area_size <= reserve_area_size);
+ DCHECK_LE(commit_area_size, reserve_area_size);
size_t chunk_size;
Heap* heap = isolate_->heap();
- Address base = NULL;
+ Address base = nullptr;
base::VirtualMemory reservation;
- Address area_start = NULL;
- Address area_end = NULL;
+ Address area_start = nullptr;
+ Address area_end = nullptr;
//
// MemoryChunk layout:
@@ -677,7 +678,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
if (executable == EXECUTABLE) {
chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
- base::OS::CommitPageSize()) +
+ GetCommitPageSize()) +
CodePageGuardSize();
// Check executable memory limit.
@@ -689,7 +690,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
// Size of header (not executable) plus area (executable).
size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
- base::OS::CommitPageSize());
+ GetCommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
#ifdef V8_TARGET_ARCH_MIPS64
@@ -725,10 +726,10 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
area_end = area_start + commit_area_size;
} else {
chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
- base::OS::CommitPageSize());
+ GetCommitPageSize());
size_t commit_size =
RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
- base::OS::CommitPageSize());
+ GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, &reservation);
@@ -777,6 +778,14 @@ void Page::ResetFreeListStatistics() {
available_in_free_list_ = 0;
}
+size_t Page::AvailableInFreeList() {
+ size_t sum = 0;
+ ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+ sum += category->available();
+ });
+ return sum;
+}
+
size_t Page::ShrinkToHighWaterMark() {
// Shrink pages to high water mark. The water mark points either to a filler
// or the area_end.
@@ -805,7 +814,7 @@ size_t Page::ShrinkToHighWaterMark() {
size_t unused = RoundDown(
static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
- base::OS::CommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
if (unused > 0) {
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
@@ -914,11 +923,11 @@ template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
MemoryChunk* chunk);
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
- DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
+ DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
}
@@ -931,15 +940,15 @@ Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
- intptr_t size, PagedSpace* owner, Executability executable);
+ size_t size, PagedSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
- intptr_t size, SemiSpace* owner, Executability executable);
+ size_t size, SemiSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
- intptr_t size, SemiSpace* owner, Executability executable);
+ size_t size, SemiSpace* owner, Executability executable);
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
+LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
@@ -1000,30 +1009,35 @@ void MemoryAllocator::ReportStatistics() {
}
#endif
-
-int MemoryAllocator::CodePageGuardStartOffset() {
+size_t MemoryAllocator::CodePageGuardStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
- return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+ return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
}
-
-int MemoryAllocator::CodePageGuardSize() {
- return static_cast<int>(base::OS::CommitPageSize());
+size_t MemoryAllocator::CodePageGuardSize() {
+ return static_cast<int>(GetCommitPageSize());
}
-
-int MemoryAllocator::CodePageAreaStartOffset() {
+size_t MemoryAllocator::CodePageAreaStartOffset() {
// We are guarding code pages: the first OS page after the header
// will be protected as non-writable.
return CodePageGuardStartOffset() + CodePageGuardSize();
}
-
-int MemoryAllocator::CodePageAreaEndOffset() {
+size_t MemoryAllocator::CodePageAreaEndOffset() {
// We are guarding code pages: the last OS page will be protected as
// non-writable.
- return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+ return Page::kPageSize - static_cast<int>(GetCommitPageSize());
+}
+
+intptr_t MemoryAllocator::GetCommitPageSize() {
+ if (FLAG_v8_os_page_size != 0) {
+ DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
+ return FLAG_v8_os_page_size * KB;
+ } else {
+ return base::OS::CommitPageSize();
+ }
}
@@ -1250,6 +1264,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
p->set_owner(this);
p->InsertAfter(anchor_.prev_page());
RelinkFreeListCategories(p);
+ DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
}
}
@@ -1277,7 +1292,7 @@ Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on iterable spaces.
DCHECK(!heap()->mark_compact_collector()->in_use());
- if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
+ if (!Contains(addr)) return Smi::kZero; // Signaling not found.
Page* p = Page::FromAddress(addr);
HeapObjectIterator it(p);
@@ -1288,7 +1303,7 @@ Object* PagedSpace::FindObject(Address addr) {
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
void PagedSpace::ShrinkImmortalImmovablePages() {
@@ -1378,12 +1393,6 @@ void PagedSpace::EmptyAllocationInfo() {
if (heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAllocationAreaAddress(current_top);
- // We have to remember the end of the current black allocation area if
- // something was allocated in the current bump pointer range.
- if (allocation_info_.original_top() != current_top) {
- Address end_black_area = current_top - kPointerSize;
- page->AddBlackAreaEndMarker(end_black_area);
- }
// Clear the bits in the unused black area.
if (current_top != current_limit) {
@@ -1394,7 +1403,8 @@ void PagedSpace::EmptyAllocationInfo() {
}
SetTopAndLimit(NULL, NULL);
- Free(current_top, static_cast<int>(current_limit - current_top));
+ DCHECK_GE(current_limit, current_top);
+ Free(current_top, current_limit - current_top);
}
void PagedSpace::IncreaseCapacity(size_t bytes) {
@@ -1408,8 +1418,6 @@ void PagedSpace::ReleasePage(Page* page) {
free_list_.EvictFreeListItems(page);
DCHECK(!free_list_.ContainsPageFreeListItems(page));
- page->ReleaseBlackAreaEndMarkerMap();
-
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
allocation_info_.Reset(nullptr, nullptr);
}
@@ -1481,10 +1489,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// -----------------------------------------------------------------------------
// NewSpace implementation
-bool NewSpace::SetUp(int initial_semispace_capacity,
- int maximum_semispace_capacity) {
+bool NewSpace::SetUp(size_t initial_semispace_capacity,
+ size_t maximum_semispace_capacity) {
DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
- DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
+ DCHECK(base::bits::IsPowerOfTwo32(
+ static_cast<uint32_t>(maximum_semispace_capacity)));
to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
@@ -1529,9 +1538,9 @@ void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
// Double the semispace size but only up to maximum capacity.
DCHECK(TotalCapacity() < MaximumCapacity());
- int new_capacity =
+ size_t new_capacity =
Min(MaximumCapacity(),
- FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
+ static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
if (to_space_.GrowTo(new_capacity)) {
// Only grow from space if we managed to grow to-space.
if (!from_space_.GrowTo(new_capacity)) {
@@ -1549,8 +1558,8 @@ void NewSpace::Grow() {
void NewSpace::Shrink() {
- int new_capacity = Max(InitialTotalCapacity(), 2 * static_cast<int>(Size()));
- int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+ size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
+ size_t rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
if (rounded_new_capacity < TotalCapacity() &&
to_space_.ShrinkTo(rounded_new_capacity)) {
// Only shrink from-space if we managed to shrink to-space.
@@ -1577,7 +1586,8 @@ bool NewSpace::Rebalance() {
bool SemiSpace::EnsureCurrentCapacity() {
if (is_committed()) {
- const int expected_pages = current_capacity_ / Page::kPageSize;
+ const int expected_pages =
+ static_cast<int>(current_capacity_ / Page::kPageSize);
int actual_pages = 0;
Page* current_page = anchor()->next_page();
while (current_page != anchor()) {
@@ -1604,7 +1614,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
current_page->SetFlags(anchor()->prev_page()->GetFlags(),
Page::kCopyAllFlags);
heap()->CreateFillerObjectAt(current_page->area_start(),
- current_page->area_size(),
+ static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
}
}
@@ -1878,8 +1888,8 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
-void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
- DCHECK_GE(maximum_capacity, Page::kPageSize);
+void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
+ DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
current_capacity_ = minimum_capacity_;
maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
@@ -1902,7 +1912,7 @@ void SemiSpace::TearDown() {
bool SemiSpace::Commit() {
DCHECK(!is_committed());
Page* current = anchor();
- const int num_pages = current_capacity_ / Page::kPageSize;
+ const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
@@ -1948,17 +1958,16 @@ size_t SemiSpace::CommittedPhysicalMemory() {
return size;
}
-
-bool SemiSpace::GrowTo(int new_capacity) {
+bool SemiSpace::GrowTo(size_t new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
DCHECK_LE(new_capacity, maximum_capacity_);
DCHECK_GT(new_capacity, current_capacity_);
- const int delta = new_capacity - current_capacity_;
+ const size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- const int delta_pages = delta / Page::kPageSize;
+ const int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* last_page = anchor()->prev_page();
DCHECK_NE(last_page, anchor());
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
@@ -1993,14 +2002,14 @@ void SemiSpace::RewindPages(Page* start, int num_pages) {
}
}
-bool SemiSpace::ShrinkTo(int new_capacity) {
- DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+bool SemiSpace::ShrinkTo(size_t new_capacity) {
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
DCHECK_GE(new_capacity, minimum_capacity_);
DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
- const int delta = current_capacity_ - new_capacity;
+ const size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
- int delta_pages = delta / Page::kPageSize;
+ int delta_pages = static_cast<int>(delta / Page::kPageSize);
Page* new_last_page;
Page* last_page;
while (delta_pages > 0) {
@@ -2343,7 +2352,7 @@ void FreeListCategory::Reset() {
available_ = 0;
}
-FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = top();
@@ -2354,8 +2363,8 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
return node;
}
-FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
- int* node_size) {
+FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* node = PickNodeFromList(node_size);
@@ -2367,15 +2376,16 @@ FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
return node;
}
-FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
- int* node_size) {
+FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
+ size_t* node_size) {
DCHECK(page()->CanAllocate());
FreeSpace* prev_non_evac_node = nullptr;
for (FreeSpace* cur_node = top(); cur_node != nullptr;
cur_node = cur_node->next()) {
- int size = cur_node->size();
+ size_t size = cur_node->size();
if (size >= minimum_size) {
+ DCHECK_GE(available_, size);
available_ -= size;
if (cur_node == top()) {
set_top(cur_node->next());
@@ -2392,7 +2402,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
return nullptr;
}
-bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
+bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
FreeMode mode) {
if (!page()->CanAllocate()) return false;
@@ -2425,7 +2435,7 @@ void FreeListCategory::Relink() {
}
void FreeListCategory::Invalidate() {
- page()->add_available_in_free_list(-available());
+ page()->remove_available_in_free_list(available());
Reset();
type_ = kInvalidCategory;
}
@@ -2447,10 +2457,10 @@ void FreeList::Reset() {
ResetStats();
}
-int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
+size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
if (size_in_bytes == 0) return 0;
- owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
+ owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
ClearRecordedSlots::kNo);
Page* page = Page::FromAddress(start);
@@ -2469,10 +2479,11 @@ int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
page->add_available_in_free_list(size_in_bytes);
}
+ DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
return 0;
}
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
@@ -2480,7 +2491,7 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
node = current->PickNodeFromList(node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(-(*node_size));
+ ->remove_available_in_free_list(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2489,21 +2500,22 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
return node;
}
-FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
- int minimum_size) {
+FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size) {
if (categories_[type] == nullptr) return nullptr;
FreeSpace* node =
categories_[type]->TryPickNodeFromList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(-(*node_size));
+ ->remove_available_in_free_list(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
return node;
}
FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
- int* node_size, int minimum_size) {
+ size_t* node_size,
+ size_t minimum_size) {
FreeListCategoryIterator it(this, type);
FreeSpace* node = nullptr;
while (it.HasNext()) {
@@ -2511,7 +2523,7 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
node = current->SearchForNodeInList(minimum_size, node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(-(*node_size));
+ ->remove_available_in_free_list(*node_size);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2522,7 +2534,7 @@ FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
return node;
}
-FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
FreeSpace* node = nullptr;
// First try the allocation fast path: try to allocate the minimum element
@@ -2559,12 +2571,19 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
- DCHECK(0 < size_in_bytes);
+HeapObject* FreeList::Allocate(size_t size_in_bytes) {
DCHECK(size_in_bytes <= kMaxBlockSize);
DCHECK(IsAligned(size_in_bytes, kPointerSize));
+ DCHECK_LE(owner_->top(), owner_->limit());
+#ifdef DEBUG
+ if (owner_->top() != owner_->limit()) {
+ DCHECK_EQ(Page::FromAddress(owner_->top()),
+ Page::FromAddress(owner_->limit() - 1));
+ }
+#endif
// Don't free list allocate if there is linear space available.
- DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
+ DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
+ size_in_bytes);
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
@@ -2574,15 +2593,15 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
Heap::kNoGCFlags, kNoGCCallbackFlags);
- int new_node_size = 0;
+ size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == nullptr) return nullptr;
- int bytes_left = new_node_size - size_in_bytes;
- DCHECK(bytes_left >= 0);
+ DCHECK_GE(new_node_size, size_in_bytes);
+ size_t bytes_left = new_node_size - size_in_bytes;
#ifdef DEBUG
- for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+ for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
reinterpret_cast<Object**>(new_node->address())[i] =
Smi::FromInt(kCodeZapValue);
}
@@ -2593,11 +2612,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+ const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- owner_->Allocate(new_node_size);
+ owner_->Allocate(static_cast<int>(new_node_size));
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
@@ -2608,17 +2627,17 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking) {
- int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+ size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
+ DCHECK_GE(new_node_size, size_in_bytes + linear_size);
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
owner_->SetAllocationInfo(
new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else {
- DCHECK(bytes_left >= 0);
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
@@ -2628,8 +2647,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
return new_node;
}
-intptr_t FreeList::EvictFreeListItems(Page* page) {
- intptr_t sum = 0;
+size_t FreeList::EvictFreeListItems(Page* page) {
+ size_t sum = 0;
page->ForAllFreeListCategories(
[this, &sum, page](FreeListCategory* category) {
DCHECK_EQ(this, category->owner());
@@ -2703,8 +2722,8 @@ void FreeList::PrintCategories(FreeListCategoryType type) {
#ifdef DEBUG
-intptr_t FreeListCategory::SumFreeList() {
- intptr_t sum = 0;
+size_t FreeListCategory::SumFreeList() {
+ size_t sum = 0;
FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
@@ -2741,8 +2760,8 @@ bool FreeList::IsVeryLong() {
// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
- intptr_t sum = 0;
+size_t FreeList::SumFreeLists() {
+ size_t sum = 0;
ForAllFreeListCategories(
[&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
return sum;
@@ -2762,13 +2781,10 @@ void PagedSpace::PrepareForMarkCompact() {
free_list_.Reset();
}
-
-intptr_t PagedSpace::SizeOfObjects() {
- const intptr_t size = Size() - (limit() - top());
+size_t PagedSpace::SizeOfObjects() {
CHECK_GE(limit(), top());
- CHECK_GE(size, 0);
- USE(size);
- return size;
+ DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
+ return Size() - (limit() - top());
}
@@ -2781,24 +2797,12 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
// Each page may have a small free space that is not tracked by a free list.
// Update the maps for those free space objects.
for (Page* page : *this) {
- int size = static_cast<int>(page->wasted_memory());
+ size_t size = page->wasted_memory();
if (size == 0) continue;
+ DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
Address address = page->OffsetToAddress(Page::kPageSize - size);
- heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
- }
-}
-
-
-void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
- if (allocation_info_.top() >= allocation_info_.limit()) return;
-
- if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
- // Create filler object to keep page iterable if it was iterable.
- int remaining =
- static_cast<int>(allocation_info_.limit() - allocation_info_.top());
- heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
+ heap()->CreateFillerObjectAt(address, static_cast<int>(size),
ClearRecordedSlots::kNo);
- allocation_info_.Reset(nullptr, nullptr);
}
}
@@ -2826,8 +2830,8 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
return nullptr;
}
-
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
const int kMaxPagesToSweep = 1;
// Allocation in this space has failed.
@@ -2840,7 +2844,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
RefillFreeList();
// Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
+ HeapObject* object =
+ free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
@@ -2848,15 +2853,15 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
identity(), size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
if (max_freed >= size_in_bytes) {
- object = free_list_.Allocate(size_in_bytes);
+ object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
}
- if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
+ if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
DCHECK((CountTotalPages() > 1) ||
- (size_in_bytes <= free_list_.Available()));
- return free_list_.Allocate(size_in_bytes);
+ (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
+ return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
}
// If sweeper threads are active, wait for them at that point and steal
@@ -2897,7 +2902,7 @@ Address LargePage::GetAddressToShrink() {
return 0;
}
size_t used_size = RoundUp((object->address() - address()) + object->Size(),
- base::OS::CommitPageSize());
+ MemoryAllocator::GetCommitPageSize());
if (used_size < CommittedPhysicalMemory()) {
return address() + used_size;
}
@@ -2905,8 +2910,10 @@ Address LargePage::GetAddressToShrink() {
}
void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end());
- RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end());
+ RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
+ RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}
@@ -2967,14 +2974,15 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!heap()->CanExpandOldGeneration(object_size)) {
+ if (!heap()->CanExpandOldGeneration(object_size) ||
+ !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
return AllocationResult::Retry(identity());
}
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == NULL) return AllocationResult::Retry(identity());
- DCHECK(page->area_size() >= object_size);
+ DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
size_ += static_cast<int>(page->size());
AccountCommitted(page->size());
@@ -2993,7 +3001,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
// We only need to do this in debug builds or if verify_heap is on.
reinterpret_cast<Object**>(object->address())[0] =
heap()->fixed_array_map();
- reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+ reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
}
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
@@ -3022,7 +3030,7 @@ Object* LargeObjectSpace::FindObject(Address a) {
if (page != NULL) {
return page->GetObject();
}
- return Smi::FromInt(0); // Signaling not found.
+ return Smi::kZero; // Signaling not found.
}
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 732ba7ead5..f5701adc69 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -135,7 +135,8 @@ enum FreeMode { kLinkCategory, kDoNotLinkCategory };
class FreeListCategory {
public:
static const int kSize = kIntSize + // FreeListCategoryType type_
- kIntSize + // int available_
+ kIntSize + // padding for type_
+ kSizetSize + // size_t available_
kPointerSize + // FreeSpace* top_
kPointerSize + // FreeListCategory* prev_
kPointerSize; // FreeListCategory* next_
@@ -167,28 +168,28 @@ class FreeListCategory {
// category is currently unlinked.
void Relink();
- bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+ bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
// Picks a node from the list and stores its size in |node_size|. Returns
// nullptr if the category is empty.
- FreeSpace* PickNodeFromList(int* node_size);
+ FreeSpace* PickNodeFromList(size_t* node_size);
// Performs a single try to pick a node of at least |minimum_size| from the
// category. Stores the actual size in |node_size|. Returns nullptr if no
// node is found.
- FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+ FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
// Picks a node of at least |minimum_size| from the category. Stores the
// actual size in |node_size|. Returns nullptr if no node is found.
- FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+ FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
- int available() const { return available_; }
+ size_t available() const { return available_; }
#ifdef DEBUG
- intptr_t SumFreeList();
+ size_t SumFreeList();
int FreeListLength();
#endif
@@ -211,7 +212,7 @@ class FreeListCategory {
// |available_|: Total available bytes in all blocks of this free list
// category.
- int available_;
+ size_t available_;
// |top_|: Points to the top FreeSpace* in the free list category.
FreeSpace* top_;
@@ -310,11 +311,6 @@ class MemoryChunk {
kSweepingInProgress,
};
- // Every n write barrier invocations we go to runtime even though
- // we could have handled it in generated code. This lets us check
- // whether we have hit the limit and should do some more marking.
- static const int kWriteBarrierCounterGranularity = 500;
-
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -324,36 +320,30 @@ class MemoryChunk {
static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
- static const size_t kWriteBarrierCounterOffset =
- kSizeOffset + kPointerSize // size_t size
- + kIntptrSize // Flags flags_
- + kPointerSize // Address area_start_
- + kPointerSize // Address area_end_
- + 2 * kPointerSize // base::VirtualMemory reservation_
- + kPointerSize // Address owner_
- + kPointerSize // Heap* heap_
- + kIntSize // int progress_bar_
- + kIntSize // int live_bytes_count_
- + kPointerSize // SlotSet* old_to_new_slots_;
- + kPointerSize // SlotSet* old_to_old_slots_;
- + kPointerSize // TypedSlotSet* typed_old_to_new_slots_;
- + kPointerSize // TypedSlotSet* typed_old_to_old_slots_;
- + kPointerSize; // SkipList* skip_list_;
-
static const size_t kMinHeaderSize =
- kWriteBarrierCounterOffset +
- kIntptrSize // intptr_t write_barrier_counter_
- + kPointerSize // AtomicValue high_water_mark_
- + kPointerSize // base::Mutex* mutex_
- + kPointerSize // base::AtomicWord concurrent_sweeping_
- + 2 * kPointerSize // AtomicNumber free-list statistics
- + kPointerSize // AtomicValue next_chunk_
- + kPointerSize // AtomicValue prev_chunk_
+ kSizeOffset + kSizetSize // size_t size
+ + kIntptrSize // Flags flags_
+ + kPointerSize // Address area_start_
+ + kPointerSize // Address area_end_
+ + 2 * kPointerSize // base::VirtualMemory reservation_
+ + kPointerSize // Address owner_
+ + kPointerSize // Heap* heap_
+ + kIntSize // int progress_bar_
+ + kIntSize // int live_bytes_count_
+ + kPointerSize // SlotSet* old_to_new_slots_
+ + kPointerSize // SlotSet* old_to_old_slots_
+ + kPointerSize // TypedSlotSet* typed_old_to_new_slots_
+ + kPointerSize // TypedSlotSet* typed_old_to_old_slots_
+ + kPointerSize // SkipList* skip_list_
+ + kPointerSize // AtomicValue high_water_mark_
+ + kPointerSize // base::Mutex* mutex_
+ + kPointerSize // base::AtomicWord concurrent_sweeping_
+ + 2 * kSizetSize // AtomicNumber free-list statistics
+ + kPointerSize // AtomicValue next_chunk_
+ + kPointerSize // AtomicValue prev_chunk_
// FreeListCategory categories_[kNumberOfCategories]
+ FreeListCategory::kSize * kNumberOfCategories +
- kPointerSize // LocalArrayBufferTracker* local_tracker_
- // std::unordered_set<Address>* black_area_end_marker_map_
- + kPointerSize;
+ kPointerSize; // LocalArrayBufferTracker* local_tracker_
// We add some more space to the computed header size to amount for missing
// alignment requirements in our computation.
@@ -421,6 +411,10 @@ class MemoryChunk {
return concurrent_sweeping_;
}
+ bool SweepingDone() {
+ return concurrent_sweeping_state().Value() == kSweepingDone;
+ }
+
// Manage live byte count, i.e., count of bytes in black objects.
inline void ResetLiveBytes();
inline void IncrementLiveBytes(int by);
@@ -436,14 +430,6 @@ class MemoryChunk {
live_byte_count_ = live_bytes;
}
- int write_barrier_counter() {
- return static_cast<int>(write_barrier_counter_);
- }
-
- void set_write_barrier_counter(int counter) {
- write_barrier_counter_ = counter;
- }
-
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -465,7 +451,7 @@ class MemoryChunk {
V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
void ReleaseOldToNewSlots();
- void AllocateOldToOldSlots();
+ V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
void ReleaseOldToOldSlots();
void AllocateTypedOldToNewSlots();
void ReleaseTypedOldToNewSlots();
@@ -476,7 +462,7 @@ class MemoryChunk {
Address area_start() { return area_start_; }
Address area_end() { return area_end_; }
- int area_size() { return static_cast<int>(area_end() - area_start()); }
+ size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
bool CommitArea(size_t requested);
@@ -588,33 +574,6 @@ class MemoryChunk {
void InsertAfter(MemoryChunk* other);
void Unlink();
- void ReleaseBlackAreaEndMarkerMap() {
- if (black_area_end_marker_map_) {
- delete black_area_end_marker_map_;
- black_area_end_marker_map_ = nullptr;
- }
- }
-
- bool IsBlackAreaEndMarker(Address address) {
- if (black_area_end_marker_map_) {
- return black_area_end_marker_map_->find(address) !=
- black_area_end_marker_map_->end();
- }
- return false;
- }
-
- void AddBlackAreaEndMarker(Address address) {
- if (!black_area_end_marker_map_) {
- black_area_end_marker_map_ = new std::unordered_set<Address>();
- }
- auto ret = black_area_end_marker_map_->insert(address);
- USE(ret);
- // Check that we inserted a new black area end marker.
- DCHECK(ret.second);
- }
-
- bool HasBlackAreas() { return black_area_end_marker_map_ != nullptr; }
-
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
@@ -660,8 +619,6 @@ class MemoryChunk {
SkipList* skip_list_;
- intptr_t write_barrier_counter_;
-
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
base::AtomicValue<intptr_t> high_water_mark_;
@@ -683,9 +640,6 @@ class MemoryChunk {
LocalArrayBufferTracker* local_tracker_;
- // Stores the end addresses of black areas.
- std::unordered_set<Address>* black_area_end_marker_map_;
-
private:
void InitializeReservedMemory() { reservation_.Reset(); }
@@ -713,7 +667,7 @@ class Page : public MemoryChunk {
static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
- static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
+ static inline Page* ConvertNewToOld(Page* old_page);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
@@ -768,13 +722,10 @@ class Page : public MemoryChunk {
}
// Returns the offset of a given address to this page.
- inline int Offset(Address a) {
- int offset = static_cast<int>(a - address());
- return offset;
- }
+ inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
// Returns the address for a given offset to the this page.
- Address OffsetToAddress(int offset) {
+ Address OffsetToAddress(size_t offset) {
DCHECK_PAGE_OFFSET(offset);
return address() + offset;
}
@@ -788,15 +739,13 @@ class Page : public MemoryChunk {
DCHECK(SweepingDone());
}
- bool SweepingDone() {
- return concurrent_sweeping_state().Value() == kSweepingDone;
- }
-
void ResetFreeListStatistics();
- int LiveBytesFromFreeList() {
- return static_cast<int>(area_size() - wasted_memory() -
- available_in_free_list());
+ size_t AvailableInFreeList();
+
+ size_t LiveBytesFromFreeList() {
+ DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
+ return area_size() - wasted_memory() - available_in_free_list();
}
FreeListCategory* free_list_category(FreeListCategoryType type) {
@@ -805,12 +754,18 @@ class Page : public MemoryChunk {
bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
- intptr_t wasted_memory() { return wasted_memory_.Value(); }
- void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
- intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
- void add_available_in_free_list(intptr_t available) {
+ size_t wasted_memory() { return wasted_memory_.Value(); }
+ void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
+ size_t available_in_free_list() { return available_in_free_list_.Value(); }
+ void add_available_in_free_list(size_t available) {
+ DCHECK_LE(available, area_size());
available_in_free_list_.Increment(available);
}
+ void remove_available_in_free_list(size_t available) {
+ DCHECK_LE(available, area_size());
+ DCHECK_GE(available_in_free_list(), available);
+ available_in_free_list_.Decrement(available);
+ }
size_t ShrinkToHighWaterMark();
@@ -914,17 +869,17 @@ class Space : public Malloced {
virtual size_t MaximumCommittedMemory() { return max_committed_; }
// Returns allocated size.
- virtual intptr_t Size() = 0;
+ virtual size_t Size() = 0;
// Returns size of objects. Can differ from the allocated size
// (e.g. see LargeObjectSpace).
- virtual intptr_t SizeOfObjects() { return Size(); }
+ virtual size_t SizeOfObjects() { return Size(); }
// Approximate amount of physical memory committed for this space.
virtual size_t CommittedPhysicalMemory() = 0;
// Return the available bytes without growing.
- virtual intptr_t Available() = 0;
+ virtual size_t Available() = 0;
virtual int RoundSizeDownToObjectAlignment(int size) {
if (id_ == CODE_SPACE) {
@@ -973,8 +928,6 @@ class Space : public Malloced {
class MemoryChunkValidator {
// Computed offsets should match the compiler generated ones.
STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
- STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
- offsetof(MemoryChunk, write_barrier_counter_));
// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
@@ -1137,7 +1090,7 @@ class SkipList {
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
-// allocator allocated and deallocates pages for the paged heap spaces and large
+// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class MemoryAllocator {
public:
@@ -1180,6 +1133,7 @@ class MemoryAllocator {
void FreeQueuedChunks();
bool WaitUntilCompleted();
+ void TearDown();
private:
enum ChunkQueueType {
@@ -1237,30 +1191,32 @@ class MemoryAllocator {
kPooledAndQueue,
};
- static int CodePageGuardStartOffset();
+ static size_t CodePageGuardStartOffset();
- static int CodePageGuardSize();
+ static size_t CodePageGuardSize();
- static int CodePageAreaStartOffset();
+ static size_t CodePageAreaStartOffset();
- static int CodePageAreaEndOffset();
+ static size_t CodePageAreaEndOffset();
- static int CodePageAreaSize() {
+ static size_t CodePageAreaSize() {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- static int PageAreaSize(AllocationSpace space) {
+ static size_t PageAreaSize(AllocationSpace space) {
DCHECK_NE(LO_SPACE, space);
return (space == CODE_SPACE) ? CodePageAreaSize()
: Page::kAllocatableMemory;
}
+ static intptr_t GetCommitPageSize();
+
explicit MemoryAllocator(Isolate* isolate);
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
- intptr_t code_range_size);
+ bool SetUp(size_t max_capacity, size_t capacity_executable,
+ size_t code_range_size);
void TearDown();
@@ -1269,9 +1225,9 @@ class MemoryAllocator {
// should be tried first.
template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
- Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
+ Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
- LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
+ LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
template <MemoryAllocator::FreeMode mode = kFull>
@@ -1313,8 +1269,7 @@ class MemoryAllocator {
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
- MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
- intptr_t commit_area_size,
+ MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space);
void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
@@ -1690,7 +1645,7 @@ class FreeList {
public:
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
- static inline int GuaranteedAllocatable(int maximum_freed) {
+ static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
if (maximum_freed <= kTiniestListMax) {
// Since we are not iterating over all list entries, we cannot guarantee
// that we can find the maximum freed block in that free list.
@@ -1715,12 +1670,12 @@ class FreeList {
// was too small. Bookkeeping information will be written to the block, i.e.,
// its contents will be destroyed. The start address should be word aligned,
// and the size should be a non-zero multiple of the word size.
- int Free(Address start, int size_in_bytes, FreeMode mode);
+ size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
// Allocate a block of size {size_in_bytes} from the free list. The block is
// unitialized. A failure is returned if no block is available. The size
// should be a non-zero multiple of the word size.
- MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+ MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
// Clear the free list.
void Reset();
@@ -1732,8 +1687,8 @@ class FreeList {
}
// Return the number of bytes available on the free list.
- intptr_t Available() {
- intptr_t available = 0;
+ size_t Available() {
+ size_t available = 0;
ForAllFreeListCategories([&available](FreeListCategory* category) {
available += category->available();
});
@@ -1751,11 +1706,11 @@ class FreeList {
// Used after booting the VM.
void RepairLists(Heap* heap);
- intptr_t EvictFreeListItems(Page* page);
+ size_t EvictFreeListItems(Page* page);
bool ContainsPageFreeListItems(Page* page);
PagedSpace* owner() { return owner_; }
- intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+ size_t wasted_bytes() { return wasted_bytes_.Value(); }
template <typename Callback>
void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
@@ -1779,7 +1734,7 @@ class FreeList {
void PrintCategories(FreeListCategoryType type);
#ifdef DEBUG
- intptr_t SumFreeLists();
+ size_t SumFreeLists();
bool IsVeryLong();
#endif
@@ -1803,33 +1758,33 @@ class FreeList {
};
// The size range of blocks, in bytes.
- static const int kMinBlockSize = 3 * kPointerSize;
- static const int kMaxBlockSize = Page::kAllocatableMemory;
+ static const size_t kMinBlockSize = 3 * kPointerSize;
+ static const size_t kMaxBlockSize = Page::kAllocatableMemory;
- static const int kTiniestListMax = 0xa * kPointerSize;
- static const int kTinyListMax = 0x1f * kPointerSize;
- static const int kSmallListMax = 0xff * kPointerSize;
- static const int kMediumListMax = 0x7ff * kPointerSize;
- static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kTinyAllocationMax = kTiniestListMax;
- static const int kSmallAllocationMax = kTinyListMax;
- static const int kMediumAllocationMax = kSmallListMax;
- static const int kLargeAllocationMax = kMediumListMax;
+ static const size_t kTiniestListMax = 0xa * kPointerSize;
+ static const size_t kTinyListMax = 0x1f * kPointerSize;
+ static const size_t kSmallListMax = 0xff * kPointerSize;
+ static const size_t kMediumListMax = 0x7ff * kPointerSize;
+ static const size_t kLargeListMax = 0x3fff * kPointerSize;
+ static const size_t kTinyAllocationMax = kTiniestListMax;
+ static const size_t kSmallAllocationMax = kTinyListMax;
+ static const size_t kMediumAllocationMax = kSmallListMax;
+ static const size_t kLargeAllocationMax = kMediumListMax;
- FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
// Walks all available categories for a given |type| and tries to retrieve
// a node. Returns nullptr if the category is empty.
- FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
// Tries to retrieve a node from the first category in a given |type|.
// Returns nullptr if the category is empty.
- FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
- int minimum_size);
+ FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size);
// Searches a given |type| for a node of at least |minimum_size|.
- FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
- int minimum_size);
+ FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
+ size_t minimum_size);
FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
if (size_in_bytes <= kTiniestListMax) {
@@ -1862,7 +1817,7 @@ class FreeList {
FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
PagedSpace* owner_;
- base::AtomicNumber<intptr_t> wasted_bytes_;
+ base::AtomicNumber<size_t> wasted_bytes_;
FreeListCategory* categories_[kNumberOfCategories];
friend class FreeListCategory;
@@ -1974,7 +1929,7 @@ class PagedSpace : public Space {
void PrepareForMarkCompact();
// Current capacity without growing (Size() + Available()).
- intptr_t Capacity() { return accounting_stats_.Capacity(); }
+ size_t Capacity() { return accounting_stats_.Capacity(); }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -1996,21 +1951,21 @@ class PagedSpace : public Space {
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
// immediately added to the free list so they show up here.
- intptr_t Available() override { return free_list_.Available(); }
+ size_t Available() override { return free_list_.Available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
// concurrent sweeping are counted as being allocated! The bytes in the
// current linear allocation area (between top and limit) are also counted
// here.
- intptr_t Size() override { return accounting_stats_.Size(); }
+ size_t Size() override { return accounting_stats_.Size(); }
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
- intptr_t SizeOfObjects() override;
+ size_t SizeOfObjects() override;
// Wasted bytes in this space. These are just the bytes that were thrown away
// due to being too small to use for allocation.
- virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
+ virtual size_t Waste() { return free_list_.wasted_bytes(); }
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top(); }
@@ -2049,14 +2004,16 @@ class PagedSpace : public Space {
// the free list or accounted as waste.
// If add_to_freelist is false then just accounting stats are updated and
// no attempt to add area to free list is made.
- int Free(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
+ size_t Free(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
accounting_stats_.DeallocateBytes(size_in_bytes);
+ DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
- int UnaccountedFree(Address start, int size_in_bytes) {
- int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ size_t UnaccountedFree(Address start, size_t size_in_bytes) {
+ size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+ DCHECK_GE(size_in_bytes, wasted);
return size_in_bytes - wasted;
}
@@ -2112,15 +2069,13 @@ class PagedSpace : public Space {
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void EvictEvacuationCandidatesFromLinearAllocationArea();
-
bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
// Return size of allocatable area on a page in this space.
- inline int AreaSize() { return area_size_; }
+ inline int AreaSize() { return static_cast<int>(area_size_); }
virtual bool is_local() { return false; }
@@ -2183,7 +2138,7 @@ class PagedSpace : public Space {
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
- int area_size_;
+ size_t area_size_;
// Accounting information for this space.
AllocationStats accounting_stats_;
@@ -2237,7 +2192,7 @@ class SemiSpace : public Space {
inline bool Contains(Object* o);
inline bool ContainsSlow(Address a);
- void SetUp(int initial_capacity, int maximum_capacity);
+ void SetUp(size_t initial_capacity, size_t maximum_capacity);
void TearDown();
bool HasBeenSetUp() { return maximum_capacity_ != 0; }
@@ -2247,12 +2202,12 @@ class SemiSpace : public Space {
// Grow the semispace to the new capacity. The new capacity requested must
// be larger than the current capacity and less than the maximum capacity.
- bool GrowTo(int new_capacity);
+ bool GrowTo(size_t new_capacity);
// Shrinks the semispace to the new capacity. The new capacity requested
// must be more than the amount of used memory in the semispace and less
// than the current capacity.
- bool ShrinkTo(int new_capacity);
+ bool ShrinkTo(size_t new_capacity);
bool EnsureCurrentCapacity();
@@ -2300,13 +2255,13 @@ class SemiSpace : public Space {
void set_age_mark(Address mark);
// Returns the current capacity of the semispace.
- int current_capacity() { return current_capacity_; }
+ size_t current_capacity() { return current_capacity_; }
// Returns the maximum capacity of the semispace.
- int maximum_capacity() { return maximum_capacity_; }
+ size_t maximum_capacity() { return maximum_capacity_; }
// Returns the initial capacity of the semispace.
- int minimum_capacity() { return minimum_capacity_; }
+ size_t minimum_capacity() { return minimum_capacity_; }
SemiSpaceId id() { return id_; }
@@ -2316,14 +2271,14 @@ class SemiSpace : public Space {
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called:
- intptr_t Size() override {
+ size_t Size() override {
UNREACHABLE();
return 0;
}
- intptr_t SizeOfObjects() override { return Size(); }
+ size_t SizeOfObjects() override { return Size(); }
- intptr_t Available() override {
+ size_t Available() override {
UNREACHABLE();
return 0;
}
@@ -2352,20 +2307,22 @@ class SemiSpace : public Space {
void RewindPages(Page* start, int num_pages);
inline Page* anchor() { return &anchor_; }
- inline int max_pages() { return current_capacity_ / Page::kPageSize; }
+ inline int max_pages() {
+ return static_cast<int>(current_capacity_ / Page::kPageSize);
+ }
// Copies the flags into the masked positions on all pages in the space.
void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
// The currently committed space capacity.
- int current_capacity_;
+ size_t current_capacity_;
// The maximum capacity that can be used by this space. A space cannot grow
// beyond that size.
- int maximum_capacity_;
+ size_t maximum_capacity_;
// The minimum capacity for the space. A space cannot shrink below this size.
- int minimum_capacity_;
+ size_t minimum_capacity_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
@@ -2426,7 +2383,7 @@ class NewSpace : public Space {
inline bool ContainsSlow(Address a);
inline bool Contains(Object* o);
- bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
+ bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
@@ -2448,15 +2405,16 @@ class NewSpace : public Space {
void Shrink();
// Return the allocated bytes in the active semispace.
- intptr_t Size() override {
+ size_t Size() override {
+ DCHECK_GE(top(), to_space_.page_low());
return to_space_.pages_used() * Page::kAllocatableMemory +
- static_cast<int>(top() - to_space_.page_low());
+ static_cast<size_t>(top() - to_space_.page_low());
}
- intptr_t SizeOfObjects() override { return Size(); }
+ size_t SizeOfObjects() override { return Size(); }
// Return the allocatable capacity of a semispace.
- intptr_t Capacity() {
+ size_t Capacity() {
SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return (to_space_.current_capacity() / Page::kPageSize) *
Page::kAllocatableMemory;
@@ -2464,7 +2422,7 @@ class NewSpace : public Space {
// Return the current size of a semispace, allocatable and non-allocatable
// memory.
- intptr_t TotalCapacity() {
+ size_t TotalCapacity() {
DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
return to_space_.current_capacity();
}
@@ -2484,7 +2442,10 @@ class NewSpace : public Space {
size_t CommittedPhysicalMemory() override;
// Return the available bytes without growing.
- intptr_t Available() override { return Capacity() - Size(); }
+ size_t Available() override {
+ DCHECK_GE(Capacity(), Size());
+ return Capacity() - Size();
+ }
size_t AllocatedSinceLastGC() {
bool seen_age_mark = false;
@@ -2510,17 +2471,18 @@ class NewSpace : public Space {
// Top was reset at some point, invalidating this metric.
return 0;
}
- intptr_t allocated = age_mark_page->area_end() - age_mark;
+ DCHECK_GE(age_mark_page->area_end(), age_mark);
+ size_t allocated = age_mark_page->area_end() - age_mark;
DCHECK_EQ(current_page, age_mark_page);
current_page = age_mark_page->next_page();
while (current_page != last_page) {
allocated += Page::kAllocatableMemory;
current_page = current_page->next_page();
}
+ DCHECK_GE(top(), current_page->area_start());
allocated += top() - current_page->area_start();
- DCHECK_LE(0, allocated);
DCHECK_LE(allocated, Size());
- return static_cast<size_t>(allocated);
+ return allocated;
}
void MovePageFromSpaceToSpace(Page* page) {
@@ -2532,7 +2494,7 @@ class NewSpace : public Space {
bool Rebalance();
// Return the maximum capacity of a semispace.
- int MaximumCapacity() {
+ size_t MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
return to_space_.maximum_capacity();
}
@@ -2540,7 +2502,7 @@ class NewSpace : public Space {
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
- int InitialTotalCapacity() {
+ size_t InitialTotalCapacity() {
DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
return to_space_.minimum_capacity();
}
@@ -2832,7 +2794,7 @@ class LargeObjectSpace : public Space {
// Releases internal resources, frees objects in this space.
void TearDown();
- static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+ static size_t ObjectSizeFor(size_t chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
}
@@ -2843,11 +2805,11 @@ class LargeObjectSpace : public Space {
AllocateRaw(int object_size, Executability executable);
// Available bytes for objects in this space.
- inline intptr_t Available() override;
+ inline size_t Available() override;
- intptr_t Size() override { return size_; }
+ size_t Size() override { return size_; }
- intptr_t SizeOfObjects() override { return objects_size_; }
+ size_t SizeOfObjects() override { return objects_size_; }
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory() override;
@@ -2905,9 +2867,9 @@ class LargeObjectSpace : public Space {
private:
// The head of the linked list of large object chunks.
LargePage* first_page_;
- intptr_t size_; // allocated bytes
+ size_t size_; // allocated bytes
int page_count_; // number of chunks
- intptr_t objects_size_; // size of objects
+ size_t objects_size_; // size of objects
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
base::HashMap chunk_map_;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index a982eb3c40..974b85e1c8 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -16,63 +16,150 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap),
- top_(nullptr),
- start_(nullptr),
- limit_(nullptr),
- virtual_memory_(nullptr) {}
+ : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
+ for (int i = 0; i < kStoreBuffers; i++) {
+ start_[i] = nullptr;
+ limit_[i] = nullptr;
+ lazy_top_[i] = nullptr;
+ }
+ task_running_ = false;
+}
void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
// aligned to 2x the size. This lets us use a bit test to detect the end of
// the area.
- virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 2);
+ virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
uintptr_t start_as_int =
reinterpret_cast<uintptr_t>(virtual_memory_->address());
- start_ = reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
- limit_ = start_ + (kStoreBufferSize / kPointerSize);
+ start_[0] =
+ reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
+ limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
+ start_[1] = limit_[0];
+ limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
- DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
- DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>(
reinterpret_cast<char*>(virtual_memory_->address()) +
virtual_memory_->size());
- DCHECK(start_ <= vm_limit);
- DCHECK(limit_ <= vm_limit);
+
USE(vm_limit);
- DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferMask) == 0);
+ for (int i = 0; i < kStoreBuffers; i++) {
+ DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
+ DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
+ DCHECK(start_[i] <= vm_limit);
+ DCHECK(limit_[i] <= vm_limit);
+ DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
+ }
- if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
+ if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
+ kStoreBufferSize * kStoreBuffers,
false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
- top_ = start_;
+ current_ = 0;
+ top_ = start_[current_];
}
void StoreBuffer::TearDown() {
delete virtual_memory_;
- top_ = start_ = limit_ = nullptr;
+ top_ = nullptr;
+ for (int i = 0; i < kStoreBuffers; i++) {
+ start_[i] = nullptr;
+ limit_[i] = nullptr;
+ lazy_top_[i] = nullptr;
+ }
}
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
+ isolate->heap()->store_buffer()->FlipStoreBuffers();
isolate->counters()->store_buffer_overflows()->Increment();
}
-void StoreBuffer::MoveEntriesToRememberedSet() {
- if (top_ == start_) return;
- DCHECK(top_ <= limit_);
- for (Address* current = start_; current < top_; current++) {
+void StoreBuffer::FlipStoreBuffers() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ int other = (current_ + 1) % kStoreBuffers;
+ MoveEntriesToRememberedSet(other);
+ lazy_top_[current_] = top_;
+ current_ = other;
+ top_ = start_[current_];
+
+ if (!task_running_) {
+ task_running_ = true;
+ Task* task = new Task(heap_->isolate(), this);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+}
+
+void StoreBuffer::MoveEntriesToRememberedSet(int index) {
+ if (!lazy_top_[index]) return;
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, kStoreBuffers);
+ for (Address* current = start_[index]; current < lazy_top_[index];
+ current++) {
DCHECK(!heap_->code_space()->Contains(*current));
Address addr = *current;
Page* page = Page::FromAnyPointerAddress(heap_, addr);
- RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+ if (IsDeletionAddress(addr)) {
+ current++;
+ Address end = *current;
+ DCHECK(!IsDeletionAddress(end));
+ addr = UnmarkDeletionAddress(addr);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, addr);
+ }
+ } else {
+ DCHECK(!IsDeletionAddress(addr));
+ RememberedSet<OLD_TO_NEW>::Insert(page, addr);
+ }
}
- top_ = start_;
+ lazy_top_[index] = nullptr;
+}
+
+void StoreBuffer::MoveAllEntriesToRememberedSet() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ int other = (current_ + 1) % kStoreBuffers;
+ MoveEntriesToRememberedSet(other);
+ lazy_top_[current_] = top_;
+ MoveEntriesToRememberedSet(current_);
+ top_ = start_[current_];
}
+void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ int other = (current_ + 1) % kStoreBuffers;
+ MoveEntriesToRememberedSet(other);
+ task_running_ = false;
+}
+
+void StoreBuffer::DeleteEntry(Address start, Address end) {
+ // Deletions coming from the GC are directly deleted from the remembered
+ // set. Deletions coming from the runtime are added to the store buffer
+ // to allow concurrent processing.
+ if (heap_->gc_state() == Heap::NOT_IN_GC) {
+ if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = MarkDeletionAddress(start);
+ top_++;
+ *top_ = end;
+ top_++;
+ } else {
+ // In GC the store buffer has to be empty at any time.
+ DCHECK(Empty());
+ Page* page = Page::FromAddress(start);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, start);
+ }
+ }
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 1b3fcb0a98..09faf4dcbd 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -8,20 +8,28 @@
#include "src/allocation.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
#include "src/globals.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
// Intermediate buffer that accumulates old-to-new stores from the generated
-// code. On buffer overflow the slots are moved to the remembered set.
+// code. Moreover, it stores invalid old-to-new slots with two entries.
+// The first is a tagged address of the start of the invalid range, the second
+// one is the end address of the invalid range or null if there is just one slot
+// that needs to be removed from the remembered set. On buffer overflow the
+// slots are moved to the remembered set.
class StoreBuffer {
public:
static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferMask = kStoreBufferSize - 1;
+ static const int kStoreBuffers = 2;
+ static const intptr_t kDeletionTag = 1;
- static void StoreBufferOverflow(Isolate* isolate);
+ V8_EXPORT_PRIVATE static void StoreBufferOverflow(Isolate* isolate);
explicit StoreBuffer(Heap* heap);
void SetUp();
@@ -30,17 +38,109 @@ class StoreBuffer {
// Used to add entries from generated code.
inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
- void MoveEntriesToRememberedSet();
+ // Moves entries from a specific store buffer to the remembered set. This
+ // method takes a lock.
+ void MoveEntriesToRememberedSet(int index);
+
+ // This method ensures that all used store buffer entries are transfered to
+ // the remembered set.
+ void MoveAllEntriesToRememberedSet();
+
+ inline bool IsDeletionAddress(Address address) const {
+ return reinterpret_cast<intptr_t>(address) & kDeletionTag;
+ }
+
+ inline Address MarkDeletionAddress(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
+ kDeletionTag);
+ }
+
+ inline Address UnmarkDeletionAddress(Address address) {
+ return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
+ ~kDeletionTag);
+ }
+
+ // If we only want to delete a single slot, end should be set to null which
+ // will be written into the second field. When processing the store buffer
+ // the more efficient Remove method will be called in this case.
+ void DeleteEntry(Address start, Address end = nullptr);
+
+ void InsertEntry(Address slot) {
+ // Insertions coming from the GC are directly inserted into the remembered
+ // set. Insertions coming from the runtime are added to the store buffer to
+ // allow concurrent processing.
+ if (heap_->gc_state() == Heap::NOT_IN_GC) {
+ if (top_ + sizeof(Address) > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = slot;
+ top_++;
+ } else {
+ // In GC the store buffer has to be empty at any time.
+ DCHECK(Empty());
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ }
+ }
+
+ // Used by the concurrent processing thread to transfer entries from the
+ // store buffer to the remembered set.
+ void ConcurrentlyProcessStoreBuffer();
+
+ bool Empty() {
+ for (int i = 0; i < kStoreBuffers; i++) {
+ if (lazy_top_[i]) {
+ return false;
+ }
+ }
+ return top_ == start_[current_];
+ }
private:
+ // There are two store buffers. If one store buffer fills up, the main thread
+ // publishes the top pointer of the store buffer that needs processing in its
+ // global lazy_top_ field. After that it start the concurrent processing
+ // thread. The concurrent processing thread uses the pointer in lazy_top_.
+ // It will grab the given mutex and transfer its entries to the remembered
+ // set. If the concurrent thread does not make progress, the main thread will
+ // perform the work.
+ // Important: there is an ordering constrained. The store buffer with the
+ // older entries has to be processed first.
+ class Task : public CancelableTask {
+ public:
+ Task(Isolate* isolate, StoreBuffer* store_buffer)
+ : CancelableTask(isolate), store_buffer_(store_buffer) {}
+ virtual ~Task() {}
+
+ private:
+ void RunInternal() override {
+ store_buffer_->ConcurrentlyProcessStoreBuffer();
+ }
+ StoreBuffer* store_buffer_;
+ DISALLOW_COPY_AND_ASSIGN(Task);
+ };
+
+ void FlipStoreBuffers();
+
Heap* heap_;
Address* top_;
// The start and the limit of the buffer that contains store slots
- // added from the generated code.
- Address* start_;
- Address* limit_;
+ // added from the generated code. We have two chunks of store buffers.
+ // Whenever one fills up, we notify a concurrent processing thread and
+ // use the other empty one in the meantime.
+ Address* start_[kStoreBuffers];
+ Address* limit_[kStoreBuffers];
+
+ // At most one lazy_top_ pointer is set at any time.
+ Address* lazy_top_[kStoreBuffers];
+ base::Mutex mutex_;
+
+ // We only want to have at most one concurrent processing tas running.
+ bool task_running_;
+
+ // Points to the current buffer in use.
+ int current_;
base::VirtualMemory* virtual_memory_;
};
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 4d3195957e..d4de79ef59 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2912,8 +2912,9 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate()->heap()->MaxOldGenerationSize()) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index a1dc4b62be..79f4125354 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -1446,7 +1446,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index edab277fbe..9b2c51e99b 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -810,14 +810,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
- // Load last_match_info which is still known to be a fast-elements JSObject.
- // Check that the fourth object is a JSObject.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
- __ j(not_equal, &runtime);
+ // Check that the last match info is a FixedArray.
+ __ mov(ebx, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(ebx, &runtime);
// Check that the object has fast elements.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
__ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(eax, factory->fixed_array_map());
__ j(not_equal, &runtime);
@@ -825,31 +821,25 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
- __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ sub(eax, Immediate(RegExpMatchInfo::kLastMatchOverhead));
__ cmp(edx, eax);
__ j(greater, &runtime);
- // ebx: last_match_info backing store (FixedArray)
+ // ebx: last_match_info (FixedArray)
// edx: number of capture registers
// Store the capture count.
__ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ mov(FieldOperand(ebx, RegExpMatchInfo::kNumberOfCapturesOffset), edx);
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ecx, eax);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastSubjectOffset,
- eax,
- edi,
+ __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastSubjectOffset), eax);
+ __ RecordWriteField(ebx, RegExpMatchInfo::kLastSubjectOffset, eax, edi,
kDontSaveFPRegs);
__ mov(eax, ecx);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ RecordWriteField(ebx,
- RegExpImpl::kLastInputOffset,
- eax,
- edi,
+ __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastInputOffset), eax);
+ __ RecordWriteField(ebx, RegExpMatchInfo::kLastInputOffset, eax, edi,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
@@ -857,12 +847,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_static_offsets_vector(isolate());
__ mov(ecx, Immediate(address_of_static_offsets_vector));
- // ebx: last_match_info backing store (FixedArray)
+ // ebx: last_match_info (FixedArray)
// ecx: offsets vector
// edx: number of capture registers
Label next_capture, done;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
+ // counts down until wrapping after zero.
__ bind(&next_capture);
__ sub(edx, Immediate(1));
__ j(negative, &done, Label::kNear);
@@ -870,16 +860,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
__ SmiTag(edi);
// Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
+ __ mov(FieldOperand(ebx, edx, times_pointer_size,
+ RegExpMatchInfo::kFirstCaptureOffset),
+ edi);
__ jmp(&next_capture);
__ bind(&done);
// Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(eax, ebx);
__ ret(4 * kPointerSize);
// Do the runtime call to execute the regexp.
@@ -1062,7 +1050,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, eax);
__ test(ecx, edx);
@@ -1429,6 +1417,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // eax - number of arguments
// edi - function
// edx - slot id
// ebx - vector
@@ -1436,7 +1425,6 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ cmp(edi, ecx);
__ j(not_equal, miss);
- __ mov(eax, arg_count());
// Reload ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -1446,7 +1434,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ mov(ebx, ecx);
__ mov(edx, edi);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
// Unreachable.
@@ -1454,13 +1442,12 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
void CallICStub::Generate(MacroAssembler* masm) {
+ // edi - number of arguments
// edi - function
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does edi match the recorded monomorphic target?
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1492,7 +1479,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, ebx, edx);
- __ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1538,7 +1524,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_count_incremented);
- __ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1564,12 +1549,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
// Store the function. Use a stub since we need a frame for allocation.
+ // eax - number of arguments
// ebx - vector
// edx - slot
// edi - function
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
+ __ SmiTag(eax);
+ __ push(eax);
__ push(ebx);
__ push(edx);
__ push(edi);
@@ -1579,6 +1567,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
__ pop(edx);
__ pop(ebx);
+ __ pop(eax);
+ __ SmiUntag(eax);
}
__ jmp(&call_function);
@@ -1598,6 +1588,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve the number of arguments.
+ __ SmiTag(eax);
+ __ push(eax);
+
// Push the function and feedback info.
__ push(edi);
__ push(ebx);
@@ -1608,6 +1602,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
+
+ // Restore number of arguments.
+ __ pop(eax);
+ __ SmiUntag(eax);
}
@@ -3018,254 +3016,12 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label start_polymorphic;
-
- __ push(receiver);
- __ push(vector);
-
- Register receiver_map = receiver;
- Register cached_map = vector;
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &load_smi_map);
- __ mov(receiver_map, FieldOperand(receiver, 0));
- __ bind(&compare_map);
- __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
- // A named keyed load might have a 2 element array, all other cases can count
- // on an array with at least 2 {map, handler} pairs, so they can go right
- // into polymorphic array handling.
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
-
- // found, now call handler.
- Register handler = feedback;
- __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ pop(vector);
- __ pop(receiver);
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- if (!is_polymorphic) {
- __ bind(&next);
- __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &start_polymorphic);
- __ pop(vector);
- __ pop(receiver);
- __ jmp(miss);
- }
-
- // Polymorphic, we have to loop from 2 to N
- __ bind(&start_polymorphic);
- __ push(key);
- Register counter = key;
- __ mov(counter, Immediate(Smi::FromInt(2)));
- __ bind(&next_loop);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ pop(key);
- __ pop(vector);
- __ pop(receiver);
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- __ bind(&prepare_next);
- __ add(counter, Immediate(Smi::FromInt(2)));
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ pop(key);
- __ pop(vector);
- __ pop(receiver);
- __ jmp(miss);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register weak_cell, Label* miss) {
- // feedback initially contains the feedback array
- Label compare_smi_map;
-
- // Move the weak map into the weak_cell register.
- Register ic_map = weak_cell;
- __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &compare_smi_map);
- __ cmp(ic_map, FieldOperand(receiver, 0));
- __ j(not_equal, miss);
- Register handler = weak_cell;
- __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- // In microbenchmarks, it made sense to unroll this code so that the call to
- // the handler is duplicated for a HeapObject receiver and a Smi receiver.
- __ bind(&compare_smi_map);
- __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, miss);
- __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-}
-
-
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
- Register name = LoadWithVectorDescriptor::NameRegister(); // ecx
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
- Register scratch = edi;
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay, miss;
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
-
- __ bind(&not_array);
- __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &miss);
- __ push(slot);
- __ push(vector);
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
- vector, scratch);
- __ pop(vector);
- __ pop(slot);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
- Register key = LoadWithVectorDescriptor::NameRegister(); // ecx
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
- Register feedback = edi;
- __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay, miss;
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ j(not_equal, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
// value is on the stack already.
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register key, Register vector,
@@ -3382,63 +3138,6 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(weak_cell);
}
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
- Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
- Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
- Label miss;
-
- if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
- // Current stack layout:
- // - esp[8] -- value
- // - esp[4] -- slot
- // - esp[0] -- return address
- STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- if (in_frame) {
- __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
- // If the vector is not on the stack, then insert the vector beneath
- // return address in order to prepare for calling handler with
- // StoreWithVector calling convention.
- __ push(Operand(esp, 0));
- __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
- __ RecordComment("]");
- } else {
- __ mov(vector, Operand(esp, 1 * kPointerSize));
- }
- __ mov(slot, Operand(esp, 2 * kPointerSize));
- }
-
- Register scratch = value;
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay;
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &miss);
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
- no_reg);
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3487,7 +3186,7 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// - esp[12] -- value
// - receiver, key, handler in registers.
Register counter = key;
- __ mov(counter, Immediate(Smi::FromInt(0)));
+ __ mov(counter, Immediate(Smi::kZero));
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -3830,36 +3529,22 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
}
}
-
void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
-}
+ MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3912,21 +3597,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- __ add(eax, Immediate(3));
- break;
- case NONE:
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- __ mov(eax, Immediate(3));
- break;
- case ONE:
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- __ mov(eax, Immediate(4));
- break;
- }
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ add(eax, Immediate(3));
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(ebx);
@@ -4210,8 +3882,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(ecx, isolate()->factory()->empty_fixed_array());
__ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
- __ mov(FieldOperand(eax, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(0)));
+ __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ Ret();
@@ -4252,7 +3923,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
{
Label loop, done_loop;
- __ Move(ecx, Smi::FromInt(0));
+ __ Move(ecx, Smi::kZero);
__ bind(&loop);
__ cmp(ecx, eax);
__ j(equal, &done_loop, Label::kNear);
@@ -4641,7 +4312,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
{
Label loop, done_loop;
- __ Move(ecx, Smi::FromInt(0));
+ __ Move(ecx, Smi::kZero);
__ bind(&loop);
__ cmp(ecx, eax);
__ j(equal, &done_loop, Label::kNear);
@@ -4704,129 +4375,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = esi;
- Register slot_reg = ebx;
- Register value_reg = eax;
- Register cell_reg = edi;
- Register cell_details_reg = edx;
- Register cell_value_reg = ecx;
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, kUnexpectedValue);
- }
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = cell_reg;
- }
-
- // Load the PropertyCell at the specified slot.
- __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ mov(cell_details_reg,
- FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details_reg);
- __ and_(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ j(not_equal, &not_mutable_data);
- __ JumpIfSmi(value_reg, &fast_smi_case);
- __ bind(&fast_heapobject_case);
- __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
- __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
- cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // RecordWriteField clobbers the value register, so we need to reload.
- __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
- __ Ret();
- __ bind(&not_mutable_data);
-
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
- __ cmp(cell_value_reg, value_reg);
- __ j(not_equal, &not_same_value,
- FLAG_debug_code ? Label::kFar : Label::kNear);
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ test(cell_details_reg,
- Immediate(PropertyDetails::kAttributesReadOnlyMask));
- __ j(not_zero, &slow_case);
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ j(equal, &done);
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ j(equal, &done);
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(equal, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ j(not_equal, &slow_case, Label::kNear);
-
- // Now either both old and new values must be SMIs or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
- __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
- // Old and new values are SMIs, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
- __ Ret();
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
- Register cell_value_map_reg = cell_value_reg;
- __ mov(cell_value_map_reg,
- FieldOperand(cell_value_reg, HeapObject::kMapOffset));
- __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- __ j(equal, &fast_heapobject_case);
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Pop(cell_reg); // Pop return address.
- __ Push(slot_reg);
- __ Push(value_reg);
- __ Push(cell_reg); // Push return address.
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
@@ -5160,7 +4708,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ push(Immediate(ExternalReference::isolate_address(isolate())));
__ push(holder);
- __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
+ __ push(Immediate(Smi::kZero)); // should_throw_on_error -> false
__ push(FieldOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch); // Restore return address.
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 220484c1bf..8ce78720de 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -31,6 +31,7 @@ const Register LoadDescriptor::SlotRegister() { return eax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
@@ -40,15 +41,9 @@ const Register StoreDescriptor::SlotRegister() { return edi; }
const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-
const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
-
const Register StoreTransitionDescriptor::MapRegister() { return edi; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
@@ -161,7 +156,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, edx, ebx};
+ Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -210,13 +205,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx};
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 2bd8760c3a..2fa9d0eda5 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -810,20 +810,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
@@ -1296,79 +1282,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch1));
- DCHECK(!holder_reg.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- mov(scratch2, ebp);
- bind(&load_context);
- mov(scratch1,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch1, &has_context);
- mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
- jmp(&load_context);
- bind(&has_context);
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmp(scratch1, Immediate(0));
- Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
- }
- // Load the native context of the current context.
- mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Read the first word and compare to native_context_map.
- cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- }
-
- // Check if both contexts are the same.
- cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens, save holder_reg on the stack so we can use it
- // as a temporary register.
- //
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- mov(scratch2,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- cmp(scratch2, isolate()->factory()->null_value());
- Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
-
- // Read the first word and compare to native_context_map(),
- cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- }
-
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch1, FieldOperand(scratch1, token_offset));
- cmp(scratch1, FieldOperand(scratch2, token_offset));
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -1413,82 +1326,6 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
and_(r0, 0x3fffffff);
}
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
- shr(r1, kSmiTagSize); // convert smi to int
- dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kNumberDictionaryProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- DCHECK_EQ(DATA, 0);
- test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
@@ -1971,74 +1808,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch) {
- Label short_loop, len4, len8, len12, done, short_string;
- DCHECK(source.is(esi));
- DCHECK(destination.is(edi));
- DCHECK(length.is(ecx));
- cmp(length, Immediate(4));
- j(below, &short_string, Label::kNear);
-
- // Because source is 4-byte aligned in our uses of this function,
- // we keep source aligned for the rep_movs call by copying the odd bytes
- // at the end of the ranges.
- mov(scratch, Operand(source, length, times_1, -4));
- mov(Operand(destination, length, times_1, -4), scratch);
-
- cmp(length, Immediate(8));
- j(below_equal, &len4, Label::kNear);
- cmp(length, Immediate(12));
- j(below_equal, &len8, Label::kNear);
- cmp(length, Immediate(16));
- j(below_equal, &len12, Label::kNear);
-
- mov(scratch, ecx);
- shr(ecx, 2);
- rep_movs();
- and_(scratch, Immediate(0x3));
- add(destination, scratch);
- jmp(&done, Label::kNear);
-
- bind(&len12);
- mov(scratch, Operand(source, 8));
- mov(Operand(destination, 8), scratch);
- bind(&len8);
- mov(scratch, Operand(source, 4));
- mov(Operand(destination, 4), scratch);
- bind(&len4);
- mov(scratch, Operand(source, 0));
- mov(Operand(destination, 0), scratch);
- add(destination, length);
- jmp(&done, Label::kNear);
-
- bind(&short_string);
- test(length, length);
- j(zero, &done, Label::kNear);
-
- bind(&short_loop);
- mov_b(scratch, Operand(source, 0));
- mov_b(Operand(destination, 0), scratch);
- inc(source);
- inc(destination);
- dec(length);
- j(not_zero, &short_loop);
-
- bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -2153,20 +1922,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- if (!index.is(hash)) {
- mov(index, hash);
- }
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles) {
@@ -3090,7 +2845,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
cmp(index, FieldOperand(string, String::kLengthOffset));
Check(less, kIndexIsTooLarge);
- cmp(index, Immediate(Smi::FromInt(0)));
+ cmp(index, Immediate(Smi::kZero));
Check(greater_equal, kIndexIsNegative);
// Restore the index
@@ -3343,7 +3098,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(0)));
+ cmp(edx, Immediate(Smi::kZero));
j(not_equal, call_runtime);
bind(&start);
@@ -3375,20 +3130,21 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
j(zero, &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
xor_(scratch_reg, receiver_reg);
test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
j(not_zero, no_memento_found);
@@ -3397,9 +3153,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, no_memento_found);
+ j(greater_equal, no_memento_found);
// Memento map check.
bind(&map_check);
mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 2220ca7c4f..e8ff59d41b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -391,11 +391,6 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map, Label* fail,
- Label::Distance distance = Label::kFar);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Label* fail,
@@ -604,18 +599,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
- Register scratch2, Label* miss);
-
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
- Register r0, Register r1, Register r2,
- Register result);
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -685,12 +670,6 @@ class MacroAssembler: public Assembler {
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch, Label* gc_required);
- // Copy memory, byte-by-byte, from source to destination. Not optimized for
- // long or aligned copies.
- // The contents of index and scratch are destroyed.
- void CopyBytes(Register source, Register destination, Register length,
- Register scratch);
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
@@ -723,12 +702,6 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss);
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -821,7 +794,10 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
- void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+ void Call(Handle<Code> target, RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None()) {
+ call(target, rmode, id);
+ }
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Push(Register src) { push(src); }
void Push(const Operand& src) { push(src); }
diff --git a/deps/v8/src/ic/access-compiler-data.h b/deps/v8/src/ic/access-compiler-data.h
new file mode 100644
index 0000000000..dffcac7d05
--- /dev/null
+++ b/deps/v8/src/ic/access-compiler-data.h
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_ACCESS_COMPILER_DATA_H_
+#define V8_IC_ACCESS_COMPILER_DATA_H_
+
+#include <memory>
+
+#include "src/allocation.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class AccessCompilerData {
+ public:
+ AccessCompilerData() {}
+
+ bool IsInitialized() const { return load_calling_convention_ != nullptr; }
+ void Initialize(int load_register_count, const Register* load_registers,
+ int store_register_count, const Register* store_registers) {
+ load_calling_convention_.reset(NewArray<Register>(load_register_count));
+ for (int i = 0; i < load_register_count; ++i) {
+ load_calling_convention_[i] = load_registers[i];
+ }
+ store_calling_convention_.reset(NewArray<Register>(store_register_count));
+ for (int i = 0; i < store_register_count; ++i) {
+ store_calling_convention_[i] = store_registers[i];
+ }
+ }
+
+ Register* load_calling_convention() { return load_calling_convention_.get(); }
+ Register* store_calling_convention() {
+ return store_calling_convention_.get();
+ }
+
+ private:
+ std::unique_ptr<Register[]> load_calling_convention_;
+ std::unique_ptr<Register[]> store_calling_convention_;
+
+ DISALLOW_COPY_AND_ASSIGN(AccessCompilerData);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_ACCESS_COMPILER_DATA_H_
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index bb6b5e50d9..d92f9c0c53 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -4,7 +4,6 @@
#include "src/ic/access-compiler.h"
-
namespace v8 {
namespace internal {
@@ -42,13 +41,17 @@ void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
GenerateTailCall(masm, code);
}
-
-Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
+Register* PropertyAccessCompiler::GetCallingConvention(Isolate* isolate,
+ Code::Kind kind) {
+ AccessCompilerData* data = isolate->access_compiler_data();
+ if (!data->IsInitialized()) {
+ InitializePlatformSpecific(data);
+ }
if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
- return load_calling_convention();
+ return data->load_calling_convention();
}
DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
- return store_calling_convention();
+ return data->store_calling_convention();
}
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index ecc5c08a59..3d488e82ea 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -6,13 +6,13 @@
#define V8_IC_ACCESS_COMPILER_H_
#include "src/code-stubs.h"
+#include "src/ic/access-compiler-data.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
-
class PropertyAccessCompiler BASE_EMBEDDED {
public:
static Builtins::Name MissBuiltin(Code::Kind kind) {
@@ -36,7 +36,7 @@ class PropertyAccessCompiler BASE_EMBEDDED {
protected:
PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
CacheHolderFlag cache_holder)
- : registers_(GetCallingConvention(kind)),
+ : registers_(GetCallingConvention(isolate, kind)),
kind_(kind),
cache_holder_(cache_holder),
isolate_(isolate),
@@ -59,11 +59,6 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
- static Register* GetCallingConvention(Code::Kind);
- static Register* load_calling_convention();
- static Register* store_calling_convention();
- static Register* keyed_store_calling_convention();
-
Register* registers_;
static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
@@ -72,6 +67,9 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
private:
+ static Register* GetCallingConvention(Isolate* isolate, Code::Kind kind);
+ static void InitializePlatformSpecific(AccessCompilerData* data);
+
Code::Kind kind_;
CacheHolderFlag cache_holder_;
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 9ce485ed46..e501cdcc8b 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r3, r0, r4};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, r3, r0, r4};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r3, r4};
- return registers;
-}
+ Register store_registers[] = {receiver, name, r3, r4};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 691fe3d23d..6145d43641 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -407,10 +407,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ ldr(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ b(eq, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ ldr(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ ldr(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ b(ne, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -429,17 +453,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ b(ne, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -449,46 +462,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -496,7 +491,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -507,7 +502,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index 10ec578f7b..babf497a5b 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -19,18 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ b(eq, global_object);
- __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ b(eq, global_object);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -126,138 +114,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ b(ne, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(JS_OBJECT_TYPE));
- __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch1));
- __ b(lo, &in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmp(key, Operand(0));
- __ b(lt, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ b(eq, &absent);
- __ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ b(lo, slow);
- __ ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ tst(scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ b(ne, slow);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ b(ne, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ b(eq, &check_prototypes);
- __ mov(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ b(hi, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ b(eq, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ b(eq, index_string);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ tst(hash, Operand(kIsNotInternalizedMask));
- __ b(ne, not_unique);
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -340,106 +196,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(r2));
- DCHECK(receiver.is(r1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r0, r3, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r4,
- r3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r3: elements map
- // r4: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, &slow);
- __ SmiUntag(r0, key);
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
- __ Ret();
-
- // Slow case, key and receiver still in r2 and r1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r4,
- r3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
- Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r4, r5,
- r6, r9);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r3: elements
- __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
- // Load the property to r0.
- GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- r4, r3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r3, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 6273633822..8cbb5278ea 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -25,23 +25,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
// registers are actually scratch registers, and which are important. For now,
// we use the same assignments as ARM to remain on the safe side.
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, x3, x0, x4};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, x3, x0, x4};
-Register* PropertyAccessCompiler::store_calling_convention() {
- // receiver, value, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, x3, x4};
- return registers;
-}
+ // Store calling convention.
+ // receiver, name, scratch1, scratch2.
+ Register store_registers[] = {receiver, name, x3, x4};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 3f97fddcd5..58d0bb7446 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -437,10 +437,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ Ldr(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ Cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ B(eq, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ Ldr(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ Ldr(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ Cmp(scratch1, scratch2);
+ }
+ __ B(ne, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -454,19 +478,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
__ Mov(scratch1, Operand(validity_cell));
__ Ldr(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
- __ Cmp(scratch1, Operand(Smi::FromInt(Map::kPrototypeChainValid)));
- __ B(ne, miss);
- }
-
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ Ldr(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ B(ne, miss);
+ // Compare scratch1 against Map::kPrototypeChainValid.
+ static_assert(Map::kPrototypeChainValid == 0,
+ "Map::kPrototypeChainValid has unexpected value");
+ __ Cbnz(scratch1, miss);
}
// Keep track of the current object in register reg.
@@ -478,46 +493,27 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- UseScratchRegisterScope temps(masm());
- __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
name) == NameDictionary::kNotFound));
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -525,7 +521,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -536,7 +532,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index fa9d7c16b7..0ced207d8a 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -15,18 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-// "type" holds an instance type on entry and is not clobbered.
-// Generated code branch on "global_object" if type is any kind of global
-// JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
- __ B(eq, global_object);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -116,144 +104,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object and return the map of the
-// receiver in 'map_scratch' if the receiver is not a SMI.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver,
- Register map_scratch,
- Register scratch,
- int interceptor_bit, Label* slow) {
- DCHECK(!AreAliased(map_scratch, scratch));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
- __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch, interceptor_bit, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object, we enter the
- // runtime system to make sure that indexing into string objects work
- // as intended.
- STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- __ Cmp(scratch, JS_OBJECT_TYPE);
- __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-//
-// receiver - holds the receiver on entry.
-// Unchanged unless 'result' is the same register.
-//
-// key - holds the smi key on entry.
-// Unchanged unless 'result' is the same register.
-//
-// elements - holds the elements of the receiver and its prototypes. Clobbered.
-//
-// result - holds the result on exit if the load succeeded.
-// Allowed to be the the same as 'receiver' or 'key'.
-// Unchanged on bailout so 'receiver' and 'key' can be safely
-// used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
-
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- // Check for fast array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(key, scratch1);
- __ B(lo, &in_bounds);
-
- // Out of bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ Cmp(key, Operand(Smi::FromInt(0)));
- __ B(lt, slow); // Negative keys can't take the fast OOB path.
- __ Bind(&check_prototypes);
- __ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Bind(&check_next_prototype);
- __ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent);
- __ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ B(lo, slow);
- __ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow);
- __ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow);
- __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow);
- __ B(&check_next_prototype);
-
- __ Bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(&done);
-
- __ Bind(&in_bounds);
- // Fast case: Do the load.
- __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiUntag(scratch2, key);
- __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes);
-
- // Move the value to the result register.
- // 'result' can alias with 'receiver' or 'key' but these two must be
- // preserved if we jump to 'slow'.
- __ Mov(result, scratch2);
- __ Bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is done the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map_scratch, Register hash_scratch,
- Label* index_string, Label* not_unique) {
- DCHECK(!AreAliased(key, map_scratch, hash_scratch));
-
- // Is the key a name?
- Label unique;
- __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
- not_unique, hi);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ B(eq, &unique);
-
- // Is the string an array index with cached numeric value?
- __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
- __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
- index_string);
-
- // Is the string internalized? We know it's a string, so a single bit test is
- // enough.
- __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
- __ Bind(&unique);
- // Fall through if the key is a unique name.
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = x0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -323,127 +173,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
- Register receiver, Register scratch1,
- Register scratch2, Register scratch3,
- Register scratch4, Register scratch5,
- Label* slow) {
- DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
- scratch5));
-
- Isolate* isolate = masm->isolate();
- Label check_number_dictionary;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
- Map::kHasIndexedInterceptor, slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
- result, slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1,
- scratch1, scratch2);
- __ Ret();
-
- __ Bind(&check_number_dictionary);
- __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
- // Check whether we have a number dictionary.
- __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
- __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
- scratch4, scratch5);
- __ Ret();
-}
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
- Register receiver, Register scratch1,
- Register scratch2, Register scratch3,
- Register scratch4, Register scratch5,
- Label* slow) {
- DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
- scratch5));
-
- Isolate* isolate = masm->isolate();
- Label probe_dictionary, property_array_property;
- // If we can load the value, it should be returned in x0.
- Register result = x0;
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
- Map::kHasNamedInterceptor, slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, key, scratch1, scratch2, scratch3, scratch4);
- // Cache miss.
- KeyedLoadIC::GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it exists.
- __ Bind(&probe_dictionary);
- __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
- // Load the property.
- GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- scratch1, scratch2);
- __ Ret();
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(x2));
- DCHECK(receiver.is(x1));
-
- __ JumpIfNotSmi(key, &check_name);
- __ Bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
-
- // Slow case.
- __ Bind(&slow);
- __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(),
- 1, x4, x3);
- GenerateRuntimeGetProperty(masm);
-
- __ Bind(&check_name);
- GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
-
- GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow);
-
- __ Bind(&index_name);
- __ IndexFromHash(x3, key);
- // Now jump to the place where smi keys are handled.
- __ B(&index_smi);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 3b2e115b4f..05e9031915 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -6,7 +6,7 @@
#include "src/field-type.h"
#include "src/ic/call-optimization.h"
-#include "src/ic/handler-configuration.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -65,7 +65,10 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
// name specific if there are global objects involved.
Handle<Code> handler = PropertyHandlerCompiler::Find(
cache_name, stub_holder_map, Code::LOAD_IC, flag);
- if (!handler.is_null()) return handler;
+ if (!handler.is_null()) {
+ TRACE_HANDLER_STATS(isolate, LoadIC_HandlerCacheHit_NonExistent);
+ return handler;
+ }
TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
@@ -95,24 +98,23 @@ Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
Handle<Name> name,
Label* miss,
ReturnHolder return_what) {
- PrototypeCheckType check_type = SKIP_RECEIVER;
- int function_index = map()->IsPrimitiveMap()
- ? map()->GetConstructorFunctionIndex()
- : Map::kNoConstructorFunctionIndex;
- if (function_index != Map::kNoConstructorFunctionIndex) {
- GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
- scratch1(), miss);
- Object* function = isolate()->native_context()->get(function_index);
- Object* prototype = JSFunction::cast(function)->instance_prototype();
- Handle<Map> map(JSObject::cast(prototype)->map());
- set_map(map);
- object_reg = scratch1();
- check_type = CHECK_ALL_MAPS;
+ if (map()->IsPrimitiveMap() || map()->IsJSGlobalProxyMap()) {
+ // If the receiver is a global proxy and if we get to this point then
+ // the compile-time (current) native context has access to global proxy's
+ // native context. Since access rights revocation is not supported at all,
+ // we can generate a check that an execution-time native context is either
+ // the same as compile-time native context or has the same access token.
+ Handle<Context> native_context = isolate()->native_context();
+ Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
+
+ bool compare_native_contexts_only = map()->IsPrimitiveMap();
+ GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss,
+ compare_native_contexts_only);
}
// Check that the maps starting from the prototype haven't changed.
return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
- miss, check_type, return_what);
+ miss, return_what);
}
@@ -122,8 +124,14 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
Handle<Name> name,
Label* miss,
ReturnHolder return_what) {
+ if (map()->IsJSGlobalProxyMap()) {
+ Handle<Context> native_context = isolate()->native_context();
+ Handle<WeakCell> weak_cell(native_context->self_weak_cell(), isolate());
+ GenerateAccessCheck(weak_cell, scratch1(), scratch2(), miss, false);
+ }
+
return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
- miss, SKIP_RECEIVER, return_what);
+ miss, return_what);
}
@@ -224,7 +232,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
GenerateTailCall(masm(), slow_stub);
}
Register reg = Frontend(name);
@@ -236,7 +244,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, const CallOptimization& call_optimization,
int accessor_index, Handle<Code> slow_stub) {
DCHECK(call_optimization.is_simple_api_call());
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
GenerateTailCall(masm(), slow_stub);
}
Register holder = Frontend(name);
@@ -590,7 +598,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization, int accessor_index,
Handle<Code> slow_stub) {
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
GenerateTailCall(masm(), slow_stub);
}
Register holder = Frontend(name);
@@ -633,11 +641,9 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
bool is_js_array = instance_type == JS_ARRAY_TYPE;
if (elements_kind == DICTIONARY_ELEMENTS) {
if (FLAG_tf_load_ic_stub) {
- int config = KeyedLoadElementsKind::encode(elements_kind) |
- KeyedLoadConvertHole::encode(false) |
- KeyedLoadIsJsArray::encode(is_js_array) |
- LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
- return handle(Smi::FromInt(config), isolate);
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+ return LoadHandler::LoadElement(isolate, elements_kind, false,
+ is_js_array);
}
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
return LoadDictionaryElementStub(isolate).GetCode();
@@ -649,11 +655,9 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
*receiver_map == isolate->get_initial_js_array_map(elements_kind);
if (FLAG_tf_load_ic_stub) {
- int config = KeyedLoadElementsKind::encode(elements_kind) |
- KeyedLoadConvertHole::encode(convert_hole_to_undefined) |
- KeyedLoadIsJsArray::encode(is_js_array) |
- LoadHandlerTypeBit::encode(kLoadICHandlerForElements);
- return handle(Smi::FromInt(config), isolate);
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+ return LoadHandler::LoadElement(isolate, elements_kind,
+ convert_hole_to_undefined, is_js_array);
} else {
TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
return LoadFastElementStub(isolate, is_js_array, elements_kind,
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 63ca050ca2..0dec36af2f 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -13,7 +13,6 @@ namespace internal {
class CallOptimization;
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
enum ReturnHolder { RETURN_HOLDER, DONT_RETURN_ANYTHING };
class PropertyHandlerCompiler : public PropertyAccessCompiler {
@@ -84,6 +83,18 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
Handle<Name> name, Register scratch,
Label* miss);
+ // Generates check that current native context has the same access rights
+ // as the given |native_context_cell|.
+ // If |compare_native_contexts_only| is true then access check is considered
+ // passed if the execution-time native context is equal to contents of
+ // |native_context_cell|.
+ // If |compare_native_contexts_only| is false then access check is considered
+ // passed if the execution-time native context is equal to contents of
+ // |native_context_cell| or security tokens of both contexts are equal.
+ void GenerateAccessCheck(Handle<WeakCell> native_context_cell,
+ Register scratch1, Register scratch2, Label* miss,
+ bool compare_native_contexts_only);
+
// Generates code that verifies that the property holder has not changed
// (checking maps of objects in the prototype chain for fast and global
// objects or doing negative lookup for slow objects, ensures that the
@@ -99,7 +110,7 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
Register CheckPrototypes(Register object_reg, Register holder_reg,
Register scratch1, Register scratch2,
Handle<Name> name, Label* miss,
- PrototypeCheckType check, ReturnHolder return_what);
+ ReturnHolder return_what);
Handle<Code> GetCode(Code::Kind kind, Handle<Name> name);
void set_holder(Handle<JSObject> holder) { holder_ = holder; }
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
new file mode 100644
index 0000000000..505d67cf42
--- /dev/null
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -0,0 +1,145 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_HANDLER_CONFIGURATION_INL_H_
+#define V8_IC_HANDLER_CONFIGURATION_INL_H_
+
+#include "src/ic/handler-configuration.h"
+
+#include "src/field-index-inl.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> LoadHandler::LoadField(Isolate* isolate,
+ FieldIndex field_index) {
+ int config = KindBits::encode(kForFields) |
+ IsInobjectBits::encode(field_index.is_inobject()) |
+ IsDoubleBits::encode(field_index.is_double()) |
+ FieldOffsetBits::encode(field_index.offset());
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadConstant(Isolate* isolate, int descriptor) {
+ int config = KindBits::encode(kForConstants) |
+ IsAccessorInfoBits::encode(false) |
+ DescriptorValueIndexBits::encode(
+ DescriptorArray::ToValueIndex(descriptor));
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadApiGetter(Isolate* isolate, int descriptor) {
+ int config = KindBits::encode(kForConstants) |
+ IsAccessorInfoBits::encode(true) |
+ DescriptorValueIndexBits::encode(
+ DescriptorArray::ToValueIndex(descriptor));
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::EnableAccessCheckOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler) {
+ int config = Smi::cast(*smi_handler)->value();
+#ifdef DEBUG
+ Kind kind = KindBits::decode(config);
+ DCHECK_NE(kForElements, kind);
+#endif
+ config = DoAccessCheckOnReceiverBits::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::EnableNegativeLookupOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler) {
+ int config = Smi::cast(*smi_handler)->value();
+#ifdef DEBUG
+ Kind kind = KindBits::decode(config);
+ DCHECK_NE(kForElements, kind);
+#endif
+ config = DoNegativeLookupOnReceiverBits::update(config, true);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadNonExistent(
+ Isolate* isolate, bool do_negative_lookup_on_receiver) {
+ int config =
+ KindBits::encode(kForNonExistent) |
+ DoNegativeLookupOnReceiverBits::encode(do_negative_lookup_on_receiver);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> LoadHandler::LoadElement(Isolate* isolate,
+ ElementsKind elements_kind,
+ bool convert_hole_to_undefined,
+ bool is_js_array) {
+ int config = KindBits::encode(kForElements) |
+ ElementsKindBits::encode(elements_kind) |
+ ConvertHoleBits::encode(convert_hole_to_undefined) |
+ IsJsArrayBits::encode(is_js_array);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> StoreHandler::StoreField(Isolate* isolate, Kind kind,
+ int descriptor, FieldIndex field_index,
+ Representation representation,
+ bool extend_storage) {
+ StoreHandler::FieldRepresentation field_rep;
+ switch (representation.kind()) {
+ case Representation::kSmi:
+ field_rep = StoreHandler::kSmi;
+ break;
+ case Representation::kDouble:
+ field_rep = StoreHandler::kDouble;
+ break;
+ case Representation::kHeapObject:
+ field_rep = StoreHandler::kHeapObject;
+ break;
+ case Representation::kTagged:
+ field_rep = StoreHandler::kTagged;
+ break;
+ default:
+ UNREACHABLE();
+ return Handle<Object>::null();
+ }
+ int value_index = DescriptorArray::ToValueIndex(descriptor);
+
+ DCHECK(kind == kStoreField || kind == kTransitionToField);
+ DCHECK_IMPLIES(kind == kStoreField, !extend_storage);
+
+ int config = StoreHandler::KindBits::encode(kind) |
+ StoreHandler::ExtendStorageBits::encode(extend_storage) |
+ StoreHandler::IsInobjectBits::encode(field_index.is_inobject()) |
+ StoreHandler::FieldRepresentationBits::encode(field_rep) |
+ StoreHandler::DescriptorValueIndexBits::encode(value_index) |
+ StoreHandler::FieldOffsetBits::encode(field_index.offset());
+ return handle(Smi::FromInt(config), isolate);
+}
+
+Handle<Object> StoreHandler::StoreField(Isolate* isolate, int descriptor,
+ FieldIndex field_index,
+ Representation representation) {
+ return StoreField(isolate, kStoreField, descriptor, field_index,
+ representation, false);
+}
+
+Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
+ FieldIndex field_index,
+ Representation representation,
+ bool extend_storage) {
+ return StoreField(isolate, kTransitionToField, descriptor, field_index,
+ representation, extend_storage);
+}
+
+Handle<Object> StoreHandler::TransitionToConstant(Isolate* isolate,
+ int descriptor) {
+ int value_index = DescriptorArray::ToValueIndex(descriptor);
+ int config =
+ StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
+ StoreHandler::DescriptorValueIndexBits::encode(value_index);
+ return handle(Smi::FromInt(config), isolate);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_HANDLER_CONFIGURATION_INL_H_
diff --git a/deps/v8/src/ic/handler-configuration.h b/deps/v8/src/ic/handler-configuration.h
index bf7c4770b9..a5291736dc 100644
--- a/deps/v8/src/ic/handler-configuration.h
+++ b/deps/v8/src/ic/handler-configuration.h
@@ -6,38 +6,196 @@
#define V8_IC_HANDLER_CONFIGURATION_H_
#include "src/elements-kind.h"
+#include "src/field-index.h"
#include "src/globals.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-enum LoadHandlerType {
- kLoadICHandlerForElements = 0,
- kLoadICHandlerForProperties = 1
-};
+// A set of bit fields representing Smi handlers for loads.
+class LoadHandler {
+ public:
+ enum Kind { kForElements, kForFields, kForConstants, kForNonExistent };
+ class KindBits : public BitField<Kind, 0, 2> {};
+
+ // Defines whether access rights check should be done on receiver object.
+ // Applicable to kForFields, kForConstants and kForNonExistent kinds only when
+ // loading value from prototype chain. Ignored when loading from holder.
+ class DoAccessCheckOnReceiverBits
+ : public BitField<bool, KindBits::kNext, 1> {};
+
+ // Defines whether negative lookup check should be done on receiver object.
+ // Applicable to kForFields, kForConstants and kForNonExistent kinds only when
+ // loading value from prototype chain. Ignored when loading from holder.
+ class DoNegativeLookupOnReceiverBits
+ : public BitField<bool, DoAccessCheckOnReceiverBits::kNext, 1> {};
+
+ //
+ // Encoding when KindBits contains kForConstants.
+ //
+
+ class IsAccessorInfoBits
+ : public BitField<bool, DoNegativeLookupOnReceiverBits::kNext, 1> {};
+ // Index of a value entry in the descriptor array.
+ // +2 here is because each descriptor entry occupies 3 slots in array.
+ class DescriptorValueIndexBits
+ : public BitField<unsigned, IsAccessorInfoBits::kNext,
+ kDescriptorIndexBitCount + 2> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(DescriptorValueIndexBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kForFields.
+ //
+ class IsInobjectBits
+ : public BitField<bool, DoNegativeLookupOnReceiverBits::kNext, 1> {};
+ class IsDoubleBits : public BitField<bool, IsInobjectBits::kNext, 1> {};
+ // +1 here is to cover all possible JSObject header sizes.
+ class FieldOffsetBits
+ : public BitField<unsigned, IsDoubleBits::kNext,
+ kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kForElements.
+ //
+ class IsJsArrayBits : public BitField<bool, KindBits::kNext, 1> {};
+ class ConvertHoleBits : public BitField<bool, IsJsArrayBits::kNext, 1> {};
+ class ElementsKindBits
+ : public BitField<ElementsKind, ConvertHoleBits::kNext, 8> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(ElementsKindBits::kNext <= kSmiValueSize);
+
+ // The layout of an Tuple3 handler representing a load of a field from
+ // prototype when prototype chain checks do not include non-existing lookups
+ // or access checks.
+ static const int kHolderCellOffset = Tuple3::kValue1Offset;
+ static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
+ static const int kValidityCellOffset = Tuple3::kValue3Offset;
+
+ // The layout of an array handler representing a load of a field from
+ // prototype when prototype chain checks include non-existing lookups and
+ // access checks.
+ static const int kSmiHandlerIndex = 0;
+ static const int kValidityCellIndex = 1;
+ static const int kHolderCellIndex = 2;
+ static const int kFirstPrototypeIndex = 3;
+
+ // Creates a Smi-handler for loading a field from fast object.
+ static inline Handle<Object> LoadField(Isolate* isolate,
+ FieldIndex field_index);
+
+ // Creates a Smi-handler for loading a constant from fast object.
+ static inline Handle<Object> LoadConstant(Isolate* isolate, int descriptor);
+
+ // Creates a Smi-handler for loading an Api getter property from fast object.
+ static inline Handle<Object> LoadApiGetter(Isolate* isolate, int descriptor);
+
+ // Sets DoAccessCheckOnReceiverBits in given Smi-handler. The receiver
+ // check is a part of a prototype chain check.
+ static inline Handle<Object> EnableAccessCheckOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler);
-class LoadHandlerTypeBit : public BitField<bool, 0, 1> {};
+ // Sets DoNegativeLookupOnReceiverBits in given Smi-handler. The receiver
+ // check is a part of a prototype chain check.
+ static inline Handle<Object> EnableNegativeLookupOnReceiver(
+ Isolate* isolate, Handle<Object> smi_handler);
-// Encoding for configuration Smis for property loads:
-class FieldOffsetIsInobject
- : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {};
-class FieldOffsetIsDouble
- : public BitField<bool, FieldOffsetIsInobject::kNext, 1> {};
-class FieldOffsetOffset : public BitField<int, FieldOffsetIsDouble::kNext, 27> {
+ // Creates a Smi-handler for loading a non-existent property. Works only as
+ // a part of prototype chain check.
+ static inline Handle<Object> LoadNonExistent(
+ Isolate* isolate, bool do_negative_lookup_on_receiver);
+
+ // Creates a Smi-handler for loading an element.
+ static inline Handle<Object> LoadElement(Isolate* isolate,
+ ElementsKind elements_kind,
+ bool convert_hole_to_undefined,
+ bool is_js_array);
};
-// Make sure we don't overflow into the sign bit.
-STATIC_ASSERT(FieldOffsetOffset::kNext <= kSmiValueSize - 1);
-// Encoding for configuration Smis for elements loads:
-class KeyedLoadIsJsArray : public BitField<bool, LoadHandlerTypeBit::kNext, 1> {
+// A set of bit fields representing Smi handlers for stores.
+class StoreHandler {
+ public:
+ enum Kind {
+ kStoreElement,
+ kStoreField,
+ kTransitionToField,
+ kTransitionToConstant
+ };
+ class KindBits : public BitField<Kind, 0, 2> {};
+
+ enum FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };
+
+ // Applicable to kStoreField, kTransitionToField and kTransitionToConstant
+ // kinds.
+
+ // Index of a value entry in the descriptor array.
+ // +2 here is because each descriptor entry occupies 3 slots in array.
+ class DescriptorValueIndexBits
+ : public BitField<unsigned, KindBits::kNext,
+ kDescriptorIndexBitCount + 2> {};
+ //
+ // Encoding when KindBits contains kTransitionToConstant.
+ //
+
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(DescriptorValueIndexBits::kNext <= kSmiValueSize);
+
+ //
+ // Encoding when KindBits contains kStoreField or kTransitionToField.
+ //
+ class ExtendStorageBits
+ : public BitField<bool, DescriptorValueIndexBits::kNext, 1> {};
+ class IsInobjectBits : public BitField<bool, ExtendStorageBits::kNext, 1> {};
+ class FieldRepresentationBits
+ : public BitField<FieldRepresentation, IsInobjectBits::kNext, 2> {};
+ // +1 here is to cover all possible JSObject header sizes.
+ class FieldOffsetBits
+ : public BitField<unsigned, FieldRepresentationBits::kNext,
+ kDescriptorIndexBitCount + 1 + kPointerSizeLog2> {};
+ // Make sure we don't overflow the smi.
+ STATIC_ASSERT(FieldOffsetBits::kNext <= kSmiValueSize);
+
+ // The layout of an Tuple3 handler representing a transitioning store
+ // when prototype chain checks do not include non-existing lookups or access
+ // checks.
+ static const int kTransitionCellOffset = Tuple3::kValue1Offset;
+ static const int kSmiHandlerOffset = Tuple3::kValue2Offset;
+ static const int kValidityCellOffset = Tuple3::kValue3Offset;
+
+ // The layout of an array handler representing a transitioning store
+ // when prototype chain checks include non-existing lookups and access checks.
+ static const int kSmiHandlerIndex = 0;
+ static const int kValidityCellIndex = 1;
+ static const int kTransitionCellIndex = 2;
+ static const int kFirstPrototypeIndex = 3;
+
+ // Creates a Smi-handler for storing a field to fast object.
+ static inline Handle<Object> StoreField(Isolate* isolate, int descriptor,
+ FieldIndex field_index,
+ Representation representation);
+
+ // Creates a Smi-handler for transitioning store to a field.
+ static inline Handle<Object> TransitionToField(Isolate* isolate,
+ int descriptor,
+ FieldIndex field_index,
+ Representation representation,
+ bool extend_storage);
+
+ // Creates a Smi-handler for transitioning store to a constant field (in this
+ // case the only thing that needs to be done is an update of a map).
+ static inline Handle<Object> TransitionToConstant(Isolate* isolate,
+ int descriptor);
+
+ private:
+ static inline Handle<Object> StoreField(Isolate* isolate, Kind kind,
+ int descriptor,
+ FieldIndex field_index,
+ Representation representation,
+ bool extend_storage);
};
-class KeyedLoadConvertHole
- : public BitField<bool, KeyedLoadIsJsArray::kNext, 1> {};
-class KeyedLoadElementsKind
- : public BitField<ElementsKind, KeyedLoadConvertHole::kNext, 8> {};
-// Make sure we don't overflow into the sign bit.
-STATIC_ASSERT(KeyedLoadElementsKind::kNext <= kSmiValueSize - 1);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 3219f3d1cb..411c744659 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -16,22 +16,21 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ jmp(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, eax, edi};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, ebx, eax, edi};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, edi};
- return registers;
+ Register store_registers[] = {receiver, name, ebx, edi};
+
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
}
#undef __
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 06c58b8aae..68fd1b9d98 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -411,10 +411,32 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ mov(scratch1, NativeContextOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ j(equal, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ j(not_equal, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -433,17 +455,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -453,46 +464,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -500,7 +493,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -511,7 +504,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index b7496d4624..44a5b9f531 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@@ -132,238 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- int interceptor_bit, Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(
- FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register scratch,
- Register scratch2, Register result,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // scratch2 - holds maps and prototypes during prototype chain check.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch);
-
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(below, &in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmp(key, 0);
- __ j(less, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ cmp(scratch2, masm->isolate()->factory()->null_value());
- __ j(equal, &absent);
- __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
- __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
- // scratch: elements of current prototype
- // scratch2: map of current prototype
- __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
- __ j(below, slow);
- __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ j(not_zero, slow);
- __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
- __ j(not_equal, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ j(equal, &check_prototypes);
- __ Move(result, scratch);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, key);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // ebx: untagged index
- // eax: elements
- __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(receiver);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(receiver);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(receiver);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
- &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(isolate);
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
- edi);
-
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- __ pop(LoadDescriptor::SlotRegister());
-
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index 2f0633e0d8..750c88daa9 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -56,9 +56,11 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
// Tracking to do a better job of ensuring the data types are what they need
// to be. Not all the elements are in place yet, pessimistic elements
// transitions are still important for performance.
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- ElementsKind elements_kind = receiver_map->elements_kind();
if (!transitioned_map.is_null()) {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ TRACE_HANDLER_STATS(isolate(),
+ KeyedStoreIC_ElementsTransitionAndStoreStub);
cached_stub =
ElementsTransitionAndStoreStub(isolate(), elements_kind,
transitioned_map->elements_kind(),
@@ -66,19 +68,11 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
+ TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
- if (IsSloppyArgumentsElements(elements_kind)) {
- cached_stub =
- KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
- } else if (receiver_map->has_fast_elements() ||
- receiver_map->has_fixed_typed_array_elements()) {
- cached_stub = StoreFastElementStub(isolate(), is_js_array,
- elements_kind, store_mode).GetCode();
- } else {
- cached_stub =
- StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
- }
+ cached_stub =
+ CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
}
DCHECK(!cached_stub.is_null());
handlers->Add(cached_stub);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 4fc8ada8df..1b5d063270 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -92,6 +92,12 @@ Code* IC::target() const {
return GetTargetAtAddress(address(), constant_pool());
}
+bool IC::IsHandler(Object* object) {
+ return (object->IsSmi() && (object != nullptr)) || object->IsTuple3() ||
+ object->IsFixedArray() ||
+ (object->IsCode() && Code::cast(object)->is_handler());
+}
+
Handle<Map> IC::GetHandlerCacheHolder(Handle<Map> receiver_map,
bool receiver_is_holder, Isolate* isolate,
CacheHolderFlag* flag) {
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index ea1f16c824..f94803681b 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -17,7 +17,7 @@ void ICUtility::Clear(Isolate* isolate, Address address,
std::ostream& operator<<(std::ostream& os, const CallICState& s) {
- return os << "(args(" << s.argc() << "), " << s.convert_mode() << ", ";
+ return os << "(" << s.convert_mode() << ", " << s.tail_call_mode() << ")";
}
@@ -256,10 +256,10 @@ void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
if (old_extra_ic_state == GetExtraICState()) {
// Tagged operations can lead to non-truncating HChanges
- if (left->IsUndefined(isolate_) || left->IsBoolean()) {
+ if (left->IsOddball()) {
left_kind_ = GENERIC;
} else {
- DCHECK(right->IsUndefined(isolate_) || right->IsBoolean());
+ DCHECK(right->IsOddball());
right_kind_ = GENERIC;
}
}
@@ -270,8 +270,8 @@ BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
Kind kind) const {
Kind new_kind = GENERIC;
bool is_truncating = Token::IsTruncatingBinaryOp(op());
- if (object->IsBoolean() && is_truncating) {
- // Booleans will be automatically truncated by HChange.
+ if (object->IsOddball() && is_truncating) {
+ // Oddballs will be automatically truncated by HChange.
new_kind = INT32;
} else if (object->IsUndefined(isolate_)) {
// Undefined will be automatically truncated by HChange.
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 38be57ac04..1ba37b99db 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -26,10 +26,8 @@ class CallICState final BASE_EMBEDDED {
public:
explicit CallICState(ExtraICState extra_ic_state)
: bit_field_(extra_ic_state) {}
- CallICState(int argc, ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode)
- : bit_field_(ArgcBits::encode(argc) |
- ConvertModeBits::encode(convert_mode) |
+ CallICState(ConvertReceiverMode convert_mode, TailCallMode tail_call_mode)
+ : bit_field_(ConvertModeBits::encode(convert_mode) |
TailCallModeBits::encode(tail_call_mode)) {}
ExtraICState GetExtraICState() const { return bit_field_; }
@@ -38,7 +36,6 @@ class CallICState final BASE_EMBEDDED {
void (*Generate)(Isolate*,
const CallICState&));
- int argc() const { return ArgcBits::decode(bit_field_); }
ConvertReceiverMode convert_mode() const {
return ConvertModeBits::decode(bit_field_);
}
@@ -47,8 +44,7 @@ class CallICState final BASE_EMBEDDED {
}
private:
- typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
- typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
+ typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
int const bit_field_;
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 0e751bd358..7e0cefdca9 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -4,6 +4,8 @@
#include "src/ic/ic.h"
+#include <iostream>
+
#include "src/accessors.h"
#include "src/api-arguments-inl.h"
#include "src/api.h"
@@ -16,6 +18,7 @@
#include "src/frames-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/ic-inl.h"
#include "src/ic/stub-cache.h"
@@ -98,38 +101,51 @@ void IC::TraceIC(const char* type, Handle<Object> name) {
void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
- if (FLAG_trace_ic) {
- PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
-
- // TODO(jkummerow): Add support for "apply". The logic is roughly:
- // marker = [fp_ + kMarkerOffset];
- // if marker is smi and marker.value == INTERNAL and
- // the frame's code == builtin(Builtins::kFunctionApply):
- // then print "apply from" and advance one frame
-
- Object* maybe_function =
- Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
- if (maybe_function->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(maybe_function);
- JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(),
- stdout, true);
+ if (!FLAG_trace_ic) return;
+ PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+
+ // TODO(jkummerow): Add support for "apply". The logic is roughly:
+ // marker = [fp_ + kMarkerOffset];
+ // if marker is smi and marker.value == INTERNAL and
+ // the frame's code == builtin(Builtins::kFunctionApply):
+ // then print "apply from" and advance one frame
+
+ Object* maybe_function =
+ Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+ if (maybe_function->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(maybe_function);
+ int code_offset = 0;
+ if (function->IsInterpreted()) {
+ code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+ } else {
+ code_offset =
+ static_cast<int>(pc() - function->code()->instruction_start());
}
+ JavaScriptFrame::PrintFunctionAndOffset(function, function->abstract_code(),
+ code_offset, stdout, true);
+ }
- const char* modifier = "";
- if (kind() == Code::KEYED_STORE_IC) {
- KeyedAccessStoreMode mode =
- casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
- modifier = GetTransitionMarkModifier(mode);
- }
- void* map = nullptr;
- if (!receiver_map().is_null()) {
- map = reinterpret_cast<void*>(*receiver_map());
- }
- PrintF(" (%c->%c%s) map=%p ", TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state), modifier, map);
- name->ShortPrint(stdout);
- PrintF("]\n");
+ const char* modifier = "";
+ if (kind() == Code::KEYED_STORE_IC) {
+ KeyedAccessStoreMode mode =
+ casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ modifier = GetTransitionMarkModifier(mode);
}
+ Map* map = nullptr;
+ if (!receiver_map().is_null()) {
+ map = *receiver_map();
+ }
+ PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state), modifier,
+ reinterpret_cast<void*>(map));
+ if (map != nullptr) {
+ PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
+ map->NumberOfOwnDescriptors());
+ std::cout << map->instance_type();
+ }
+ PrintF(") ");
+ name->ShortPrint(stdout);
+ PrintF("]\n");
}
@@ -171,6 +187,16 @@ IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
StackFrame* frame = it.frame();
DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
#endif
+ // For interpreted functions, some bytecode handlers construct a
+ // frame. We have to skip the constructed frame to find the interpreted
+ // function's frame. Check if the there is an additional frame, and if there
+ // is skip this frame. However, the pc should not be updated. The call to
+ // ICs happen from bytecode handlers.
+ Object* frame_type =
+ Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
+ if (frame_type == Smi::FromInt(StackFrame::STUB)) {
+ fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
+ }
fp_ = fp;
if (FLAG_enable_embedded_constant_pool) {
constant_pool_address_ = constant_pool;
@@ -224,11 +250,6 @@ SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
// corresponding to the frame.
StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
- if (FLAG_ignition && it.frame()->type() == StackFrame::STUB) {
- // Advance over bytecode handler frame.
- // TODO(rmcilroy): Remove this once bytecode handlers don't need a frame.
- it.Advance();
- }
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
// function and the original code.
@@ -504,19 +525,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
}
-
-// static
-Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
- ExtraICState extra_state) {
- // TODO(ishell): remove extra_ic_state
- if (FLAG_compiled_keyed_generic_loads) {
- return KeyedLoadGenericStub(isolate).GetCode();
- } else {
- return isolate->builtins()->KeyedLoadIC_Megamorphic();
- }
-}
-
-
static bool MigrateDeprecated(Handle<Object> object) {
if (!object->IsJSObject()) return false;
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -562,11 +570,11 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
nexus->ConfigureMonomorphic(name, map, handler);
} else if (kind() == Code::STORE_IC) {
StoreICNexus* nexus = casted_nexus<StoreICNexus>();
- nexus->ConfigureMonomorphic(map, Handle<Code>::cast(handler));
+ nexus->ConfigureMonomorphic(map, handler);
} else {
DCHECK(kind() == Code::KEYED_STORE_IC);
KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
- nexus->ConfigureMonomorphic(name, map, Handle<Code>::cast(handler));
+ nexus->ConfigureMonomorphic(name, map, handler);
}
vector_set_ = true;
@@ -691,11 +699,8 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
return true;
}
-bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
- DCHECK(code->IsSmi() || code->IsCode());
- if (!code->IsSmi() && !Code::cast(*code)->is_handler()) {
- return false;
- }
+bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
+ DCHECK(IsHandler(*handler));
if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
Handle<Map> map = receiver_map();
MapHandleList maps;
@@ -735,16 +740,16 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
number_of_valid_maps++;
if (number_of_valid_maps > 1 && is_keyed()) return false;
if (number_of_valid_maps == 1) {
- ConfigureVectorState(name, receiver_map(), code);
+ ConfigureVectorState(name, receiver_map(), handler);
} else {
if (handler_to_overwrite >= 0) {
- handlers.Set(handler_to_overwrite, code);
+ handlers.Set(handler_to_overwrite, handler);
if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
maps.Set(handler_to_overwrite, map);
}
} else {
maps.Add(map);
- handlers.Add(code);
+ handlers.Add(handler);
}
ConfigureVectorState(name, &maps, &handlers);
@@ -754,8 +759,7 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> code) {
}
void IC::UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name) {
- DCHECK(handler->IsSmi() ||
- (handler->IsCode() && Handle<Code>::cast(handler)->is_handler()));
+ DCHECK(IsHandler(*handler));
ConfigureVectorState(name, receiver_map(), handler);
}
@@ -786,24 +790,28 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
return transitioned_map == target_map;
}
-void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
- DCHECK(code->IsCode() || (code->IsSmi() && (kind() == Code::LOAD_IC ||
- kind() == Code::KEYED_LOAD_IC)));
+void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
+ DCHECK(IsHandler(*handler));
+ // Currently only LoadIC and KeyedLoadIC support non-code handlers.
+ DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
+ kind() == Code::KEYED_LOAD_IC ||
+ kind() == Code::STORE_IC ||
+ kind() == Code::KEYED_STORE_IC);
switch (state()) {
case UNINITIALIZED:
case PREMONOMORPHIC:
- UpdateMonomorphicIC(code, name);
+ UpdateMonomorphicIC(handler, name);
break;
case RECOMPUTE_HANDLER:
case MONOMORPHIC:
if (kind() == Code::LOAD_GLOBAL_IC) {
- UpdateMonomorphicIC(code, name);
+ UpdateMonomorphicIC(handler, name);
break;
}
// Fall through.
case POLYMORPHIC:
if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
- if (UpdatePolymorphicIC(name, code)) break;
+ if (UpdatePolymorphicIC(name, handler)) break;
// For keyed stubs, we can't know whether old handlers were for the
// same key.
CopyICToMegamorphicCache(name);
@@ -812,7 +820,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
ConfigureVectorState(MEGAMORPHIC, name);
// Fall through.
case MEGAMORPHIC:
- UpdateMegamorphicCache(*receiver_map(), *name, *code);
+ UpdateMegamorphicCache(*receiver_map(), *name, *handler);
// Indicate that we've handled this case.
DCHECK(UseVector());
vector_set_ = true;
@@ -825,6 +833,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> code) {
Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
ExtraICState extra_state) {
+ DCHECK(!FLAG_tf_store_ic_stub);
LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
return is_strict(mode)
? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
@@ -833,13 +842,186 @@ Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
if (FLAG_tf_load_ic_stub) {
- return handle(Smi::FromInt(index.GetLoadByFieldOffset()), isolate());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
+ return LoadHandler::LoadField(isolate(), index);
}
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
LoadFieldStub stub(isolate(), index);
return stub.GetCode();
}
+namespace {
+
+template <bool fill_array = true>
+int InitPrototypeChecks(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSObject> holder, Handle<Name> name,
+ Handle<FixedArray> array, int first_index) {
+ DCHECK(holder.is_null() || holder->HasFastProperties());
+
+ // We don't encode the requirement to check access rights because we already
+ // passed the access check for current native context and the access
+ // can't be revoked.
+
+ HandleScope scope(isolate);
+ int checks_count = 0;
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ // The validity cell check for primitive and global proxy receivers does
+ // not guarantee that certain native context ever had access to other
+ // native context. However, a handler created for one native context could
+ // be used in other native context through the megamorphic stub cache.
+ // So we record the original native context to which this handler
+ // corresponds.
+ if (fill_array) {
+ Handle<Context> native_context = isolate->native_context();
+ array->set(LoadHandler::kFirstPrototypeIndex + checks_count,
+ native_context->self_weak_cell());
+ }
+ checks_count++;
+
+ } else if (receiver_map->IsJSGlobalObjectMap()) {
+ if (fill_array) {
+ Handle<JSGlobalObject> global = isolate->global_object();
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+ array->set(LoadHandler::kFirstPrototypeIndex + checks_count, *weak_cell);
+ }
+ checks_count++;
+ }
+
+ // Create/count entries for each global or dictionary prototype appeared in
+ // the prototype chain contains from receiver till holder.
+ PrototypeIterator::WhereToEnd end = name->IsPrivate()
+ ? PrototypeIterator::END_AT_NON_HIDDEN
+ : PrototypeIterator::END_AT_NULL;
+ for (PrototypeIterator iter(receiver_map, end); !iter.IsAtEnd();
+ iter.Advance()) {
+ Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
+ if (holder.is_identical_to(current)) break;
+ Handle<Map> current_map(current->map(), isolate);
+
+ if (current_map->IsJSGlobalObjectMap()) {
+ if (fill_array) {
+ Handle<JSGlobalObject> global = Handle<JSGlobalObject>::cast(current);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsureEmptyPropertyCell(
+ global, name, PropertyCellType::kInvalidated);
+ DCHECK(cell->value()->IsTheHole(isolate));
+ Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
+
+ } else if (current_map->is_dictionary_map()) {
+ DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
+ if (fill_array) {
+ DCHECK_EQ(NameDictionary::kNotFound,
+ current->property_dictionary()->FindEntry(name));
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate);
+ array->set(first_index + checks_count, *weak_cell);
+ }
+ checks_count++;
+ }
+ }
+ return checks_count;
+}
+
+// Returns 0 if the validity cell check is enough to ensure that the
+// prototype chain from |receiver_map| till |holder| did not change.
+// If the |holder| is an empty handle then the full prototype chain is
+// checked.
+// Returns -1 if the handler has to be compiled or the number of prototype
+// checks otherwise.
+int GetPrototypeCheckCount(Isolate* isolate, Handle<Map> receiver_map,
+ Handle<JSObject> holder, Handle<Name> name) {
+ return InitPrototypeChecks<false>(isolate, receiver_map, holder, name,
+ Handle<FixedArray>(), 0);
+}
+
+} // namespace
+
+Handle<Object> LoadIC::LoadFromPrototype(Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<Name> name,
+ Handle<Object> smi_handler) {
+ int checks_count =
+ GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler =
+ LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
+ } else if (receiver_map->is_dictionary_map() &&
+ !receiver_map->IsJSGlobalObjectMap()) {
+ smi_handler =
+ LoadHandler::EnableNegativeLookupOnReceiver(isolate(), smi_handler);
+ }
+
+ Handle<Cell> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ DCHECK(!validity_cell.is_null());
+
+ Handle<WeakCell> holder_cell =
+ Map::GetOrCreatePrototypeWeakCell(holder, isolate());
+
+ if (checks_count == 0) {
+ return isolate()->factory()->NewTuple3(holder_cell, smi_handler,
+ validity_cell);
+ }
+ Handle<FixedArray> handler_array(isolate()->factory()->NewFixedArray(
+ LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
+ handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
+ handler_array->set(LoadHandler::kHolderCellIndex, *holder_cell);
+ InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+ LoadHandler::kFirstPrototypeIndex);
+ return handler_array;
+}
+
+Handle<Object> LoadIC::LoadNonExistent(Handle<Map> receiver_map,
+ Handle<Name> name) {
+ Handle<JSObject> holder; // null handle
+ int checks_count =
+ GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+
+ bool do_negative_lookup_on_receiver =
+ receiver_map->is_dictionary_map() && !receiver_map->IsJSGlobalObjectMap();
+ Handle<Object> smi_handler =
+ LoadHandler::LoadNonExistent(isolate(), do_negative_lookup_on_receiver);
+
+ if (receiver_map->IsPrimitiveMap() || receiver_map->IsJSGlobalProxyMap()) {
+ DCHECK(!receiver_map->is_dictionary_map());
+ DCHECK_LE(1, checks_count); // For native context.
+ smi_handler =
+ LoadHandler::EnableAccessCheckOnReceiver(isolate(), smi_handler);
+ }
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+ validity_cell = handle(Smi::FromInt(0), isolate());
+ }
+
+ Factory* factory = isolate()->factory();
+ if (checks_count == 0) {
+ return factory->NewTuple3(factory->null_value(), smi_handler,
+ validity_cell);
+ }
+ Handle<FixedArray> handler_array(factory->NewFixedArray(
+ LoadHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(LoadHandler::kSmiHandlerIndex, *smi_handler);
+ handler_array->set(LoadHandler::kValidityCellIndex, *validity_cell);
+ handler_array->set(LoadHandler::kHolderCellIndex, *factory->null_value());
+ InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+ LoadHandler::kFirstPrototypeIndex);
+ return handler_array;
+}
bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
DCHECK(lookup->state() == LookupIterator::ACCESSOR);
@@ -884,6 +1066,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
+ TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
TRACE_IC("LoadIC", lookup->name());
return;
@@ -894,7 +1077,10 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
+ if (kind() == Code::LOAD_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
+ code = LoadNonExistent(receiver_map(), lookup->name());
+ } else if (kind() == Code::LOAD_GLOBAL_IC) {
code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
receiver_map());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -964,30 +1150,80 @@ StubCache* IC::stub_cache() {
return nullptr;
}
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* code) {
- if (code->IsSmi()) {
- // TODO(jkummerow): Support Smis in the code cache.
- Handle<Map> map_handle(map, isolate());
- Handle<Name> name_handle(name, isolate());
- FieldIndex index =
- FieldIndex::ForLoadByFieldOffset(map, Smi::cast(code)->value());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
- LoadFieldStub stub(isolate(), index);
- Code* handler = *stub.GetCode();
- stub_cache()->Set(*name_handle, *map_handle, handler);
- return;
+void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
+ stub_cache()->Set(name, map, handler);
+}
+
+void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
+ if (!FLAG_runtime_call_stats) return;
+
+ if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
+ kind() == Code::KEYED_LOAD_IC) {
+ switch (lookup->state()) {
+ case LookupIterator::ACCESS_CHECK:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_AccessCheck);
+ break;
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Exotic);
+ break;
+ case LookupIterator::INTERCEPTOR:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Interceptor);
+ break;
+ case LookupIterator::JSPROXY:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_JSProxy);
+ break;
+ case LookupIterator::NOT_FOUND:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_NonExistent);
+ break;
+ case LookupIterator::ACCESSOR:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Accessor);
+ break;
+ case LookupIterator::DATA:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Data);
+ break;
+ case LookupIterator::TRANSITION:
+ TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Transition);
+ break;
+ }
+ } else if (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC) {
+ switch (lookup->state()) {
+ case LookupIterator::ACCESS_CHECK:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_AccessCheck);
+ break;
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Exotic);
+ break;
+ case LookupIterator::INTERCEPTOR:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Interceptor);
+ break;
+ case LookupIterator::JSPROXY:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_JSProxy);
+ break;
+ case LookupIterator::NOT_FOUND:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_NonExistent);
+ break;
+ case LookupIterator::ACCESSOR:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Accessor);
+ break;
+ case LookupIterator::DATA:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Data);
+ break;
+ case LookupIterator::TRANSITION:
+ TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_Transition);
+ break;
+ }
+ } else {
+ TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
}
- DCHECK(code->IsCode());
- stub_cache()->Set(name, map, Code::cast(code));
}
Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
Handle<Object> value) {
// Try to find a globally shared handler stub.
- Handle<Object> handler_or_index = GetMapIndependentHandler(lookup);
- if (!handler_or_index.is_null()) {
- DCHECK(handler_or_index->IsCode() || handler_or_index->IsSmi());
- return handler_or_index;
+ Handle<Object> shared_handler = GetMapIndependentHandler(lookup);
+ if (!shared_handler.is_null()) {
+ DCHECK(IC::IsHandler(*shared_handler));
+ return shared_handler;
}
// Otherwise check the map's handler cache for a map-specific handler, and
@@ -1007,16 +1243,16 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
stub_holder_map = receiver_map();
}
- Handle<Code> code = PropertyHandlerCompiler::Find(
+ Handle<Object> handler = PropertyHandlerCompiler::Find(
lookup->name(), stub_holder_map, kind(), flag);
// Use the cached value if it exists, and if it is different from the
// handler that just missed.
- if (!code.is_null()) {
- Handle<Object> handler;
- if (maybe_handler_.ToHandle(&handler)) {
- if (!handler.is_identical_to(code)) {
- TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
- return code;
+ if (!handler.is_null()) {
+ Handle<Object> current_handler;
+ if (maybe_handler_.ToHandle(&current_handler)) {
+ if (!current_handler.is_identical_to(handler)) {
+ TraceHandlerCacheHitStats(lookup);
+ return handler;
}
} else {
// maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
@@ -1024,24 +1260,27 @@ Handle<Object> IC::ComputeHandler(LookupIterator* lookup,
// cache (which just missed) is different from the cached handler.
if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
- Code* megamorphic_cached_code = stub_cache()->Get(*lookup->name(), map);
- if (megamorphic_cached_code != *code) {
- TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
- return code;
+ Object* megamorphic_cached_handler =
+ stub_cache()->Get(*lookup->name(), map);
+ if (megamorphic_cached_handler != *handler) {
+ TraceHandlerCacheHitStats(lookup);
+ return handler;
}
} else {
- TRACE_HANDLER_STATS(isolate(), IC_HandlerCacheHit);
- return code;
+ TraceHandlerCacheHitStats(lookup);
+ return handler;
}
}
}
- code = CompileHandler(lookup, value, flag);
- DCHECK(code->is_handler());
- DCHECK(Code::ExtractCacheHolderFromFlags(code->flags()) == flag);
- Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
-
- return code;
+ handler = CompileHandler(lookup, value, flag);
+ DCHECK(IC::IsHandler(*handler));
+ if (handler->IsCode()) {
+ Handle<Code> code = Handle<Code>::cast(handler);
+ DCHECK_EQ(Code::ExtractCacheHolderFromFlags(code->flags()), flag);
+ Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+ }
+ return handler;
}
Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
@@ -1111,17 +1350,33 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// Ruled out by IsCompatibleReceiver() above.
DCHECK(AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map));
- if (!holder->HasFastProperties()) return slow_stub();
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
- int index = lookup->GetAccessorIndex();
- LoadApiGetterStub stub(isolate(), true, index);
- return stub.GetCode();
- }
- if (info->is_sloppy() && !receiver->IsJSReceiver()) {
+ if (!holder->HasFastProperties() ||
+ (info->is_sloppy() && !receiver->IsJSReceiver())) {
+ DCHECK(!holder->HasFastProperties() || !receiver_is_holder);
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
+ if (FLAG_tf_load_ic_stub) {
+ Handle<Object> smi_handler = LoadHandler::LoadApiGetter(
+ isolate(), lookup->GetAccessorIndex());
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+ return smi_handler;
+ }
+ if (kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(),
+ LoadIC_LoadApiGetterFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(),
+ smi_handler);
+ }
+ } else {
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
+ int index = lookup->GetAccessorIndex();
+ LoadApiGetterStub stub(isolate(), true, index);
+ return stub.GetCode();
+ }
+ }
break; // Custom-compiled handler.
}
}
@@ -1153,18 +1408,36 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
// -------------- Fields --------------
if (lookup->property_details().type() == DATA) {
FieldIndex field = lookup->GetFieldIndex();
+ Handle<Object> smi_handler = SimpleFieldLoad(field);
if (receiver_is_holder) {
- return SimpleFieldLoad(field);
+ return smi_handler;
+ }
+ if (FLAG_tf_load_ic_stub && kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
}
break; // Custom-compiled handler.
}
// -------------- Constant properties --------------
DCHECK(lookup->property_details().type() == DATA_CONSTANT);
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
- LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
- return stub.GetCode();
+ if (FLAG_tf_load_ic_stub) {
+ Handle<Object> smi_handler =
+ LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
+ return smi_handler;
+ }
+ if (kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
+ }
+ } else {
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
+ LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
+ return stub.GetCode();
+ }
}
break; // Custom-compiled handler.
}
@@ -1182,9 +1455,9 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
return Handle<Code>::null();
}
-Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> unused,
- CacheHolderFlag cache_holder) {
+Handle<Object> LoadIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> unused,
+ CacheHolderFlag cache_holder) {
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
#ifdef DEBUG
// Only used by DCHECKs below.
@@ -1229,6 +1502,10 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
DCHECK(IsCompatibleReceiver(lookup, map));
Handle<Object> accessors = lookup->GetAccessors();
if (accessors->IsAccessorPair()) {
+ if (lookup->TryLookupCachedProperty()) {
+ DCHECK_EQ(LookupIterator::DATA, lookup->state());
+ return ComputeHandler(lookup);
+ }
DCHECK(holder->HasFastProperties());
DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
@@ -1421,7 +1698,9 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if ((object->IsJSObject() && key->IsSmi()) ||
(object->IsString() && key->IsNumber())) {
UpdateLoadElement(Handle<HeapObject>::cast(object));
- TRACE_IC("LoadIC", key);
+ if (is_vector_set()) {
+ TRACE_IC("LoadIC", key);
+ }
}
}
@@ -1580,6 +1859,7 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (state() == UNINITIALIZED) {
// This is the first time we execute this inline cache. Set the target to
// the pre monomorphic stub to delay setting the monomorphic state.
+ TRACE_HANDLER_STATS(isolate(), StoreIC_Premonomorphic);
ConfigureVectorState(PREMONOMORPHIC, Handle<Object>());
TRACE_IC("StoreIC", lookup->name());
return;
@@ -1589,13 +1869,72 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
if (!use_ic) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
}
- Handle<Code> code =
- use_ic ? Handle<Code>::cast(ComputeHandler(lookup, value)) : slow_stub();
+ Handle<Object> handler = use_ic ? ComputeHandler(lookup, value)
+ : Handle<Object>::cast(slow_stub());
- PatchCache(lookup->name(), code);
+ PatchCache(lookup->name(), handler);
TRACE_IC("StoreIC", lookup->name());
}
+Handle<Object> StoreIC::StoreTransition(Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<Map> transition,
+ Handle<Name> name) {
+ int descriptor = transition->LastAdded();
+ Handle<DescriptorArray> descriptors(transition->instance_descriptors());
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ DCHECK(!representation.IsNone());
+
+ // Declarative handlers don't support access checks.
+ DCHECK(!transition->is_access_check_needed());
+
+ Handle<Object> smi_handler;
+ if (details.type() == DATA_CONSTANT) {
+ smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
+
+ } else {
+ DCHECK_EQ(DATA, details.type());
+ bool extend_storage =
+ Map::cast(transition->GetBackPointer())->unused_property_fields() == 0;
+
+ FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
+ smi_handler = StoreHandler::TransitionToField(
+ isolate(), descriptor, index, representation, extend_storage);
+ }
+ // |holder| is either a receiver if the property is non-existent or
+ // one of the prototypes.
+ DCHECK(!holder.is_null());
+ bool is_nonexistent = holder->map() == transition->GetBackPointer();
+ if (is_nonexistent) holder = Handle<JSObject>::null();
+
+ int checks_count =
+ GetPrototypeCheckCount(isolate(), receiver_map, holder, name);
+ DCHECK_LE(0, checks_count);
+ DCHECK(!receiver_map->IsJSGlobalObjectMap());
+
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ DCHECK_EQ(0, checks_count);
+ validity_cell = handle(Smi::FromInt(0), isolate());
+ }
+
+ Handle<WeakCell> transition_cell = Map::WeakCellForMap(transition);
+
+ Factory* factory = isolate()->factory();
+ if (checks_count == 0) {
+ return factory->NewTuple3(transition_cell, smi_handler, validity_cell);
+ }
+ Handle<FixedArray> handler_array(factory->NewFixedArray(
+ StoreHandler::kFirstPrototypeIndex + checks_count, TENURED));
+ handler_array->set(StoreHandler::kSmiHandlerIndex, *smi_handler);
+ handler_array->set(StoreHandler::kValidityCellIndex, *validity_cell);
+ handler_array->set(StoreHandler::kTransitionCellIndex, *transition_cell);
+ InitPrototypeChecks(isolate(), receiver_map, holder, name, handler_array,
+ StoreHandler::kFirstPrototypeIndex);
+ return handler_array;
+}
static Handle<Code> PropertyCellStoreHandler(
Isolate* isolate, Handle<JSObject> receiver, Handle<JSGlobalObject> holder,
@@ -1632,8 +1971,13 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
}
-
DCHECK(lookup->IsCacheableTransition());
+ if (FLAG_tf_store_ic_stub) {
+ Handle<Map> transition = lookup->transition_map();
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+ return StoreTransition(receiver_map(), holder, transition,
+ lookup->name());
+ }
break; // Custom-compiled handler.
}
@@ -1711,17 +2055,25 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
// -------------- Fields --------------
if (lookup->property_details().type() == DATA) {
- bool use_stub = true;
- if (lookup->representation().IsHeapObject()) {
- // Only use a generic stub if no types need to be tracked.
- Handle<FieldType> field_type = lookup->GetFieldType();
- use_stub = !field_type->IsClass();
- }
- if (use_stub) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- return stub.GetCode();
+ if (FLAG_tf_store_ic_stub) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
+ int descriptor = lookup->GetFieldDescriptorIndex();
+ FieldIndex index = lookup->GetFieldIndex();
+ return StoreHandler::StoreField(isolate(), descriptor, index,
+ lookup->representation());
+ } else {
+ bool use_stub = true;
+ if (lookup->representation().IsHeapObject()) {
+ // Only use a generic stub if no types need to be tracked.
+ Handle<FieldType> field_type = lookup->GetFieldType();
+ use_stub = !field_type->IsClass();
+ }
+ if (use_stub) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
+ StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+ lookup->representation());
+ return stub.GetCode();
+ }
}
break; // Custom-compiled handler.
}
@@ -1742,9 +2094,9 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
return Handle<Code>::null();
}
-Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
+Handle<Object> StoreIC::CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
// This is currently guaranteed by checks in StoreIC::Store.
@@ -1765,6 +2117,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
cell->set_value(isolate()->heap()->the_hole_value());
return code;
}
+ DCHECK(!FLAG_tf_store_ic_stub);
Handle<Map> transition = lookup->transition_map();
// Currently not handled by CompileStoreTransition.
DCHECK(holder->HasFastProperties());
@@ -1836,6 +2189,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
// -------------- Fields --------------
if (lookup->property_details().type() == DATA) {
+ DCHECK(!FLAG_tf_store_ic_stub);
#ifdef DEBUG
bool use_stub = true;
if (lookup->representation().IsHeapObject()) {
@@ -1981,7 +2335,6 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
}
- TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_Polymorphic);
MapHandleList transitioned_maps(target_receiver_maps.length());
CodeHandleList handlers(target_receiver_maps.length());
PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
@@ -2241,7 +2594,6 @@ void CallIC::HandleMiss(Handle<Object> function) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2258,7 +2610,6 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2279,7 +2630,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
} else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
Handle<Name> key(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*key, *isolate->factory()->empty_string());
+ DCHECK_NE(*key, isolate->heap()->empty_string());
DCHECK_EQ(*isolate->global_object(), *receiver);
LoadGlobalICNexus nexus(vector, vector_slot);
LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2298,7 +2649,6 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2309,7 +2659,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
vector->GetKind(vector_slot));
Handle<String> name(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*name, *isolate->factory()->empty_string());
+ DCHECK_NE(*name, isolate->heap()->empty_string());
LoadGlobalICNexus nexus(vector, vector_slot);
LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2330,7 +2680,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
vector->GetKind(vector_slot));
Handle<String> name(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*name, *isolate->factory()->empty_string());
+ DCHECK_NE(*name, isolate->heap()->empty_string());
Handle<JSGlobalObject> global = isolate->global_object();
@@ -2343,7 +2693,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
script_contexts, lookup_result.context_index);
Handle<Object> result =
FixedArray::get(*script_context, lookup_result.slot_index, isolate);
- if (*result == *isolate->factory()->the_hole_value()) {
+ if (*result == isolate->heap()->the_hole_value()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
}
@@ -2370,7 +2720,6 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
// Used from ic-<arch>.cc
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2387,7 +2736,6 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
typedef LoadWithVectorDescriptor Descriptor;
DCHECK_EQ(Descriptor::kParameterCount, args.length());
@@ -2406,7 +2754,6 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2434,7 +2781,6 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
@@ -2470,7 +2816,6 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
// Runtime functions don't follow the IC's calling convention.
Handle<Object> object = args.at<Object>(0);
@@ -2609,7 +2954,6 @@ MaybeHandle<Object> BinaryOpIC::Transition(
RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
typedef BinaryOpDescriptor Descriptor;
@@ -2622,7 +2966,6 @@ RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
@@ -2686,7 +3029,6 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -2711,7 +3053,6 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
DCHECK(args.length() == 1);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
@@ -2729,7 +3070,7 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
HandleScope scope(isolate);
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
RETURN_RESULT_OR_FAILURE(
isolate, Runtime::SetObjectProperty(isolate, receiver, name, value,
language_mode));
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index bf395f1f2a..9e69cc85d0 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -81,6 +81,8 @@ class IC {
static InlineCacheState StateFromCode(Code* code);
+ static inline bool IsHandler(Object* object);
+
protected:
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
@@ -138,6 +140,8 @@ class IC {
static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
static void PostPatching(Address address, Code* target, Code* old_target);
+ void TraceHandlerCacheHitStats(LookupIterator* lookup);
+
// Compute the handler either by compiling or by retrieving a cached version.
Handle<Object> ComputeHandler(LookupIterator* lookup,
Handle<Object> value = Handle<Code>::null());
@@ -145,11 +149,11 @@ class IC {
UNREACHABLE();
return Handle<Code>::null();
}
- virtual Handle<Code> CompileHandler(LookupIterator* lookup,
- Handle<Object> value,
- CacheHolderFlag cache_holder) {
+ virtual Handle<Object> CompileHandler(LookupIterator* lookup,
+ Handle<Object> value,
+ CacheHolderFlag cache_holder) {
UNREACHABLE();
- return Handle<Code>::null();
+ return Handle<Object>::null();
}
void UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name);
@@ -303,12 +307,23 @@ class LoadIC : public IC {
Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
- Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
- CacheHolderFlag cache_holder) override;
+ Handle<Object> CompileHandler(LookupIterator* lookup, Handle<Object> unused,
+ CacheHolderFlag cache_holder) override;
private:
+ // Creates a data handler that represents a load of a field by given index.
Handle<Object> SimpleFieldLoad(FieldIndex index);
+ // Creates a data handler that represents a prototype chain check followed
+ // by given Smi-handler that encoded a load from the holder.
+ // Can be used only if GetPrototypeCheckCount() returns non negative value.
+ Handle<Object> LoadFromPrototype(Handle<Map> receiver_map,
+ Handle<JSObject> holder, Handle<Name> name,
+ Handle<Object> smi_handler);
+
+ // Creates a data handler that represents a load of a non-existent property.
+ Handle<Object> LoadNonExistent(Handle<Map> receiver_map, Handle<Name> name);
+
friend class IC;
};
@@ -341,10 +356,6 @@ class KeyedLoadIC : public LoadIC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateMegamorphic(MacroAssembler* masm);
-
- static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
- ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
@@ -402,10 +413,14 @@ class StoreIC : public IC {
void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode);
Handle<Object> GetMapIndependentHandler(LookupIterator* lookup) override;
- Handle<Code> CompileHandler(LookupIterator* lookup, Handle<Object> value,
- CacheHolderFlag cache_holder) override;
+ Handle<Object> CompileHandler(LookupIterator* lookup, Handle<Object> value,
+ CacheHolderFlag cache_holder) override;
private:
+ Handle<Object> StoreTransition(Handle<Map> receiver_map,
+ Handle<JSObject> holder,
+ Handle<Map> transition, Handle<Name> name);
+
friend class IC;
};
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
new file mode 100644
index 0000000000..30faba85e9
--- /dev/null
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -0,0 +1,549 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/keyed-store-generic.h"
+
+#include "src/compiler/code-assembler.h"
+#include "src/contexts.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::Node;
+
+class KeyedStoreGenericAssembler : public CodeStubAssembler {
+ public:
+ void KeyedStoreGeneric(const StoreICParameters* p,
+ LanguageMode language_mode);
+
+ private:
+ enum UpdateLength {
+ kDontChangeLength,
+ kIncrementLengthByOne,
+ kBumpLengthWithGap
+ };
+
+ void EmitGenericElementStore(Node* receiver, Node* receiver_map,
+ Node* instance_type, Node* intptr_index,
+ Node* value, Node* context, Label* slow);
+
+ void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
+ const StoreICParameters* p, Label* slow);
+
+ void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
+ Label* non_fast_elements,
+ Label* only_fast_elements);
+
+ void TryRewriteElements(Node* receiver, Node* receiver_map, Node* elements,
+ Node* native_context, ElementsKind from_kind,
+ ElementsKind to_kind, Label* bailout);
+
+ void StoreElementWithCapacity(Node* receiver, Node* receiver_map,
+ Node* elements, Node* elements_kind,
+ Node* intptr_index, Node* value, Node* context,
+ Label* slow, UpdateLength update_length);
+
+ void MaybeUpdateLengthAndReturn(Node* receiver, Node* index, Node* value,
+ UpdateLength update_length);
+
+ void TryChangeToHoleyMapHelper(Node* receiver, Node* receiver_map,
+ Node* native_context, ElementsKind packed_kind,
+ ElementsKind holey_kind, Label* done,
+ Label* map_mismatch, Label* bailout);
+ void TryChangeToHoleyMap(Node* receiver, Node* receiver_map,
+ Node* current_elements_kind, Node* context,
+ ElementsKind packed_kind, Label* bailout);
+ void TryChangeToHoleyMapMulti(Node* receiver, Node* receiver_map,
+ Node* current_elements_kind, Node* context,
+ ElementsKind packed_kind,
+ ElementsKind packed_kind_2, Label* bailout);
+
+ // Do not add fields, so that this is safe to reinterpret_cast to CSA.
+};
+
+void KeyedStoreGenericGenerator::Generate(
+ CodeStubAssembler* assembler, const CodeStubAssembler::StoreICParameters* p,
+ LanguageMode language_mode) {
+ STATIC_ASSERT(sizeof(CodeStubAssembler) ==
+ sizeof(KeyedStoreGenericAssembler));
+ auto assm = reinterpret_cast<KeyedStoreGenericAssembler*>(assembler);
+ assm->KeyedStoreGeneric(p, language_mode);
+}
+
+void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
+ Node* receiver_map, Label* non_fast_elements, Label* only_fast_elements) {
+ Variable var_map(this, MachineRepresentation::kTagged);
+ var_map.Bind(receiver_map);
+ Label loop_body(this, &var_map);
+ Goto(&loop_body);
+
+ Bind(&loop_body);
+ {
+ Node* map = var_map.value();
+ Node* prototype = LoadMapPrototype(map);
+ GotoIf(WordEqual(prototype, NullConstant()), only_fast_elements);
+ Node* prototype_map = LoadMap(prototype);
+ var_map.Bind(prototype_map);
+ Node* instance_type = LoadMapInstanceType(prototype_map);
+ STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
+ STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ non_fast_elements);
+ Node* elements_kind = LoadMapElementsKind(prototype_map);
+ STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &loop_body);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
+ Goto(non_fast_elements);
+ }
+}
+
+void KeyedStoreGenericAssembler::TryRewriteElements(
+ Node* receiver, Node* receiver_map, Node* elements, Node* native_context,
+ ElementsKind from_kind, ElementsKind to_kind, Label* bailout) {
+ DCHECK(IsFastPackedElementsKind(from_kind));
+ ElementsKind holey_from_kind = GetHoleyElementsKind(from_kind);
+ ElementsKind holey_to_kind = GetHoleyElementsKind(to_kind);
+ if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ TrapAllocationMemento(receiver, bailout);
+ }
+ Label perform_transition(this), check_holey_map(this);
+ Variable var_target_map(this, MachineType::PointerRepresentation());
+ // Check if the receiver has the default |from_kind| map.
+ {
+ Node* packed_map =
+ LoadContextElement(native_context, Context::ArrayMapIndex(from_kind));
+ GotoIf(WordNotEqual(receiver_map, packed_map), &check_holey_map);
+ var_target_map.Bind(
+ LoadContextElement(native_context, Context::ArrayMapIndex(to_kind)));
+ Goto(&perform_transition);
+ }
+
+ // Check if the receiver has the default |holey_from_kind| map.
+ Bind(&check_holey_map);
+ {
+ Node* holey_map = LoadContextElement(
+ native_context, Context::ArrayMapIndex(holey_from_kind));
+ GotoIf(WordNotEqual(receiver_map, holey_map), bailout);
+ var_target_map.Bind(LoadContextElement(
+ native_context, Context::ArrayMapIndex(holey_to_kind)));
+ Goto(&perform_transition);
+ }
+
+ // Found a supported transition target map, perform the transition!
+ Bind(&perform_transition);
+ {
+ if (IsFastDoubleElementsKind(from_kind) !=
+ IsFastDoubleElementsKind(to_kind)) {
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
+ capacity, INTPTR_PARAMETERS, bailout);
+ }
+ StoreObjectField(receiver, JSObject::kMapOffset, var_target_map.value());
+ }
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
+ Node* receiver, Node* receiver_map, Node* native_context,
+ ElementsKind packed_kind, ElementsKind holey_kind, Label* done,
+ Label* map_mismatch, Label* bailout) {
+ Node* packed_map =
+ LoadContextElement(native_context, Context::ArrayMapIndex(packed_kind));
+ GotoIf(WordNotEqual(receiver_map, packed_map), map_mismatch);
+ if (AllocationSite::GetMode(packed_kind, holey_kind) ==
+ TRACK_ALLOCATION_SITE) {
+ TrapAllocationMemento(receiver, bailout);
+ }
+ Node* holey_map =
+ LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
+ StoreObjectField(receiver, JSObject::kMapOffset, holey_map);
+ Goto(done);
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMap(
+ Node* receiver, Node* receiver_map, Node* current_elements_kind,
+ Node* context, ElementsKind packed_kind, Label* bailout) {
+ ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
+ Label already_holey(this);
+
+ GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
+ &already_holey);
+ Node* native_context = LoadNativeContext(context);
+ TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
+ holey_kind, &already_holey, bailout, bailout);
+ Bind(&already_holey);
+}
+
+void KeyedStoreGenericAssembler::TryChangeToHoleyMapMulti(
+ Node* receiver, Node* receiver_map, Node* current_elements_kind,
+ Node* context, ElementsKind packed_kind, ElementsKind packed_kind_2,
+ Label* bailout) {
+ ElementsKind holey_kind = GetHoleyElementsKind(packed_kind);
+ ElementsKind holey_kind_2 = GetHoleyElementsKind(packed_kind_2);
+ Label already_holey(this), check_other_kind(this);
+
+ GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind)),
+ &already_holey);
+ GotoIf(Word32Equal(current_elements_kind, Int32Constant(holey_kind_2)),
+ &already_holey);
+
+ Node* native_context = LoadNativeContext(context);
+ TryChangeToHoleyMapHelper(receiver, receiver_map, native_context, packed_kind,
+ holey_kind, &already_holey, &check_other_kind,
+ bailout);
+ Bind(&check_other_kind);
+ TryChangeToHoleyMapHelper(receiver, receiver_map, native_context,
+ packed_kind_2, holey_kind_2, &already_holey,
+ bailout, bailout);
+ Bind(&already_holey);
+}
+
+void KeyedStoreGenericAssembler::MaybeUpdateLengthAndReturn(
+ Node* receiver, Node* index, Node* value, UpdateLength update_length) {
+ if (update_length != kDontChangeLength) {
+ Node* new_length = SmiTag(IntPtrAdd(index, IntPtrConstant(1)));
+ StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset, new_length,
+ MachineRepresentation::kTagged);
+ }
+ Return(value);
+}
+
+void KeyedStoreGenericAssembler::StoreElementWithCapacity(
+ Node* receiver, Node* receiver_map, Node* elements, Node* elements_kind,
+ Node* intptr_index, Node* value, Node* context, Label* slow,
+ UpdateLength update_length) {
+ if (update_length != kDontChangeLength) {
+ CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
+ Int32Constant(JS_ARRAY_TYPE)));
+ }
+ STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
+ const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
+
+ Label check_double_elements(this), check_cow_elements(this);
+ Node* elements_map = LoadMap(elements);
+ GotoIf(WordNotEqual(elements_map, LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &check_double_elements);
+
+ // FixedArray backing store -> Smi or object elements.
+ {
+ Node* offset = ElementOffsetFromIndex(intptr_index, FAST_ELEMENTS,
+ INTPTR_PARAMETERS, kHeaderSize);
+ // Check if we're about to overwrite the hole. We can safely do that
+ // only if there can be no setters on the prototype chain.
+ // If we know that we're storing beyond the previous array length, we
+ // can skip the hole check (and always assume the hole).
+ {
+ Label hole_check_passed(this);
+ if (update_length == kDontChangeLength) {
+ Node* element = Load(MachineType::AnyTagged(), elements, offset);
+ GotoIf(WordNotEqual(element, TheHoleConstant()), &hole_check_passed);
+ }
+ BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
+ &hole_check_passed);
+ Bind(&hole_check_passed);
+ }
+
+ // Check if the value we're storing matches the elements_kind. Smis
+ // can always be stored.
+ {
+ Label non_smi_value(this);
+ GotoUnless(TaggedIsSmi(value), &non_smi_value);
+ // If we're about to introduce holes, ensure holey elements.
+ if (update_length == kBumpLengthWithGap) {
+ TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
+ FAST_SMI_ELEMENTS, FAST_ELEMENTS, slow);
+ }
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
+ value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+ Bind(&non_smi_value);
+ }
+
+ // Check if we already have object elements; just do the store if so.
+ {
+ Label must_transition(this);
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ &must_transition);
+ if (update_length == kBumpLengthWithGap) {
+ TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
+ FAST_ELEMENTS, slow);
+ }
+ Store(MachineRepresentation::kTagged, elements, offset, value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+ Bind(&must_transition);
+ }
+
+ // Transition to the required ElementsKind.
+ {
+ Label transition_to_double(this), transition_to_object(this);
+ Node* native_context = LoadNativeContext(context);
+ Branch(WordEqual(LoadMap(value), LoadRoot(Heap::kHeapNumberMapRootIndex)),
+ &transition_to_double, &transition_to_object);
+ Bind(&transition_to_double);
+ {
+ // If we're adding holes at the end, always transition to a holey
+ // elements kind, otherwise try to remain packed.
+ ElementsKind target_kind = update_length == kBumpLengthWithGap
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+ TryRewriteElements(receiver, receiver_map, elements, native_context,
+ FAST_SMI_ELEMENTS, target_kind, slow);
+ // Reload migrated elements.
+ Node* double_elements = LoadElements(receiver);
+ Node* double_offset = ElementOffsetFromIndex(
+ intptr_index, FAST_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ // Make sure we do not store signalling NaNs into double arrays.
+ Node* double_value = Float64SilenceNaN(LoadHeapNumberValue(value));
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, double_elements,
+ double_offset, double_value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
+ update_length);
+ }
+
+ Bind(&transition_to_object);
+ {
+ // If we're adding holes at the end, always transition to a holey
+ // elements kind, otherwise try to remain packed.
+ ElementsKind target_kind = update_length == kBumpLengthWithGap
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ TryRewriteElements(receiver, receiver_map, elements, native_context,
+ FAST_SMI_ELEMENTS, target_kind, slow);
+ // The elements backing store didn't change, no reload necessary.
+ CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
+ Store(MachineRepresentation::kTagged, elements, offset, value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
+ update_length);
+ }
+ }
+ }
+
+ Bind(&check_double_elements);
+ Node* fixed_double_array_map = LoadRoot(Heap::kFixedDoubleArrayMapRootIndex);
+ GotoIf(WordNotEqual(elements_map, fixed_double_array_map),
+ &check_cow_elements);
+ // FixedDoubleArray backing store -> double elements.
+ {
+ Node* offset = ElementOffsetFromIndex(intptr_index, FAST_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, kHeaderSize);
+ // Check if we're about to overwrite the hole. We can safely do that
+ // only if there can be no setters on the prototype chain.
+ {
+ Label hole_check_passed(this);
+ // If we know that we're storing beyond the previous array length, we
+ // can skip the hole check (and always assume the hole).
+ if (update_length == kDontChangeLength) {
+ Label found_hole(this);
+ LoadDoubleWithHoleCheck(elements, offset, &found_hole,
+ MachineType::None());
+ Goto(&hole_check_passed);
+ Bind(&found_hole);
+ }
+ BranchIfPrototypesHaveNonFastElements(receiver_map, slow,
+ &hole_check_passed);
+ Bind(&hole_check_passed);
+ }
+
+ // Try to store the value as a double.
+ {
+ Label non_number_value(this);
+ Node* double_value = PrepareValueForWrite(value, Representation::Double(),
+ &non_number_value);
+ // Make sure we do not store signalling NaNs into double arrays.
+ double_value = Float64SilenceNaN(double_value);
+ // If we're about to introduce holes, ensure holey elements.
+ if (update_length == kBumpLengthWithGap) {
+ TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
+ FAST_DOUBLE_ELEMENTS, slow);
+ }
+ StoreNoWriteBarrier(MachineRepresentation::kFloat64, elements, offset,
+ double_value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+
+ Bind(&non_number_value);
+ }
+
+ // Transition to object elements.
+ {
+ Node* native_context = LoadNativeContext(context);
+ ElementsKind target_kind = update_length == kBumpLengthWithGap
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ TryRewriteElements(receiver, receiver_map, elements, native_context,
+ FAST_DOUBLE_ELEMENTS, target_kind, slow);
+ // Reload migrated elements.
+ Node* fast_elements = LoadElements(receiver);
+ Node* fast_offset = ElementOffsetFromIndex(
+ intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
+ Store(MachineRepresentation::kTagged, fast_elements, fast_offset, value);
+ MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
+ }
+ }
+
+ Bind(&check_cow_elements);
+ {
+ // TODO(jkummerow): Use GrowElementsCapacity instead of bailing out.
+ Goto(slow);
+ }
+}
+
+void KeyedStoreGenericAssembler::EmitGenericElementStore(
+ Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
+ Node* value, Node* context, Label* slow) {
+ Label if_in_bounds(this), if_increment_length_by_one(this),
+ if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
+ if_typed_array(this), if_dictionary(this);
+ Node* elements = LoadElements(receiver);
+ Node* elements_kind = LoadMapElementsKind(receiver_map);
+ GotoIf(
+ Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_nonfast);
+
+ Label if_array(this);
+ GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
+ {
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ Branch(UintPtrLessThan(intptr_index, capacity), &if_in_bounds, &if_grow);
+ }
+ Bind(&if_array);
+ {
+ Node* length = SmiUntag(LoadJSArrayLength(receiver));
+ GotoIf(UintPtrLessThan(intptr_index, length), &if_in_bounds);
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ GotoIf(UintPtrGreaterThanOrEqual(intptr_index, capacity), &if_grow);
+ Branch(WordEqual(intptr_index, length), &if_increment_length_by_one,
+ &if_bump_length_with_gap);
+ }
+
+ Bind(&if_in_bounds);
+ {
+ StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+ intptr_index, value, context, slow,
+ kDontChangeLength);
+ }
+
+ Bind(&if_increment_length_by_one);
+ {
+ StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+ intptr_index, value, context, slow,
+ kIncrementLengthByOne);
+ }
+
+ Bind(&if_bump_length_with_gap);
+ {
+ StoreElementWithCapacity(receiver, receiver_map, elements, elements_kind,
+ intptr_index, value, context, slow,
+ kBumpLengthWithGap);
+ }
+
+ // Out-of-capacity accesses (index >= capacity) jump here. Additionally,
+ // an ElementsKind transition might be necessary.
+ Bind(&if_grow);
+ {
+ Comment("Grow backing store");
+ // TODO(jkummerow): Support inline backing store growth.
+ Goto(slow);
+ }
+
+ // Any ElementsKind > LAST_FAST_ELEMENTS_KIND jumps here for further dispatch.
+ Bind(&if_nonfast);
+ {
+ STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ GotoIf(Int32GreaterThanOrEqual(
+ elements_kind,
+ Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+ &if_typed_array);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ &if_dictionary);
+ Goto(slow);
+ }
+
+ Bind(&if_dictionary);
+ {
+ Comment("Dictionary");
+ // TODO(jkummerow): Support storing to dictionary elements.
+ Goto(slow);
+ }
+
+ Bind(&if_typed_array);
+ {
+ Comment("Typed array");
+ // TODO(jkummerow): Support typed arrays.
+ Goto(slow);
+ }
+}
+
+void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
+ Node* receiver, Node* receiver_map, const StoreICParameters* p,
+ Label* slow) {
+ Comment("stub cache probe");
+ // TODO(jkummerow): Don't rely on the stub cache as much.
+ // - existing properties can be overwritten inline (unless readonly).
+ // - for dictionary mode receivers, we can even add properties inline
+ // (unless the prototype chain prevents it).
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
+ &found_handler, &var_handler, &stub_cache_miss);
+ Bind(&found_handler);
+ {
+ Comment("KeyedStoreGeneric found handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), slow);
+ }
+ Bind(&stub_cache_miss);
+ {
+ Comment("KeyedStoreGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
+ LanguageMode language_mode) {
+ Variable var_index(this, MachineType::PointerRepresentation());
+ Label if_index(this), if_unique_name(this), slow(this);
+
+ Node* receiver = p->receiver;
+ GotoIf(TaggedIsSmi(receiver), &slow);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ // Receivers requiring non-standard element accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ &slow);
+
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &slow);
+
+ Bind(&if_index);
+ {
+ Comment("integer index");
+ EmitGenericElementStore(receiver, receiver_map, instance_type,
+ var_index.value(), p->value, p->context, &slow);
+ }
+
+ Bind(&if_unique_name);
+ {
+ Comment("key is unique name");
+ EmitGenericPropertyStore(receiver, receiver_map, p, &slow);
+ }
+
+ Bind(&slow);
+ {
+ Comment("KeyedStoreGeneric_slow");
+ TailCallRuntime(Runtime::kSetProperty, p->context, p->receiver, p->name,
+ p->value, SmiConstant(language_mode));
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
new file mode 100644
index 0000000000..daeb61fe68
--- /dev/null
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
+#define V8_SRC_IC_KEYED_STORE_GENERIC_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class KeyedStoreGenericGenerator {
+ public:
+ static void Generate(CodeStubAssembler* assembler,
+ const CodeStubAssembler::StoreICParameters* p,
+ LanguageMode language_mode);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SRC_IC_KEYED_STORE_GENERIC_H_
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index 2aa0283485..1c97ca3cad 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a0, t0};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, a3, a0, t0};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, t0};
- return registers;
-}
+ Register store_registers[] = {receiver, name, a3, t0};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index df7a0df175..b2ddea5dac 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -393,10 +393,30 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ lw(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+
+ if (!compare_native_contexts_only) {
+ __ Branch(&done, eq, scratch1, Operand(scratch2));
+
+ // Compare security tokens of current and expected native contexts.
+ __ lw(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ lw(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ }
+ __ Branch(miss, ne, scratch1, Operand(scratch2));
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -415,17 +435,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ lw(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch1, Operand(scratch2));
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -435,46 +444,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -482,7 +473,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -493,7 +484,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index ce9e3d9403..561c9d331b 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -19,16 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -129,141 +119,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&in_bounds, lo, key, Operand(scratch1));
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- // Negative keys can't take the fast OOB path.
- __ Branch(slow, lt, key, Operand(zero_reg));
- __ bind(&check_prototypes);
- __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(&absent, eq, scratch2, Operand(at));
- __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(slow, ne, elements, Operand(at));
- __ Branch(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ Addu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
- __ lw(scratch2, MemOperand(at));
-
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ Branch(&check_prototypes, eq, scratch2, Operand(at));
- __ Move(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ GetObjectType(key, map, hash);
- __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
- // Is the string an array index, with cached numeric value?
- __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ Branch(index_string, eq, at, Operand(zero_reg));
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ And(at, hash, Operand(kIsNotInternalizedMask));
- __ Branch(not_unique, ne, at, Operand(zero_reg));
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -345,105 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in ra.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(a2));
- DCHECK(receiver.is(a1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(a0, a3, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
- a3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // a3: elements map
- // t0: elements
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow, ne, a3, Operand(at));
- __ sra(a0, key, kSmiTagSize);
- __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
- __ Ret();
-
- // Slow case, key and receiver still in a2 and a1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
- a3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasNamedInterceptor, &slow);
-
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&probe_dictionary, eq, t0, Operand(at));
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, t0, t1,
- t2, t5);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // a3: elements
- __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
- // Load the property to v0.
- GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- t0, a3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(a3, key);
- // Now jump to the place where smi keys are handled.
- __ Branch(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index bf6c73e86f..16d7a3d790 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a0, a4};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, a3, a0, a4};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, a3, a4};
- return registers;
-}
+ Register store_registers[] = {receiver, name, a3, a4};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 2190f6d63e..249f8fedb3 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -393,10 +393,30 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ ld(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+
+ if (!compare_native_contexts_only) {
+ __ Branch(&done, eq, scratch1, Operand(scratch2));
+
+ // Compare security tokens of current and expected native contexts.
+ __ ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ }
+ __ Branch(miss, ne, scratch1, Operand(scratch2));
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -415,17 +435,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ ld(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch1, Operand(scratch2));
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -435,46 +444,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -482,7 +473,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -493,7 +484,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index c2f3cb6024..57efa350c8 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -19,16 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -128,142 +118,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&in_bounds, lo, key, Operand(scratch1));
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- // Negative keys can't take the fast OOB path.
- __ Branch(slow, lt, key, Operand(zero_reg));
- __ bind(&check_prototypes);
- __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ ld(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(&absent, eq, scratch2, Operand(at));
- __ ld(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ ld(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
- __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ Branch(slow, ne, at, Operand(zero_reg));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(slow, ne, elements, Operand(at));
- __ Branch(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ Daddu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ SmiScale(at, key, kPointerSizeLog2);
- __ daddu(at, at, scratch1);
- __ ld(scratch2, MemOperand(at));
-
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ Branch(&check_prototypes, eq, scratch2, Operand(at));
- __ Move(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ GetObjectType(key, map, hash);
- __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
- // Is the string an array index, with cached numeric value?
- __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
- __ Branch(index_string, eq, at, Operand(zero_reg));
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ And(at, hash, Operand(kIsNotInternalizedMask));
- __ Branch(not_unique, ne, at, Operand(zero_reg));
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -344,105 +198,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in ra.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(a2));
- DCHECK(receiver.is(a1));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(a0, a3, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
- a3);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // a3: elements map
- // a4: elements
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&slow, ne, a3, Operand(at));
- __ dsra32(a0, key, 0);
- __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
- __ Ret();
-
- // Slow case, key and receiver still in a2 and a1.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
- a3);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
- Map::kHasNamedInterceptor, &slow);
-
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&probe_dictionary, eq, a4, Operand(at));
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a4, a5, a6, t1));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, a4, a5,
- a6, t1);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // a3: elements
- __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
- // Load the property to v0.
- GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- a4, a3);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(a3, key);
- // Now jump to the place where smi keys are handled.
- __ Branch(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index 6143b4ce47..f78ef57e74 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -17,24 +17,22 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r6, r3, r7};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, r6, r3, r7};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r6, r7};
- return registers;
-}
+ Register store_registers[] = {receiver, name, r6, r7};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index aafdc77c9b..e0caaa6a1f 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -402,10 +402,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ LoadP(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ beq(&done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ LoadP(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ LoadP(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ bne(miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -424,17 +448,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -443,18 +456,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
@@ -466,23 +470,20 @@ Register PropertyHandlerCompiler::CheckPrototypes(
DCHECK(current_map->IsJSGlobalProxyMap() ||
!current_map->is_access_check_needed());
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -490,7 +491,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -501,7 +502,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 6dd788146b..359a6a42dd 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -19,18 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ beq(global_object);
- __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ beq(global_object);
-}
-
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -131,143 +119,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
- __ andi(r0, scratch,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ bne(slow, cr0);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmpi(scratch, Operand(JS_OBJECT_TYPE));
- __ blt(slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its protoypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmpl(key, scratch1);
- __ blt(&in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmpi(key, Operand::Zero());
- __ blt(slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ beq(&absent);
- __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ blt(slow);
- __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ bne(slow, cr0);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ bne(slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ addi(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- __ SmiToPtrArrayOffset(scratch2, key);
- __ LoadPX(scratch2, MemOperand(scratch2, scratch1));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ beq(&check_prototypes);
- __ mr(result, scratch2);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ bgt(not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ beq(&unique);
-
- // Is the string an array index, with cached numeric value?
- __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
- __ and_(r0, hash, r8, SetRC);
- __ beq(index_string, cr0);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ andi(r0, hash, Operand(kIsNotInternalizedMask));
- __ bne(not_unique, cr0);
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r3;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -349,107 +200,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(r5));
- DCHECK(receiver.is(r4));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r3, r6, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7,
- r6);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r6: elements map
- // r7: elements
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r6, ip);
- __ bne(&slow);
- __ SmiUntag(r3, key);
- __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
- __ Ret();
-
- // Slow case, key and receiver still in r3 and r4.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7,
- r6);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
- Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r7, ip);
- __ beq(&probe_dictionary);
-
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r7, r8,
- r9, r10);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r6: elements
- __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
- // Load the property to r3.
- GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- r7, r6);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r6, key);
- // Now jump to the place where smi keys are handled.
- __ b(&index_smi);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/s390/access-compiler-s390.cc b/deps/v8/src/ic/s390/access-compiler-s390.cc
index 0a3285d5aa..ed8c089b9c 100644
--- a/deps/v8/src/ic/s390/access-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/access-compiler-s390.cc
@@ -18,20 +18,21 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ Jump(code, RelocInfo::CODE_TARGET);
}
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r5, r2, r6};
- return registers;
-}
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, r5, r2, r6};
+
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, r5, r6};
- return registers;
+ Register store_registers[] = {receiver, name, r5, r6};
+
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
}
#undef __
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 504bacebaf..72658ec1d1 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -383,9 +383,34 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ LoadP(scratch1, NativeContextMemOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ CmpP(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ beq(&done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ LoadP(scratch1,
+ ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ LoadP(scratch2,
+ ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ CmpP(scratch1, scratch2);
+ }
+ __ bne(miss);
+
+ __ bind(&done);
+}
+
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -404,17 +429,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ LoadP(scratch1, FieldMemOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ b(ne, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -423,46 +437,29 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -470,7 +467,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -481,7 +478,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index 08eb3e4ff1..bd83af1f59 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -18,16 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ CmpP(type, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ beq(global_object);
- __ CmpP(type, Operand(JS_GLOBAL_PROXY_TYPE));
- __ beq(global_object);
-}
-
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
@@ -127,141 +117,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
kDontSaveFPRegs);
}
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- Register scratch,
- int interceptor_bit, Label* slow) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
- // Get the map of the receiver.
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check bit field.
- __ LoadlB(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
- __ mov(r0,
- Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ AndP(r0, scratch);
- __ bne(slow /*, cr0*/);
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing into string
- // objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ LoadlB(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ CmpP(scratch, Operand(JS_OBJECT_TYPE));
- __ blt(slow);
-}
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch1, Register scratch2,
- Register result, Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its protoypes.
- //
- // scratch1 - used to hold elements length, bit fields, base addresses.
- //
- // scratch2 - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
-
- // Check that the key (index) is within bounds.
- __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ CmpLogicalP(key, scratch1);
- __ blt(&in_bounds, Label::kNear);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ CmpP(key, Operand::Zero());
- __ blt(slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
- __ beq(&absent, Label::kNear);
- __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
- __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch2: map of current prototype
- __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
- __ blt(slow);
- __ LoadlB(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
- __ AndP(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ bne(slow);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ bne(slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- __ AddP(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // The key is a smi.
- __ SmiToPtrArrayOffset(scratch2, key);
- __ LoadP(scratch2, MemOperand(scratch2, scratch1));
- __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ beq(&check_prototypes);
- __ LoadRR(result, scratch2);
- __ bind(&done);
-}
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // The key is not a smi.
- Label unique;
- // Is it a name?
- __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
- __ bgt(not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ beq(&unique, Label::kNear);
-
- // Is the string an array index, with cached numeric value?
- __ LoadlW(hash, FieldMemOperand(key, Name::kHashFieldOffset));
- __ mov(r7, Operand(Name::kContainsCachedArrayIndexMask));
- __ AndP(r0, hash, r7);
- __ beq(index_string);
-
- // Is the string internalized? We know it's a string, so a single
- // bit test is enough.
- // map: key map
- __ LoadlB(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kInternalizedTag == 0);
- __ tmll(hash, Operand(kIsNotInternalizedMask));
- __ bne(not_unique);
-
- __ bind(&unique);
-}
-
void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r2;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
@@ -339,103 +194,6 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is in lr.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register key = LoadDescriptor::NameRegister();
- Register receiver = LoadDescriptor::ReceiverRegister();
- DCHECK(key.is(r4));
- DCHECK(receiver.is(r3));
-
- Isolate* isolate = masm->isolate();
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(r2, r5, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, r2, r5, r6, r2, &slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r6,
- r5);
- __ Ret();
-
- __ bind(&check_number_dictionary);
- __ LoadP(r6, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadP(r5, FieldMemOperand(r6, JSObject::kMapOffset));
-
- // Check whether the elements is a number dictionary.
- // r5: elements map
- // r6: elements
- __ CompareRoot(r5, Heap::kHashTableMapRootIndex);
- __ bne(&slow, Label::kNear);
- __ SmiUntag(r2, key);
- __ LoadFromNumberDictionary(&slow, r6, key, r2, r2, r5, r7);
- __ Ret();
-
- // Slow case, key and receiver still in r2 and r3.
- __ bind(&slow);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r6,
- r5);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, r2, r5, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r5,
- Map::kHasNamedInterceptor, &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ LoadP(r5, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ CompareRoot(r6, Heap::kHashTableMapRootIndex);
- __ beq(&probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, r6, r7,
- r8, r9);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // r5: elements
- __ LoadP(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
- // Load the property to r2.
- GenerateDictionaryLoad(masm, &slow, r5, key, r2, r7, r6);
- __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
- r6, r5);
- __ Ret();
-
- __ bind(&index_name);
- __ IndexFromHash(r5, key);
- // Now jump to the place where smi keys are handled.
- __ b(&index_smi);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index fe1adaaadb..84dbf48436 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -6,13 +6,18 @@
#include "src/ast/ast.h"
#include "src/base/bits.h"
+#include "src/ic/ic-inl.h"
#include "src/type-info.h"
namespace v8 {
namespace internal {
StubCache::StubCache(Isolate* isolate, Code::Kind ic_kind)
- : isolate_(isolate), ic_kind_(ic_kind) {}
+ : isolate_(isolate), ic_kind_(ic_kind) {
+ // Ensure the nullptr (aka Smi::kZero) which StubCache::Get() returns
+ // when the entry is not found is not considered as a handler.
+ DCHECK(!IC::IsHandler(nullptr));
+}
void StubCache::Initialize() {
DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
@@ -24,18 +29,23 @@ void StubCache::Initialize() {
namespace {
bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
- Code* code) {
- // Validate that the name does not move on scavenge, and that we
+ Object* handler) {
+ // Validate that the name and handler do not move on scavenge, and that we
// can use identity checks instead of structural equality checks.
DCHECK(!name->GetHeap()->InNewSpace(name));
+ DCHECK(!name->GetHeap()->InNewSpace(handler));
DCHECK(name->IsUniqueName());
DCHECK(name->HasHashCode());
- if (code) {
- Code::Flags expected_flags = Code::RemoveHolderFromFlags(
- Code::ComputeHandlerFlags(stub_cache->ic_kind()));
- Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
- DCHECK_EQ(expected_flags, flags);
- DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+ if (handler) {
+ DCHECK(IC::IsHandler(handler));
+ if (handler->IsCode()) {
+ Code* code = Code::cast(handler);
+ Code::Flags expected_flags = Code::RemoveHolderFromFlags(
+ Code::ComputeHandlerFlags(stub_cache->ic_kind()));
+ Code::Flags flags = Code::RemoveHolderFromFlags(code->flags());
+ DCHECK_EQ(expected_flags, flags);
+ DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(code->flags()));
+ }
}
return true;
}
@@ -43,17 +53,17 @@ bool CommonStubCacheChecks(StubCache* stub_cache, Name* name, Map* map,
} // namespace
#endif
-Code* StubCache::Set(Name* name, Map* map, Code* code) {
- DCHECK(CommonStubCacheChecks(this, name, map, code));
+Object* StubCache::Set(Name* name, Map* map, Object* handler) {
+ DCHECK(CommonStubCacheChecks(this, name, map, handler));
// Compute the primary entry.
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
- Code* old_code = primary->value;
+ Object* old_handler = primary->value;
// If the primary entry has useful data in it, we retire it to the
// secondary cache before overwriting it.
- if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+ if (old_handler != isolate_->builtins()->builtin(Builtins::kIllegal)) {
Map* old_map = primary->map;
int seed = PrimaryOffset(primary->key, old_map);
int secondary_offset = SecondaryOffset(primary->key, seed);
@@ -63,13 +73,13 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) {
// Update primary cache.
primary->key = name;
- primary->value = code;
+ primary->value = handler;
primary->map = map;
isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
- return code;
+ return handler;
}
-Code* StubCache::Get(Name* name, Map* map) {
+Object* StubCache::Get(Name* name, Map* map) {
DCHECK(CommonStubCacheChecks(this, name, map, nullptr));
int primary_offset = PrimaryOffset(name, map);
Entry* primary = entry(primary_, primary_offset);
@@ -81,7 +91,7 @@ Code* StubCache::Get(Name* name, Map* map) {
if (secondary->key == name && secondary->map == map) {
return secondary->value;
}
- return NULL;
+ return nullptr;
}
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index ebcff448ad..bdd7f4a3be 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -35,14 +35,14 @@ class StubCache {
public:
struct Entry {
Name* key;
- Code* value;
+ Object* value;
Map* map;
};
void Initialize();
// Access cache for entry hash(name, map).
- Code* Set(Name* name, Map* map, Code* code);
- Code* Get(Name* name, Map* map);
+ Object* Set(Name* name, Map* map, Object* handler);
+ Object* Get(Name* name, Map* map);
// Clear the lookup table (@ mark compact collection).
void Clear();
// Collect all maps that match the name.
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 2b292528c8..9e95b9506c 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -11,30 +11,27 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
Handle<Code> code) {
__ jmp(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, rax, rbx, rdi};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, rax, rbx, rdi};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, rbx, rdi};
- return registers;
-}
+ Register store_registers[] = {receiver, name, rbx, rdi};
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
+}
#undef __
} // namespace internal
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index f386fc5b65..36acccc007 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -401,10 +401,32 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ movp(scratch1, NativeContextOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmpp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ j(equal, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ movp(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ movp(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmpp(scratch1, scratch2);
+ }
+ __ j(not_equal, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -424,17 +446,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ movp(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
// Keep track of the current object in register reg. On the first
// iteration, reg is an alias for object_reg, on later iterations,
// it is an alias for holder_reg.
@@ -446,46 +457,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -493,7 +486,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -504,7 +497,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index d0445a229a..a916e22fa5 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
- __ j(equal, global_object);
- __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
- __ j(equal, global_object);
-}
-
-
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
@@ -133,237 +121,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- int interceptor_bit, Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Check that the object is some kind of JS object EXCEPT JS Value type.
- // In the case that the object is a value-wrapper object,
- // we enter the runtime system to make sure that indexing
- // into string objects work as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
- __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
- __ j(below, slow);
-
- // Check bit field.
- __ testb(
- FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register elements,
- Register scratch, Register result,
- Label* slow) {
- // Register use:
- //
- // receiver - holds the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the the same as 'receiver' or 'key'.
- // Unchanged on bailout so 'receiver' and 'key' can be safely
- // used by further computation.
- //
- // Scratch registers:
- //
- // elements - holds the elements of the receiver and its prototypes.
- //
- // scratch - used to hold maps, prototypes, and the loaded value.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(elements);
- // Check that the key (index) is within bounds.
- __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
- // Unsigned comparison rejects negative indices.
- __ j(below, &in_bounds);
-
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ SmiCompare(key, Smi::FromInt(0));
- __ j(less, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- // scratch: current prototype
- __ CompareRoot(scratch, Heap::kNullValueRootIndex);
- __ j(equal, &absent);
- __ movp(elements, FieldOperand(scratch, JSObject::kElementsOffset));
- __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- // elements: elements of current prototype
- // scratch: map of current prototype
- __ CmpInstanceType(scratch, JS_OBJECT_TYPE);
- __ j(below, slow);
- __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ j(not_zero, slow);
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ j(not_equal, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
- __ movp(scratch, FieldOperand(elements, index.reg, index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ j(equal, &check_prototypes);
- __ Move(result, scratch);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string); // The value in hash is used at jump target.
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from below
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(rax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ SmiToInteger32(rbx, key);
- __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // rbx: key as untagged int32
- // rax: elements
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(not_equal, &slow);
- __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
- __ ret(0);
-
- __ bind(&slow);
- // Slow case: Jump to runtime.
- __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
- &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &probe_dictionary);
-
- Register megamorphic_scratch = rdi;
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!AreAliased(megamorphic_scratch, vector, slot));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ Move(vector, dummy_vector);
- __ Move(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, key, megamorphic_scratch, no_reg);
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
- // rbx: elements
-
- __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
- __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(rbx, key);
- __ jmp(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index e528de65ba..d1867553cd 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -16,22 +16,21 @@ void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
__ jmp(code, RelocInfo::CODE_TARGET);
}
-
-Register* PropertyAccessCompiler::load_calling_convention() {
- // receiver, name, scratch1, scratch2, scratch3.
+void PropertyAccessCompiler::InitializePlatformSpecific(
+ AccessCompilerData* data) {
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, eax, edi};
- return registers;
-}
+ // Load calling convention.
+ // receiver, name, scratch1, scratch2, scratch3.
+ Register load_registers[] = {receiver, name, ebx, eax, edi};
-Register* PropertyAccessCompiler::store_calling_convention() {
+ // Store calling convention.
// receiver, name, scratch1, scratch2.
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- static Register registers[] = {receiver, name, ebx, edi};
- return registers;
+ Register store_registers[] = {receiver, name, ebx, edi};
+
+ data->Initialize(arraysize(load_registers), load_registers,
+ arraysize(store_registers), store_registers);
}
#undef __
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 5eca3dc0cb..a5c32d37cc 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -411,10 +411,32 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
}
}
+void PropertyHandlerCompiler::GenerateAccessCheck(
+ Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
+ Label* miss, bool compare_native_contexts_only) {
+ Label done;
+ // Load current native context.
+ __ mov(scratch1, NativeContextOperand());
+ // Load expected native context.
+ __ LoadWeakValue(scratch2, native_context_cell, miss);
+ __ cmp(scratch1, scratch2);
+
+ if (!compare_native_contexts_only) {
+ __ j(equal, &done);
+
+ // Compare security tokens of current and expected native contexts.
+ __ mov(scratch1, ContextOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ mov(scratch2, ContextOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ cmp(scratch1, scratch2);
+ }
+ __ j(not_equal, miss);
+
+ __ bind(&done);
+}
Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
- Register scratch2, Handle<Name> name, Label* miss, PrototypeCheckType check,
+ Register scratch2, Handle<Name> name, Label* miss,
ReturnHolder return_what) {
Handle<Map> receiver_map = map();
@@ -433,17 +455,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // The prototype chain of primitives (and their JSValue wrappers) depends
- // on the native context, which can't be guarded by validity cells.
- // |object_reg| holds the native context specific prototype in this case;
- // we need to check its map.
- if (check == CHECK_ALL_MAPS) {
- __ mov(scratch1, FieldOperand(object_reg, HeapObject::kMapOffset));
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- __ CmpWeakValue(scratch1, cell, scratch2);
- __ j(not_equal, miss);
- }
-
// Keep track of the current object in register reg.
Register reg = object_reg;
int depth = 0;
@@ -453,46 +464,28 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current = isolate()->global_object();
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (receiver_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
- Handle<JSObject> prototype = Handle<JSObject>::null();
- Handle<Map> current_map = receiver_map;
+ Handle<Map> current_map(receiver_map->GetPrototypeChainRootMap(isolate()),
+ isolate());
Handle<Map> holder_map(holder()->map());
// Traverse the prototype chain and check the maps in the prototype chain for
// fast and global objects or do negative lookup for normal objects.
while (!current_map.is_identical_to(holder_map)) {
++depth;
- // Only global objects and objects that do not require access
- // checks are allowed in stubs.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
-
- prototype = handle(JSObject::cast(current_map->prototype()));
if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
} else if (current_map->is_dictionary_map()) {
DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast.
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
+ DCHECK(name->IsUniqueName());
DCHECK(current.is_null() ||
current->property_dictionary()->FindEntry(name) ==
NameDictionary::kNotFound);
if (depth > 1) {
- // TODO(jkummerow): Cache and re-use weak cell.
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
scratch2);
@@ -500,7 +493,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
reg = holder_reg; // From now on the object will be in holder_reg.
// Go to the next object in the prototype chain.
- current = prototype;
+ current = handle(JSObject::cast(current_map->prototype()));
current_map = handle(current->map());
}
@@ -511,7 +504,9 @@ Register PropertyHandlerCompiler::CheckPrototypes(
bool return_holder = return_what == RETURN_HOLDER;
if (return_holder && depth != 0) {
- __ LoadWeakValue(reg, isolate()->factory()->NewWeakCell(current), miss);
+ Handle<WeakCell> weak_cell =
+ Map::GetOrCreatePrototypeWeakCell(current, isolate());
+ __ LoadWeakValue(reg, weak_cell, miss);
}
// Return the register containing the holder.
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index baf435e0f2..f96e509f53 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -18,18 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
- Label* global_object) {
- // Register usage:
- // type: holds the receiver instance type on entry.
- __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
- __ j(equal, global_object);
- __ cmp(type, JS_GLOBAL_PROXY_TYPE);
- __ j(equal, global_object);
-}
-
-
// Helper function used to load a property from a dictionary backing
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
@@ -132,238 +120,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
__ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
}
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
- Register receiver, Register map,
- int interceptor_bit, Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // Scratch registers:
- // map - used to hold the map of the receiver.
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, slow);
-
- // Get the map of the receiver.
- __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Check bit field.
- __ test_b(
- FieldOperand(map, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
- __ j(not_zero, slow);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
- __ CmpInstanceType(map, JS_OBJECT_TYPE);
- __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
- Register key, Register scratch,
- Register scratch2, Register result,
- Label* slow) {
- // Register use:
- // receiver - holds the receiver and is unchanged.
- // key - holds the key and is unchanged (must be a smi).
- // Scratch registers:
- // scratch - used to hold elements of the receiver and the loaded value.
- // scratch2 - holds maps and prototypes during prototype chain check.
- // result - holds the result on exit if the load succeeds and
- // we fall through.
- Label check_prototypes, check_next_prototype;
- Label done, in_bounds, absent;
-
- __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
- __ AssertFastElements(scratch);
-
- // Check that the key (index) is within bounds.
- __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
- __ j(below, &in_bounds);
- // Out-of-bounds. Check the prototype chain to see if we can just return
- // 'undefined'.
- __ cmp(key, 0);
- __ j(less, slow); // Negative keys can't take the fast OOB path.
- __ bind(&check_prototypes);
- __ mov(scratch2, FieldOperand(receiver, HeapObject::kMapOffset));
- __ bind(&check_next_prototype);
- __ mov(scratch2, FieldOperand(scratch2, Map::kPrototypeOffset));
- // scratch2: current prototype
- __ cmp(scratch2, masm->isolate()->factory()->null_value());
- __ j(equal, &absent);
- __ mov(scratch, FieldOperand(scratch2, JSObject::kElementsOffset));
- __ mov(scratch2, FieldOperand(scratch2, HeapObject::kMapOffset));
- // scratch: elements of current prototype
- // scratch2: map of current prototype
- __ CmpInstanceType(scratch2, JS_OBJECT_TYPE);
- __ j(below, slow);
- __ test_b(FieldOperand(scratch2, Map::kBitFieldOffset),
- Immediate((1 << Map::kIsAccessCheckNeeded) |
- (1 << Map::kHasIndexedInterceptor)));
- __ j(not_zero, slow);
- __ cmp(scratch, masm->isolate()->factory()->empty_fixed_array());
- __ j(not_equal, slow);
- __ jmp(&check_next_prototype);
-
- __ bind(&absent);
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
-
- __ bind(&in_bounds);
- // Fast case: Do the load.
- STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
- __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
- // In case the loaded value is the_hole we have to check the prototype chain.
- __ j(equal, &check_prototypes);
- __ Move(result, scratch);
- __ bind(&done);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
- Register map, Register hash,
- Label* index_string, Label* not_unique) {
- // Register use:
- // key - holds the key and is unchanged. Assumed to be non-smi.
- // Scratch registers:
- // map - used to hold the map of the key.
- // hash - used to hold the hash of the key.
- Label unique;
- __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
- __ j(above, not_unique);
- STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
- __ j(equal, &unique);
-
- // Is the string an array index, with cached numeric value?
- __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
- __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
- __ j(zero, index_string);
-
- // Is the string internalized? We already know it's a string so a single
- // bit test is enough.
- STATIC_ASSERT(kNotInternalizedTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsNotInternalizedMask));
- __ j(not_zero, not_unique);
-
- __ bind(&unique);
-}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
- Label slow, check_name, index_smi, index_name, property_array_property;
- Label probe_dictionary, check_number_dictionary;
-
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &check_name);
- __ bind(&index_smi);
- // Now the key is known to be a smi. This place is also jumped to from
- // where a numeric string is converted to a smi.
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
- Map::kHasIndexedInterceptor, &slow);
-
- // Check the receiver's map to see if it has fast elements.
- __ CheckFastElements(eax, &check_number_dictionary);
-
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
- Isolate* isolate = masm->isolate();
- Counters* counters = isolate->counters();
- __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
- __ ret(0);
-
- __ bind(&check_number_dictionary);
- __ mov(ebx, key);
- __ SmiUntag(ebx);
- __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check whether the elements is a number dictionary.
- // ebx: untagged index
- // eax: elements
- __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
- DONT_DO_SMI_CHECK);
- Label slow_pop_receiver;
- // Push receiver on the stack to free up a register for the dictionary
- // probing.
- __ push(receiver);
- __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
- // Pop receiver before returning.
- __ pop(receiver);
- __ ret(0);
-
- __ bind(&slow_pop_receiver);
- // Pop the receiver from the stack and jump to runtime.
- __ pop(receiver);
-
- __ bind(&slow);
- // Slow case: jump to runtime.
- __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm);
-
- __ bind(&check_name);
- GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
-
- GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
- &slow);
-
- // If the receiver is a fast-case object, check the stub cache. Otherwise
- // probe the dictionary.
- __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
- __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(isolate->factory()->hash_table_map()));
- __ j(equal, &probe_dictionary);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(isolate);
- int slot = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
- __ push(Immediate(Smi::FromInt(slot)));
- __ push(Immediate(dummy_vector));
-
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, key, ebx,
- edi);
-
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- __ pop(LoadDescriptor::SlotRegister());
-
- // Cache miss.
- GenerateMiss(masm);
-
- // Do a quick inline probe of the receiver's dictionary, if it
- // exists.
- __ bind(&probe_dictionary);
-
- __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
- GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
- GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
- __ ret(0);
-
- __ bind(&index_name);
- __ IndexFromHash(ebx, key);
- // Now jump to the place where smi keys are handled.
- __ jmp(&index_smi);
-}
-
-
static void KeyedStoreGenerateMegamorphicHelper(
MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index bf59fd0aec..4b511d96f3 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -52,9 +52,9 @@ bool InitializeICUDefaultLocation(const char* exec_path,
}
char* icu_data_file_default;
#if defined(V8_TARGET_LITTLE_ENDIAN)
- RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
+ base::RelativePath(&icu_data_file_default, exec_path, "icudtl.dat");
#elif defined(V8_TARGET_BIG_ENDIAN)
- RelativePath(&icu_data_file_default, exec_path, "icudtb.dat");
+ base::RelativePath(&icu_data_file_default, exec_path, "icudtb.dat");
#else
#error Unknown byte ordering
#endif
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 15c090ff22..6ebb91ccbc 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -4,7 +4,7 @@
import("../../gni/v8.gni")
-_inspector_protocol = "//third_party/WebKit/Source/platform/inspector_protocol"
+_inspector_protocol = v8_path_prefix + "/third_party/inspector_protocol"
import("$_inspector_protocol/inspector_protocol.gni")
_protocol_generated = [
@@ -51,6 +51,7 @@ inspector_protocol_generate("protocol_generated_sources") {
":protocol_compatibility",
]
+ inspector_protocol_dir = _inspector_protocol
out_dir = target_gen_dir
config_file = "inspector_protocol_config.json"
inputs = [
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 4486204d74..d49c6a6254 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -1,8 +1,11 @@
include_rules = [
"-src",
- "+src/inspector",
"+src/base/atomicops.h",
"+src/base/macros.h",
"+src/base/logging.h",
"+src/base/platform/platform.h",
+ "+src/inspector",
+ "+src/tracing",
+ "-include/v8-debug.h",
+ "+src/debug/debug-interface.h",
]
diff --git a/deps/v8/src/inspector/debugger-script.js b/deps/v8/src/inspector/debugger-script.js
index 98910d69df..1614566ffa 100644
--- a/deps/v8/src/inspector/debugger-script.js
+++ b/deps/v8/src/inspector/debugger-script.js
@@ -33,18 +33,6 @@
var DebuggerScript = {};
-/** @enum */
-const PauseOnExceptionsState = {
- DontPauseOnExceptions: 0,
- PauseOnAllExceptions: 1,
- PauseOnUncaughtExceptions: 2
-};
-DebuggerScript.PauseOnExceptionsState = PauseOnExceptionsState;
-
-DebuggerScript._pauseOnExceptionsState = DebuggerScript.PauseOnExceptionsState.DontPauseOnExceptions;
-Debug.clearBreakOnException();
-Debug.clearBreakOnUncaughtException();
-
/**
* @param {?CompileEvent} eventData
*/
@@ -52,7 +40,7 @@ DebuggerScript.getAfterCompileScript = function(eventData)
{
var script = eventData.script().value();
if (!script.is_debugger_script)
- return DebuggerScript._formatScript(eventData.script().value());
+ return script;
return null;
}
@@ -152,82 +140,6 @@ DebuggerScript._executionContextId = function(contextData)
}
/**
- * @param {string|undefined} contextData
- * @return {string}
- */
-DebuggerScript._executionContextAuxData = function(contextData)
-{
- if (!contextData)
- return "";
- var match = contextData.match(/^[^,]*,[^,]*,(.*)$/);
- return match ? match[1] : "";
-}
-
-/**
- * @param {string} contextGroupId
- * @return {!Array<!FormattedScript>}
- */
-DebuggerScript.getScripts = function(contextGroupId)
-{
- var result = [];
- var scripts = Debug.scripts();
- var contextDataPrefix = null;
- if (contextGroupId)
- contextDataPrefix = contextGroupId + ",";
- for (var i = 0; i < scripts.length; ++i) {
- var script = scripts[i];
- if (contextDataPrefix) {
- if (!script.context_data)
- continue;
- // Context data is a string in the following format:
- // <contextGroupId>,<contextId>,<auxData>
- if (script.context_data.indexOf(contextDataPrefix) !== 0)
- continue;
- }
- if (script.is_debugger_script)
- continue;
- result.push(DebuggerScript._formatScript(script));
- }
- return result;
-}
-
-/**
- * @param {!Script} script
- * @return {!FormattedScript}
- */
-DebuggerScript._formatScript = function(script)
-{
- var lineEnds = script.line_ends;
- var lineCount = lineEnds.length;
- var endLine = script.line_offset + lineCount - 1;
- var endColumn;
- // V8 will not count last line if script source ends with \n.
- if (script.source[script.source.length - 1] === '\n') {
- endLine += 1;
- endColumn = 0;
- } else {
- if (lineCount === 1)
- endColumn = script.source.length + script.column_offset;
- else
- endColumn = script.source.length - (lineEnds[lineCount - 2] + 1);
- }
- return {
- id: script.id,
- name: script.nameOrSourceURL(),
- sourceURL: script.source_url,
- sourceMappingURL: script.source_mapping_url,
- source: script.source,
- startLine: script.line_offset,
- startColumn: script.column_offset,
- endLine: endLine,
- endColumn: endColumn,
- executionContextId: DebuggerScript._executionContextId(script.context_data),
- // Note that we cannot derive aux data from context id because of compilation cache.
- executionContextAuxData: DebuggerScript._executionContextAuxData(script.context_data)
- };
-}
-
-/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
@@ -253,32 +165,6 @@ DebuggerScript.removeBreakpoint = function(execState, info)
}
/**
- * @return {number}
- */
-DebuggerScript.pauseOnExceptionsState = function()
-{
- return DebuggerScript._pauseOnExceptionsState;
-}
-
-/**
- * @param {number} newState
- */
-DebuggerScript.setPauseOnExceptionsState = function(newState)
-{
- DebuggerScript._pauseOnExceptionsState = newState;
-
- if (DebuggerScript.PauseOnExceptionsState.PauseOnAllExceptions === newState)
- Debug.setBreakOnException();
- else
- Debug.clearBreakOnException();
-
- if (DebuggerScript.PauseOnExceptionsState.PauseOnUncaughtExceptions === newState)
- Debug.setBreakOnUncaughtException();
- else
- Debug.clearBreakOnUncaughtException();
-}
-
-/**
* @param {!ExecutionState} execState
* @param {number} limit
* @return {!Array<!JavaScriptCallFrame>}
@@ -291,43 +177,6 @@ DebuggerScript.currentCallFrames = function(execState, limit)
return frames;
}
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepIntoStatement = function(execState)
-{
- execState.prepareStep(Debug.StepAction.StepIn);
-}
-
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepFrameStatement = function(execState)
-{
- execState.prepareStep(Debug.StepAction.StepFrame);
-}
-
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepOverStatement = function(execState)
-{
- execState.prepareStep(Debug.StepAction.StepNext);
-}
-
-/**
- * @param {!ExecutionState} execState
- */
-DebuggerScript.stepOutOfFunction = function(execState)
-{
- execState.prepareStep(Debug.StepAction.StepOut);
-}
-
-DebuggerScript.clearStepping = function()
-{
- Debug.clearStepping();
-}
-
// Returns array in form:
// [ 0, <v8_result_report> ] in case of success
// or [ 1, <general_error_message>, <compiler_message>, <line_number>, <column_number> ] in case of compile error, numbers are 1-based.
@@ -416,6 +265,7 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
var frameDetails = frameMirror.details();
var funcObject = frameDetails.func();
+ var scriptObject = frameDetails.script();
var sourcePosition = frameDetails.sourcePosition();
var thisObject = frameDetails.receiver();
@@ -448,6 +298,7 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
// Calculated lazily.
var scopeChain;
var funcMirror;
+ var scriptMirror;
var location;
/** @type {!Array<?RawLocation>} */
var scopeStartLocations;
@@ -516,7 +367,7 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
{
if (!details) {
var scopeObjects = ensureScopeChain();
- var script = ensureFuncMirror().script();
+ var script = ensureScriptMirror();
/** @type {!Array<Scope>} */
var scopes = [];
for (var i = 0; i < scopeObjects.length; ++i) {
@@ -570,14 +421,24 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
}
/**
+ * @return {!ScriptMirror}
+ */
+ function ensureScriptMirror()
+ {
+ if (!scriptMirror) {
+ scriptMirror = MakeMirror(scriptObject);
+ }
+ return /** @type {!ScriptMirror} */(scriptMirror);
+ }
+
+ /**
* @return {!{line: number, column: number}}
*/
function ensureLocation()
{
if (!location) {
- var script = ensureFuncMirror().script();
- if (script)
- location = script.locationFromPosition(sourcePosition, true);
+ var script = ensureScriptMirror();
+ location = script.locationFromPosition(sourcePosition, true);
if (!location)
location = { line: 0, column: 0 };
}
@@ -616,12 +477,12 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
}
/**
- * @return {number|undefined}
+ * @return {number}
*/
function sourceID()
{
- var script = ensureFuncMirror().script();
- return script && script.id();
+ var script = ensureScriptMirror();
+ return script.id();
}
/**
diff --git a/deps/v8/src/inspector/debugger_script_externs.js b/deps/v8/src/inspector/debugger_script_externs.js
index c7df61f3f4..cc152d5537 100644
--- a/deps/v8/src/inspector/debugger_script_externs.js
+++ b/deps/v8/src/inspector/debugger_script_externs.js
@@ -44,7 +44,7 @@ var FormattedScript;
var JavaScriptCallFrameDetails;
/** @typedef {{
- sourceID: function():(number|undefined),
+ sourceID: function():(number),
line: function():number,
column: function():number,
thisObject: !Object,
@@ -61,19 +61,6 @@ var JavaScriptCallFrame;
*/
var Debug = {};
-Debug.setBreakOnException = function() {}
-
-Debug.clearBreakOnException = function() {}
-
-Debug.setBreakOnUncaughtException = function() {}
-
-/**
- * @return {undefined}
- */
-Debug.clearBreakOnUncaughtException = function() {}
-
-Debug.clearStepping = function() {}
-
Debug.clearAllBreakPoints = function() {}
/** @return {!Array<!Script>} */
@@ -203,9 +190,6 @@ BreakEvent.prototype.breakPointsHit = function() {}
/** @interface */
function ExecutionState() {}
-/** @param {!Debug.StepAction} action */
-ExecutionState.prototype.prepareStep = function(action) {}
-
/**
* @param {string} source
* @param {boolean} disableBreak
@@ -257,7 +241,6 @@ var SourceLocation;
* source_mapping_url: (string|undefined),
* is_debugger_script: boolean,
* source: string,
- * line_ends: !Array<number>,
* line_offset: number,
* column_offset: number,
* nameOrSourceURL: function():string,
@@ -288,6 +271,9 @@ FrameDetails.prototype.receiver = function() {}
/** @return {function()} */
FrameDetails.prototype.func = function() {}
+/** @return {!Object} */
+FrameDetails.prototype.script = function() {}
+
/** @return {boolean} */
FrameDetails.prototype.isAtReturn = function() {}
@@ -466,6 +452,9 @@ FrameMirror.prototype.allScopes = function(ignoreNestedScopes) {}
/** @return {!FrameDetails} */
FrameMirror.prototype.details = function() {}
+/** @return {!ScriptMirror} */
+FrameMirror.prototype.script = function() {}
+
/**
* @param {string} source
* @param {boolean} disableBreak
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index 39c6c9c1e8..f3c8d6b96e 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -260,18 +260,6 @@ InjectedScript.prototype = {
},
/**
- * @param {!Array<*>} array
- * @param {string} groupName
- * @param {boolean} forceValueType
- * @param {boolean} generatePreview
- */
- wrapObjectsInArray: function(array, groupName, forceValueType, generatePreview)
- {
- for (var i = 0; i < array.length; ++i)
- array[i] = this.wrapObject(array[i], groupName, forceValueType, generatePreview);
- },
-
- /**
* @param {!Object} table
* @param {!Array.<string>|string|boolean} columns
* @return {!RuntimeAgent.RemoteObject}
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index a100dea2e1..d605227222 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -54,11 +54,6 @@ using protocol::Runtime::InternalPropertyDescriptor;
using protocol::Runtime::RemoteObject;
using protocol::Maybe;
-static bool hasInternalError(ErrorString* errorString, bool hasError) {
- if (hasError) *errorString = "Internal error";
- return hasError;
-}
-
std::unique_ptr<InjectedScript> InjectedScript::create(
InspectedContext* inspectedContext) {
v8::Isolate* isolate = inspectedContext->isolate();
@@ -124,10 +119,9 @@ InjectedScript::InjectedScript(
InjectedScript::~InjectedScript() {}
-void InjectedScript::getProperties(
- ErrorString* errorString, v8::Local<v8::Object> object,
- const String16& groupName, bool ownProperties, bool accessorPropertiesOnly,
- bool generatePreview,
+Response InjectedScript::getProperties(
+ v8::Local<v8::Object> object, const String16& groupName, bool ownProperties,
+ bool accessorPropertiesOnly, bool generatePreview,
std::unique_ptr<Array<PropertyDescriptor>>* properties,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::HandleScope handles(m_context->isolate());
@@ -143,21 +137,23 @@ void InjectedScript::getProperties(
v8::TryCatch tryCatch(m_context->isolate());
v8::Local<v8::Value> resultValue = function.callWithoutExceptionHandling();
if (tryCatch.HasCaught()) {
- *exceptionDetails = createExceptionDetails(errorString, tryCatch, groupName,
- generatePreview);
+ Response response = createExceptionDetails(
+ tryCatch, groupName, generatePreview, exceptionDetails);
+ if (!response.isSuccess()) return response;
// FIXME: make properties optional
*properties = Array<PropertyDescriptor>::create();
- return;
+ return Response::OK();
}
- if (hasInternalError(errorString, resultValue.IsEmpty())) return;
- std::unique_ptr<protocol::Value> protocolValue =
- toProtocolValue(errorString, context, resultValue);
- if (!protocolValue) return;
- protocol::ErrorSupport errors(errorString);
+ if (resultValue.IsEmpty()) return Response::InternalError();
+ std::unique_ptr<protocol::Value> protocolValue;
+ Response response = toProtocolValue(context, resultValue, &protocolValue);
+ if (!response.isSuccess()) return response;
+ protocol::ErrorSupport errors;
std::unique_ptr<Array<PropertyDescriptor>> result =
Array<PropertyDescriptor>::parse(protocolValue.get(), &errors);
- if (!hasInternalError(errorString, errors.hasErrors()))
- *properties = std::move(result);
+ if (errors.hasErrors()) return Response::Error(errors.errors());
+ *properties = std::move(result);
+ return Response::OK();
}
void InjectedScript::releaseObject(const String16& objectId) {
@@ -172,55 +168,52 @@ void InjectedScript::releaseObject(const String16& objectId) {
m_native->unbind(boundId);
}
-std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapObject(
- ErrorString* errorString, v8::Local<v8::Value> value,
- const String16& groupName, bool forceValueType,
- bool generatePreview) const {
+Response InjectedScript::wrapObject(
+ v8::Local<v8::Value> value, const String16& groupName, bool forceValueType,
+ bool generatePreview,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) const {
v8::HandleScope handles(m_context->isolate());
v8::Local<v8::Value> wrappedObject;
v8::Local<v8::Context> context = m_context->context();
- if (!wrapValue(errorString, value, groupName, forceValueType, generatePreview)
- .ToLocal(&wrappedObject))
- return nullptr;
+ Response response = wrapValue(value, groupName, forceValueType,
+ generatePreview, &wrappedObject);
+ if (!response.isSuccess()) return response;
protocol::ErrorSupport errors;
- std::unique_ptr<protocol::Value> protocolValue =
- toProtocolValue(errorString, context, wrappedObject);
- if (!protocolValue) return nullptr;
- std::unique_ptr<protocol::Runtime::RemoteObject> remoteObject =
+ std::unique_ptr<protocol::Value> protocolValue;
+ response = toProtocolValue(context, wrappedObject, &protocolValue);
+ if (!response.isSuccess()) return response;
+
+ *result =
protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
- if (!remoteObject) *errorString = errors.errors();
- return remoteObject;
+ if (!result->get()) return Response::Error(errors.errors());
+ return Response::OK();
}
-bool InjectedScript::wrapObjectProperty(ErrorString* errorString,
- v8::Local<v8::Object> object,
- v8::Local<v8::Name> key,
- const String16& groupName,
- bool forceValueType,
- bool generatePreview) const {
+Response InjectedScript::wrapObjectProperty(v8::Local<v8::Object> object,
+ v8::Local<v8::Name> key,
+ const String16& groupName,
+ bool forceValueType,
+ bool generatePreview) const {
v8::Local<v8::Value> property;
v8::Local<v8::Context> context = m_context->context();
- if (hasInternalError(errorString,
- !object->Get(context, key).ToLocal(&property)))
- return false;
+ if (!object->Get(context, key).ToLocal(&property))
+ return Response::InternalError();
v8::Local<v8::Value> wrappedProperty;
- if (!wrapValue(errorString, property, groupName, forceValueType,
- generatePreview)
- .ToLocal(&wrappedProperty))
- return false;
+ Response response = wrapValue(property, groupName, forceValueType,
+ generatePreview, &wrappedProperty);
+ if (!response.isSuccess()) return response;
v8::Maybe<bool> success =
createDataProperty(context, object, key, wrappedProperty);
- if (hasInternalError(errorString, success.IsNothing() || !success.FromJust()))
- return false;
- return true;
+ if (success.IsNothing() || !success.FromJust())
+ return Response::InternalError();
+ return Response::OK();
}
-bool InjectedScript::wrapPropertyInArray(ErrorString* errorString,
- v8::Local<v8::Array> array,
- v8::Local<v8::String> property,
- const String16& groupName,
- bool forceValueType,
- bool generatePreview) const {
+Response InjectedScript::wrapPropertyInArray(v8::Local<v8::Array> array,
+ v8::Local<v8::String> property,
+ const String16& groupName,
+ bool forceValueType,
+ bool generatePreview) const {
V8FunctionCall function(m_context->inspector(), m_context->context(),
v8Value(), "wrapPropertyInArray");
function.appendArgument(array);
@@ -230,29 +223,13 @@ bool InjectedScript::wrapPropertyInArray(ErrorString* errorString,
function.appendArgument(generatePreview);
bool hadException = false;
function.call(hadException);
- return !hasInternalError(errorString, hadException);
+ return hadException ? Response::InternalError() : Response::OK();
}
-bool InjectedScript::wrapObjectsInArray(ErrorString* errorString,
- v8::Local<v8::Array> array,
- const String16& groupName,
- bool forceValueType,
- bool generatePreview) const {
- V8FunctionCall function(m_context->inspector(), m_context->context(),
- v8Value(), "wrapObjectsInArray");
- function.appendArgument(array);
- function.appendArgument(groupName);
- function.appendArgument(forceValueType);
- function.appendArgument(generatePreview);
- bool hadException = false;
- function.call(hadException);
- return !hasInternalError(errorString, hadException);
-}
-
-v8::MaybeLocal<v8::Value> InjectedScript::wrapValue(
- ErrorString* errorString, v8::Local<v8::Value> value,
- const String16& groupName, bool forceValueType,
- bool generatePreview) const {
+Response InjectedScript::wrapValue(v8::Local<v8::Value> value,
+ const String16& groupName,
+ bool forceValueType, bool generatePreview,
+ v8::Local<v8::Value>* result) const {
V8FunctionCall function(m_context->inspector(), m_context->context(),
v8Value(), "wrapObject");
function.appendArgument(value);
@@ -260,10 +237,9 @@ v8::MaybeLocal<v8::Value> InjectedScript::wrapValue(
function.appendArgument(forceValueType);
function.appendArgument(generatePreview);
bool hadException = false;
- v8::Local<v8::Value> r = function.call(hadException);
- if (hasInternalError(errorString, hadException || r.IsEmpty()))
- return v8::MaybeLocal<v8::Value>();
- return r;
+ *result = function.call(hadException);
+ if (hadException || result->IsEmpty()) return Response::InternalError();
+ return Response::OK();
}
std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
@@ -280,21 +256,19 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
bool hadException = false;
v8::Local<v8::Value> r = function.call(hadException);
if (hadException || r.IsEmpty()) return nullptr;
- protocol::ErrorString errorString;
- std::unique_ptr<protocol::Value> protocolValue =
- toProtocolValue(&errorString, context, r);
- if (!protocolValue) return nullptr;
+ std::unique_ptr<protocol::Value> protocolValue;
+ Response response = toProtocolValue(context, r, &protocolValue);
+ if (!response.isSuccess()) return nullptr;
protocol::ErrorSupport errors;
return protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
}
-bool InjectedScript::findObject(ErrorString* errorString,
- const RemoteObjectId& objectId,
- v8::Local<v8::Value>* outObject) const {
+Response InjectedScript::findObject(const RemoteObjectId& objectId,
+ v8::Local<v8::Value>* outObject) const {
*outObject = m_native->objectForId(objectId.id());
if (outObject->IsEmpty())
- *errorString = "Could not find object with given id";
- return !outObject->IsEmpty();
+ return Response::Error("Could not find object with given id");
+ return Response::OK();
}
String16 InjectedScript::objectGroupName(const RemoteObjectId& objectId) const {
@@ -326,47 +300,41 @@ v8::Local<v8::Value> InjectedScript::lastEvaluationResult() const {
return m_lastEvaluationResult.Get(m_context->isolate());
}
-v8::MaybeLocal<v8::Value> InjectedScript::resolveCallArgument(
- ErrorString* errorString, protocol::Runtime::CallArgument* callArgument) {
+Response InjectedScript::resolveCallArgument(
+ protocol::Runtime::CallArgument* callArgument,
+ v8::Local<v8::Value>* result) {
if (callArgument->hasObjectId()) {
- std::unique_ptr<RemoteObjectId> remoteObjectId =
- RemoteObjectId::parse(errorString, callArgument->getObjectId(""));
- if (!remoteObjectId) return v8::MaybeLocal<v8::Value>();
- if (remoteObjectId->contextId() != m_context->contextId()) {
- *errorString =
+ std::unique_ptr<RemoteObjectId> remoteObjectId;
+ Response response =
+ RemoteObjectId::parse(callArgument->getObjectId(""), &remoteObjectId);
+ if (!response.isSuccess()) return response;
+ if (remoteObjectId->contextId() != m_context->contextId())
+ return Response::Error(
"Argument should belong to the same JavaScript world as target "
- "object";
- return v8::MaybeLocal<v8::Value>();
- }
- v8::Local<v8::Value> object;
- if (!findObject(errorString, *remoteObjectId, &object))
- return v8::MaybeLocal<v8::Value>();
- return object;
+ "object");
+ return findObject(*remoteObjectId, result);
}
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
String16 value =
callArgument->hasValue()
? callArgument->getValue(nullptr)->toJSONString()
: "Number(\"" + callArgument->getUnserializableValue("") + "\")";
- v8::Local<v8::Value> object;
if (!m_context->inspector()
->compileAndRunInternalScript(
m_context->context(), toV8String(m_context->isolate(), value))
- .ToLocal(&object)) {
- *errorString = "Couldn't parse value object in call argument";
- return v8::MaybeLocal<v8::Value>();
+ .ToLocal(result)) {
+ return Response::Error("Couldn't parse value object in call argument");
}
- return object;
+ return Response::OK();
}
- return v8::Undefined(m_context->isolate());
+ *result = v8::Undefined(m_context->isolate());
+ return Response::OK();
}
-std::unique_ptr<protocol::Runtime::ExceptionDetails>
-InjectedScript::createExceptionDetails(ErrorString* errorString,
- const v8::TryCatch& tryCatch,
- const String16& objectGroup,
- bool generatePreview) {
- if (!tryCatch.HasCaught()) return nullptr;
+Response InjectedScript::createExceptionDetails(
+ const v8::TryCatch& tryCatch, const String16& objectGroup,
+ bool generatePreview, Maybe<protocol::Runtime::ExceptionDetails>* result) {
+ if (!tryCatch.HasCaught()) return Response::InternalError();
v8::Local<v8::Message> message = tryCatch.Message();
v8::Local<v8::Value> exception = tryCatch.Exception();
String16 messageText =
@@ -396,43 +364,44 @@ InjectedScript::createExceptionDetails(ErrorString* errorString,
->buildInspectorObjectImpl());
}
if (!exception.IsEmpty()) {
- std::unique_ptr<protocol::Runtime::RemoteObject> wrapped = wrapObject(
- errorString, exception, objectGroup, false /* forceValueType */,
- generatePreview && !exception->IsNativeError());
- if (!wrapped) return nullptr;
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrapped;
+ Response response =
+ wrapObject(exception, objectGroup, false /* forceValueType */,
+ generatePreview && !exception->IsNativeError(), &wrapped);
+ if (!response.isSuccess()) return response;
exceptionDetails->setException(std::move(wrapped));
}
- return exceptionDetails;
+ *result = std::move(exceptionDetails);
+ return Response::OK();
}
-void InjectedScript::wrapEvaluateResult(
- ErrorString* errorString, v8::MaybeLocal<v8::Value> maybeResultValue,
- const v8::TryCatch& tryCatch, const String16& objectGroup,
- bool returnByValue, bool generatePreview,
+Response InjectedScript::wrapEvaluateResult(
+ v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch& tryCatch,
+ const String16& objectGroup, bool returnByValue, bool generatePreview,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::Local<v8::Value> resultValue;
if (!tryCatch.HasCaught()) {
- if (hasInternalError(errorString, !maybeResultValue.ToLocal(&resultValue)))
- return;
- std::unique_ptr<RemoteObject> remoteObject = wrapObject(
- errorString, resultValue, objectGroup, returnByValue, generatePreview);
- if (!remoteObject) return;
+ if (!maybeResultValue.ToLocal(&resultValue))
+ return Response::InternalError();
+ Response response = wrapObject(resultValue, objectGroup, returnByValue,
+ generatePreview, result);
+ if (!response.isSuccess()) return response;
if (objectGroup == "console")
m_lastEvaluationResult.Reset(m_context->isolate(), resultValue);
- *result = std::move(remoteObject);
} else {
v8::Local<v8::Value> exception = tryCatch.Exception();
- std::unique_ptr<RemoteObject> remoteObject =
- wrapObject(errorString, exception, objectGroup, false,
- generatePreview && !exception->IsNativeError());
- if (!remoteObject) return;
+ Response response =
+ wrapObject(exception, objectGroup, false,
+ generatePreview && !exception->IsNativeError(), result);
+ if (!response.isSuccess()) return response;
// We send exception in result for compatibility reasons, even though it's
// accessible through exceptionDetails.exception.
- *result = std::move(remoteObject);
- *exceptionDetails = createExceptionDetails(errorString, tryCatch,
- objectGroup, generatePreview);
+ response = createExceptionDetails(tryCatch, objectGroup, generatePreview,
+ exceptionDetails);
+ if (!response.isSuccess()) return response;
}
+ return Response::OK();
}
v8::Local<v8::Object> InjectedScript::commandLineAPI() {
@@ -442,41 +411,35 @@ v8::Local<v8::Object> InjectedScript::commandLineAPI() {
return m_commandLineAPI.Get(m_context->isolate());
}
-InjectedScript::Scope::Scope(ErrorString* errorString,
- V8InspectorImpl* inspector, int contextGroupId)
- : m_errorString(errorString),
- m_inspector(inspector),
+InjectedScript::Scope::Scope(V8InspectorImpl* inspector, int contextGroupId)
+ : m_inspector(inspector),
m_contextGroupId(contextGroupId),
m_injectedScript(nullptr),
m_handleScope(inspector->isolate()),
m_tryCatch(inspector->isolate()),
m_ignoreExceptionsAndMuteConsole(false),
- m_previousPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions),
+ m_previousPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException),
m_userGesture(false) {}
-bool InjectedScript::Scope::initialize() {
+Response InjectedScript::Scope::initialize() {
cleanup();
// TODO(dgozman): what if we reattach to the same context group during
// evaluate? Introduce a session id?
V8InspectorSessionImpl* session =
m_inspector->sessionForContextGroup(m_contextGroupId);
- if (!session) {
- *m_errorString = "Internal error";
- return false;
- }
- findInjectedScript(session);
- if (!m_injectedScript) return false;
+ if (!session) return Response::InternalError();
+ Response response = findInjectedScript(session);
+ if (!response.isSuccess()) return response;
m_context = m_injectedScript->context()->context();
m_context->Enter();
- return true;
+ return Response::OK();
}
-bool InjectedScript::Scope::installCommandLineAPI() {
+void InjectedScript::Scope::installCommandLineAPI() {
DCHECK(m_injectedScript && !m_context.IsEmpty() &&
!m_commandLineAPIScope.get());
m_commandLineAPIScope.reset(new V8Console::CommandLineAPIScope(
m_context, m_injectedScript->commandLineAPI(), m_context->Global()));
- return true;
}
void InjectedScript::Scope::ignoreExceptionsAndMuteConsole() {
@@ -485,14 +448,14 @@ void InjectedScript::Scope::ignoreExceptionsAndMuteConsole() {
m_inspector->client()->muteMetrics(m_contextGroupId);
m_inspector->muteExceptions(m_contextGroupId);
m_previousPauseOnExceptionsState =
- setPauseOnExceptionsState(V8Debugger::DontPauseOnExceptions);
+ setPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException);
}
-V8Debugger::PauseOnExceptionsState
+v8::DebugInterface::ExceptionBreakState
InjectedScript::Scope::setPauseOnExceptionsState(
- V8Debugger::PauseOnExceptionsState newState) {
+ v8::DebugInterface::ExceptionBreakState newState) {
if (!m_inspector->debugger()->enabled()) return newState;
- V8Debugger::PauseOnExceptionsState presentState =
+ v8::DebugInterface::ExceptionBreakState presentState =
m_inspector->debugger()->getPauseOnExceptionsState();
if (presentState != newState)
m_inspector->debugger()->setPauseOnExceptionsState(newState);
@@ -523,59 +486,57 @@ InjectedScript::Scope::~Scope() {
cleanup();
}
-InjectedScript::ContextScope::ContextScope(ErrorString* errorString,
- V8InspectorImpl* inspector,
+InjectedScript::ContextScope::ContextScope(V8InspectorImpl* inspector,
int contextGroupId,
int executionContextId)
- : InjectedScript::Scope(errorString, inspector, contextGroupId),
+ : InjectedScript::Scope(inspector, contextGroupId),
m_executionContextId(executionContextId) {}
InjectedScript::ContextScope::~ContextScope() {}
-void InjectedScript::ContextScope::findInjectedScript(
+Response InjectedScript::ContextScope::findInjectedScript(
V8InspectorSessionImpl* session) {
- m_injectedScript =
- session->findInjectedScript(m_errorString, m_executionContextId);
+ return session->findInjectedScript(m_executionContextId, m_injectedScript);
}
-InjectedScript::ObjectScope::ObjectScope(ErrorString* errorString,
- V8InspectorImpl* inspector,
+InjectedScript::ObjectScope::ObjectScope(V8InspectorImpl* inspector,
int contextGroupId,
const String16& remoteObjectId)
- : InjectedScript::Scope(errorString, inspector, contextGroupId),
+ : InjectedScript::Scope(inspector, contextGroupId),
m_remoteObjectId(remoteObjectId) {}
InjectedScript::ObjectScope::~ObjectScope() {}
-void InjectedScript::ObjectScope::findInjectedScript(
+Response InjectedScript::ObjectScope::findInjectedScript(
V8InspectorSessionImpl* session) {
- std::unique_ptr<RemoteObjectId> remoteId =
- RemoteObjectId::parse(m_errorString, m_remoteObjectId);
- if (!remoteId) return;
- InjectedScript* injectedScript =
- session->findInjectedScript(m_errorString, remoteId.get());
- if (!injectedScript) return;
+ std::unique_ptr<RemoteObjectId> remoteId;
+ Response response = RemoteObjectId::parse(m_remoteObjectId, &remoteId);
+ if (!response.isSuccess()) return response;
+ InjectedScript* injectedScript = nullptr;
+ response = session->findInjectedScript(remoteId.get(), injectedScript);
+ if (!response.isSuccess()) return response;
m_objectGroupName = injectedScript->objectGroupName(*remoteId);
- if (!injectedScript->findObject(m_errorString, *remoteId, &m_object)) return;
+ response = injectedScript->findObject(*remoteId, &m_object);
+ if (!response.isSuccess()) return response;
m_injectedScript = injectedScript;
+ return Response::OK();
}
-InjectedScript::CallFrameScope::CallFrameScope(ErrorString* errorString,
- V8InspectorImpl* inspector,
+InjectedScript::CallFrameScope::CallFrameScope(V8InspectorImpl* inspector,
int contextGroupId,
const String16& remoteObjectId)
- : InjectedScript::Scope(errorString, inspector, contextGroupId),
+ : InjectedScript::Scope(inspector, contextGroupId),
m_remoteCallFrameId(remoteObjectId) {}
InjectedScript::CallFrameScope::~CallFrameScope() {}
-void InjectedScript::CallFrameScope::findInjectedScript(
+Response InjectedScript::CallFrameScope::findInjectedScript(
V8InspectorSessionImpl* session) {
- std::unique_ptr<RemoteCallFrameId> remoteId =
- RemoteCallFrameId::parse(m_errorString, m_remoteCallFrameId);
- if (!remoteId) return;
+ std::unique_ptr<RemoteCallFrameId> remoteId;
+ Response response = RemoteCallFrameId::parse(m_remoteCallFrameId, &remoteId);
+ if (!response.isSuccess()) return response;
m_frameOrdinal = static_cast<size_t>(remoteId->frameOrdinal());
- m_injectedScript = session->findInjectedScript(m_errorString, remoteId.get());
+ return session->findInjectedScript(remoteId.get(), m_injectedScript);
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 9b324c948d..6500f4dbb7 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -48,8 +48,8 @@ class V8FunctionCall;
class V8InspectorImpl;
class V8InspectorSessionImpl;
-using protocol::ErrorString;
using protocol::Maybe;
+using protocol::Response;
class InjectedScript final {
public:
@@ -58,56 +58,51 @@ class InjectedScript final {
InspectedContext* context() const { return m_context; }
- void getProperties(
- ErrorString*, v8::Local<v8::Object>, const String16& groupName,
- bool ownProperties, bool accessorPropertiesOnly, bool generatePreview,
+ Response getProperties(
+ v8::Local<v8::Object>, const String16& groupName, bool ownProperties,
+ bool accessorPropertiesOnly, bool generatePreview,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Runtime::ExceptionDetails>*);
void releaseObject(const String16& objectId);
- std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
- ErrorString*, v8::Local<v8::Value>, const String16& groupName,
- bool forceValueType = false, bool generatePreview = false) const;
- bool wrapObjectProperty(ErrorString*, v8::Local<v8::Object>,
- v8::Local<v8::Name> key, const String16& groupName,
- bool forceValueType = false,
- bool generatePreview = false) const;
- bool wrapPropertyInArray(ErrorString*, v8::Local<v8::Array>,
- v8::Local<v8::String> property,
- const String16& groupName,
- bool forceValueType = false,
- bool generatePreview = false) const;
- bool wrapObjectsInArray(ErrorString*, v8::Local<v8::Array>,
- const String16& groupName,
- bool forceValueType = false,
- bool generatePreview = false) const;
+ Response wrapObject(
+ v8::Local<v8::Value>, const String16& groupName, bool forceValueType,
+ bool generatePreview,
+ std::unique_ptr<protocol::Runtime::RemoteObject>* result) const;
+ Response wrapObjectProperty(v8::Local<v8::Object>, v8::Local<v8::Name> key,
+ const String16& groupName,
+ bool forceValueType = false,
+ bool generatePreview = false) const;
+ Response wrapPropertyInArray(v8::Local<v8::Array>,
+ v8::Local<v8::String> property,
+ const String16& groupName,
+ bool forceValueType = false,
+ bool generatePreview = false) const;
std::unique_ptr<protocol::Runtime::RemoteObject> wrapTable(
v8::Local<v8::Value> table, v8::Local<v8::Value> columns) const;
- bool findObject(ErrorString*, const RemoteObjectId&,
- v8::Local<v8::Value>*) const;
+ Response findObject(const RemoteObjectId&, v8::Local<v8::Value>*) const;
String16 objectGroupName(const RemoteObjectId&) const;
void releaseObjectGroup(const String16&);
void setCustomObjectFormatterEnabled(bool);
- v8::MaybeLocal<v8::Value> resolveCallArgument(
- ErrorString*, protocol::Runtime::CallArgument*);
-
- std::unique_ptr<protocol::Runtime::ExceptionDetails> createExceptionDetails(
- ErrorString*, const v8::TryCatch&, const String16& groupName,
- bool generatePreview);
- void wrapEvaluateResult(
- ErrorString*, v8::MaybeLocal<v8::Value> maybeResultValue,
- const v8::TryCatch&, const String16& objectGroup, bool returnByValue,
- bool generatePreview,
+ Response resolveCallArgument(protocol::Runtime::CallArgument*,
+ v8::Local<v8::Value>* result);
+
+ Response createExceptionDetails(
+ const v8::TryCatch&, const String16& groupName, bool generatePreview,
+ Maybe<protocol::Runtime::ExceptionDetails>* result);
+ Response wrapEvaluateResult(
+ v8::MaybeLocal<v8::Value> maybeResultValue, const v8::TryCatch&,
+ const String16& objectGroup, bool returnByValue, bool generatePreview,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>*);
v8::Local<v8::Value> lastEvaluationResult() const;
class Scope {
public:
- bool initialize();
- bool installCommandLineAPI();
+ Response initialize();
+ void installCommandLineAPI();
void ignoreExceptionsAndMuteConsole();
void pretendUserGesture();
v8::Local<v8::Context> context() const { return m_context; }
@@ -115,37 +110,35 @@ class InjectedScript final {
const v8::TryCatch& tryCatch() const { return m_tryCatch; }
protected:
- Scope(ErrorString*, V8InspectorImpl*, int contextGroupId);
+ Scope(V8InspectorImpl*, int contextGroupId);
virtual ~Scope();
- virtual void findInjectedScript(V8InspectorSessionImpl*) = 0;
+ virtual Response findInjectedScript(V8InspectorSessionImpl*) = 0;
- ErrorString* m_errorString;
V8InspectorImpl* m_inspector;
int m_contextGroupId;
InjectedScript* m_injectedScript;
private:
void cleanup();
- V8Debugger::PauseOnExceptionsState setPauseOnExceptionsState(
- V8Debugger::PauseOnExceptionsState);
+ v8::DebugInterface::ExceptionBreakState setPauseOnExceptionsState(
+ v8::DebugInterface::ExceptionBreakState);
v8::HandleScope m_handleScope;
v8::TryCatch m_tryCatch;
v8::Local<v8::Context> m_context;
std::unique_ptr<V8Console::CommandLineAPIScope> m_commandLineAPIScope;
bool m_ignoreExceptionsAndMuteConsole;
- V8Debugger::PauseOnExceptionsState m_previousPauseOnExceptionsState;
+ v8::DebugInterface::ExceptionBreakState m_previousPauseOnExceptionsState;
bool m_userGesture;
};
class ContextScope : public Scope {
public:
- ContextScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
- int executionContextId);
+ ContextScope(V8InspectorImpl*, int contextGroupId, int executionContextId);
~ContextScope();
private:
- void findInjectedScript(V8InspectorSessionImpl*) override;
+ Response findInjectedScript(V8InspectorSessionImpl*) override;
int m_executionContextId;
DISALLOW_COPY_AND_ASSIGN(ContextScope);
@@ -153,14 +146,14 @@ class InjectedScript final {
class ObjectScope : public Scope {
public:
- ObjectScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+ ObjectScope(V8InspectorImpl*, int contextGroupId,
const String16& remoteObjectId);
~ObjectScope();
const String16& objectGroupName() const { return m_objectGroupName; }
v8::Local<v8::Value> object() const { return m_object; }
private:
- void findInjectedScript(V8InspectorSessionImpl*) override;
+ Response findInjectedScript(V8InspectorSessionImpl*) override;
String16 m_remoteObjectId;
String16 m_objectGroupName;
v8::Local<v8::Value> m_object;
@@ -170,13 +163,13 @@ class InjectedScript final {
class CallFrameScope : public Scope {
public:
- CallFrameScope(ErrorString*, V8InspectorImpl*, int contextGroupId,
+ CallFrameScope(V8InspectorImpl*, int contextGroupId,
const String16& remoteCallFrameId);
~CallFrameScope();
size_t frameOrdinal() const { return m_frameOrdinal; }
private:
- void findInjectedScript(V8InspectorSessionImpl*) override;
+ Response findInjectedScript(V8InspectorSessionImpl*) override;
String16 m_remoteCallFrameId;
size_t m_frameOrdinal;
@@ -187,10 +180,9 @@ class InjectedScript final {
InjectedScript(InspectedContext*, v8::Local<v8::Object>,
std::unique_ptr<InjectedScriptNative>);
v8::Local<v8::Value> v8Value() const;
- v8::MaybeLocal<v8::Value> wrapValue(ErrorString*, v8::Local<v8::Value>,
- const String16& groupName,
- bool forceValueType,
- bool generatePreview) const;
+ Response wrapValue(v8::Local<v8::Value>, const String16& groupName,
+ bool forceValueType, bool generatePreview,
+ v8::Local<v8::Value>* result) const;
v8::Local<v8::Object> commandLineAPI();
InspectedContext* m_context;
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 9100f64b2a..dab3bba050 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -14,23 +14,23 @@
namespace v8_inspector {
-void InspectedContext::weakCallback(
- const v8::WeakCallbackInfo<InspectedContext>& data) {
- InspectedContext* context = data.GetParameter();
- if (!context->m_context.IsEmpty()) {
- context->m_context.Reset();
- data.SetSecondPassCallback(&InspectedContext::weakCallback);
- } else {
- context->m_inspector->discardInspectedContext(context->m_contextGroupId,
- context->m_contextId);
- }
-}
+namespace {
-void InspectedContext::consoleWeakCallback(
- const v8::WeakCallbackInfo<InspectedContext>& data) {
- data.GetParameter()->m_console.Reset();
+void clearContext(const v8::WeakCallbackInfo<v8::Global<v8::Context>>& data) {
+ // Inspected context is created in V8InspectorImpl::contextCreated method
+ // and destroyed in V8InspectorImpl::contextDestroyed.
+ // Both methods take a valid v8::Local<v8::Context> handle to the same
+ // context, meaning the context is created before the InspectedContext
+ // constructor and is always destroyed after the InspectedContext
+ // destructor; therefore this callback should never be called.
+ // It's possible only if inspector client doesn't call contextDestroyed which
+ // is considered an error.
+ CHECK(false);
+ data.GetParameter()->Reset();
}
+} // namespace
+
InspectedContext::InspectedContext(V8InspectorImpl* inspector,
const V8ContextInfo& info, int contextId)
: m_inspector(inspector),
@@ -41,7 +41,7 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
m_humanReadableName(toString16(info.humanReadableName)),
m_auxData(toString16(info.auxData)),
m_reported(false) {
- m_context.SetWeak(this, &InspectedContext::weakCallback,
+ m_context.SetWeak(&m_context, &clearContext,
v8::WeakCallbackType::kParameter);
v8::Isolate* isolate = m_inspector->isolate();
@@ -54,12 +54,11 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
.FromMaybe(false))
return;
m_console.Reset(isolate, console);
- m_console.SetWeak(this, &InspectedContext::consoleWeakCallback,
- v8::WeakCallbackType::kParameter);
+ m_console.SetWeak();
}
InspectedContext::~InspectedContext() {
- if (!m_context.IsEmpty() && !m_console.IsEmpty()) {
+ if (!m_console.IsEmpty()) {
v8::HandleScope scope(isolate());
V8Console::clearInspectedContextIfNeeded(context(),
m_console.Get(isolate()));
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index d8e72cc353..f31eb76419 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -41,9 +41,6 @@ class InspectedContext {
private:
friend class V8InspectorImpl;
InspectedContext(V8InspectorImpl*, const V8ContextInfo&, int contextId);
- static void weakCallback(const v8::WeakCallbackInfo<InspectedContext>&);
- static void consoleWeakCallback(
- const v8::WeakCallbackInfo<InspectedContext>&);
V8InspectorImpl* m_inspector;
v8::Global<v8::Context> m_context;
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/src/inspector/inspector.gyp
index 2d5c7a5153..c70722f852 100644
--- a/deps/v8/src/inspector/inspector.gyp
+++ b/deps/v8/src/inspector/inspector.gyp
@@ -4,11 +4,11 @@
{
'variables': {
- 'protocol_path': '<(PRODUCT_DIR)/../../third_party/WebKit/Source/platform/inspector_protocol',
+ 'protocol_path': '../../third_party/inspector_protocol',
},
'includes': [
'inspector.gypi',
- '<(PRODUCT_DIR)/../../../third_party/WebKit/Source/platform/inspector_protocol/inspector_protocol.gypi',
+ '<(PRODUCT_DIR)/../../../third_party/inspector_protocol/inspector_protocol.gypi',
],
'targets': [
{ 'target_name': 'inspector_injected_script',
@@ -97,7 +97,7 @@
'action': [
'python',
'<(protocol_path)/CodeGenerator.py',
- '--jinja_dir', '<(PRODUCT_DIR)/../../third_party',
+ '--jinja_dir', '../../third_party',
'--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
'--config', 'inspector_protocol_config.json',
],
diff --git a/deps/v8/src/inspector/java-script-call-frame.cc b/deps/v8/src/inspector/java-script-call-frame.cc
index b70af21f86..2da4f04249 100644
--- a/deps/v8/src/inspector/java-script-call-frame.cc
+++ b/deps/v8/src/inspector/java-script-call-frame.cc
@@ -30,10 +30,9 @@
#include "src/inspector/java-script-call-frame.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/string-util.h"
-#include "include/v8-debug.h"
-
namespace v8_inspector {
JavaScriptCallFrame::JavaScriptCallFrame(v8::Local<v8::Context> debuggerContext,
@@ -130,10 +129,10 @@ v8::MaybeLocal<v8::Value> JavaScriptCallFrame::restart() {
v8::Local<v8::Function> restartFunction = v8::Local<v8::Function>::Cast(
callFrame->Get(context, toV8StringInternalized(m_isolate, "restart"))
.ToLocalChecked());
- v8::Debug::SetLiveEditEnabled(m_isolate, true);
+ v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
v8::MaybeLocal<v8::Value> result = restartFunction->Call(
m_debuggerContext.Get(m_isolate), callFrame, 0, nullptr);
- v8::Debug::SetLiveEditEnabled(m_isolate, false);
+ v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
return result;
}
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index aff6806222..c1ac585ed1 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -538,6 +538,18 @@
"description": "Removes JavaScript breakpoint."
},
{
+ "name": "getPossibleBreakpoints",
+ "parameters": [
+ { "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
+ { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range." }
+ ],
+ "returns": [
+ { "name": "locations", "type": "array", "items": { "$ref": "Location" }, "description": "List of the possible breakpoint locations." }
+ ],
+ "description": "Returns possible locations for breakpoint. scriptId in start and end range locations should be the same.",
+ "experimental": true
+ },
+ {
"name": "continueToLocation",
"parameters": [
{ "name": "location", "$ref": "Location", "description": "Location to continue to." }
diff --git a/deps/v8/src/inspector/remote-object-id.cc b/deps/v8/src/inspector/remote-object-id.cc
index d83020c6f2..aac6724498 100644
--- a/deps/v8/src/inspector/remote-object-id.cc
+++ b/deps/v8/src/inspector/remote-object-id.cc
@@ -27,44 +27,34 @@ RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
RemoteObjectId::RemoteObjectId() : RemoteObjectIdBase(), m_id(0) {}
-std::unique_ptr<RemoteObjectId> RemoteObjectId::parse(
- ErrorString* errorString, const String16& objectId) {
- std::unique_ptr<RemoteObjectId> result(new RemoteObjectId());
+Response RemoteObjectId::parse(const String16& objectId,
+ std::unique_ptr<RemoteObjectId>* result) {
+ std::unique_ptr<RemoteObjectId> remoteObjectId(new RemoteObjectId());
std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
- result->parseInjectedScriptId(objectId);
- if (!parsedObjectId) {
- *errorString = "Invalid remote object id";
- return nullptr;
- }
+ remoteObjectId->parseInjectedScriptId(objectId);
+ if (!parsedObjectId) return Response::Error("Invalid remote object id");
- bool success = parsedObjectId->getInteger("id", &result->m_id);
- if (!success) {
- *errorString = "Invalid remote object id";
- return nullptr;
- }
- return result;
+ bool success = parsedObjectId->getInteger("id", &remoteObjectId->m_id);
+ if (!success) return Response::Error("Invalid remote object id");
+ *result = std::move(remoteObjectId);
+ return Response::OK();
}
RemoteCallFrameId::RemoteCallFrameId()
: RemoteObjectIdBase(), m_frameOrdinal(0) {}
-std::unique_ptr<RemoteCallFrameId> RemoteCallFrameId::parse(
- ErrorString* errorString, const String16& objectId) {
- std::unique_ptr<RemoteCallFrameId> result(new RemoteCallFrameId());
+Response RemoteCallFrameId::parse(const String16& objectId,
+ std::unique_ptr<RemoteCallFrameId>* result) {
+ std::unique_ptr<RemoteCallFrameId> remoteCallFrameId(new RemoteCallFrameId());
std::unique_ptr<protocol::DictionaryValue> parsedObjectId =
- result->parseInjectedScriptId(objectId);
- if (!parsedObjectId) {
- *errorString = "Invalid call frame id";
- return nullptr;
- }
+ remoteCallFrameId->parseInjectedScriptId(objectId);
+ if (!parsedObjectId) return Response::Error("Invalid call frame id");
- bool success = parsedObjectId->getInteger("ordinal", &result->m_frameOrdinal);
- if (!success) {
- *errorString = "Invalid call frame id";
- return nullptr;
- }
-
- return result;
+ bool success =
+ parsedObjectId->getInteger("ordinal", &remoteCallFrameId->m_frameOrdinal);
+ if (!success) return Response::Error("Invalid call frame id");
+ *result = std::move(remoteCallFrameId);
+ return Response::OK();
}
String16 RemoteCallFrameId::serialize(int injectedScriptId, int frameOrdinal) {
diff --git a/deps/v8/src/inspector/remote-object-id.h b/deps/v8/src/inspector/remote-object-id.h
index a32f568fb8..3e6928a87e 100644
--- a/deps/v8/src/inspector/remote-object-id.h
+++ b/deps/v8/src/inspector/remote-object-id.h
@@ -9,7 +9,7 @@
namespace v8_inspector {
-using protocol::ErrorString;
+using protocol::Response;
class RemoteObjectIdBase {
public:
@@ -27,7 +27,7 @@ class RemoteObjectIdBase {
class RemoteObjectId final : public RemoteObjectIdBase {
public:
- static std::unique_ptr<RemoteObjectId> parse(ErrorString*, const String16&);
+ static Response parse(const String16&, std::unique_ptr<RemoteObjectId>*);
~RemoteObjectId() {}
int id() const { return m_id; }
@@ -39,8 +39,7 @@ class RemoteObjectId final : public RemoteObjectIdBase {
class RemoteCallFrameId final : public RemoteObjectIdBase {
public:
- static std::unique_ptr<RemoteCallFrameId> parse(ErrorString*,
- const String16&);
+ static Response parse(const String16&, std::unique_ptr<RemoteCallFrameId>*);
~RemoteCallFrameId() {}
int frameOrdinal() const { return m_frameOrdinal; }
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index f6084602f4..09909a911b 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -377,7 +377,11 @@ String16 String16::fromInteger(int number) {
String16 String16::fromInteger(size_t number) {
const size_t kBufferSize = 50;
char buffer[kBufferSize];
+#if !defined(_WIN32) && !defined(_WIN64)
v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
+#else
+ v8::base::OS::SNPrintF(buffer, kBufferSize, "%Iu", number);
+#endif
return String16(buffer);
}
@@ -443,6 +447,26 @@ void String16Builder::append(const char* characters, size_t length) {
m_buffer.insert(m_buffer.end(), characters, characters + length);
}
+void String16Builder::appendNumber(int number) {
+ const int kBufferSize = 11;
+ char buffer[kBufferSize];
+ int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
+ DCHECK_GT(kBufferSize, chars);
+ m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
+}
+
+void String16Builder::appendNumber(size_t number) {
+ const int kBufferSize = 20;
+ char buffer[kBufferSize];
+#if !defined(_WIN32) && !defined(_WIN64)
+ int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%zu", number);
+#else
+ int chars = v8::base::OS::SNPrintF(buffer, kBufferSize, "%Iu", number);
+#endif
+ DCHECK_GT(kBufferSize, chars);
+ m_buffer.insert(m_buffer.end(), buffer, buffer + chars);
+}
+
String16 String16Builder::toString() {
return String16(m_buffer.data(), m_buffer.size());
}
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 6dc7759de0..360ec93864 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -21,7 +21,10 @@ class String16 {
static const size_t kNotFound = static_cast<size_t>(-1);
String16() {}
- String16(const String16& other) : m_impl(other.m_impl) {}
+ String16(const String16& other)
+ : m_impl(other.m_impl), hash_code(other.hash_code) {}
+ String16(const String16&& other)
+ : m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
String16(const UChar* characters) // NOLINT(runtime/explicit)
: m_impl(characters) {}
@@ -31,6 +34,18 @@ class String16 {
m_impl.resize(size);
for (size_t i = 0; i < size; ++i) m_impl[i] = characters[i];
}
+ explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
+
+ String16& operator=(const String16& other) {
+ m_impl = other.m_impl;
+ hash_code = other.hash_code;
+ return *this;
+ }
+ String16& operator=(String16&& other) {
+ m_impl = std::move(other.m_impl);
+ hash_code = other.hash_code;
+ return *this;
+ }
static String16 fromInteger(int);
static String16 fromInteger(size_t);
@@ -52,51 +67,53 @@ class String16 {
size_t reverseFind(const String16& str, size_t start = UINT_MAX) const {
return m_impl.rfind(str.m_impl, start);
}
- void swap(String16& other) { m_impl.swap(other.m_impl); }
+ size_t find(UChar c, size_t start = 0) const { return m_impl.find(c, start); }
+ size_t reverseFind(UChar c, size_t start = UINT_MAX) const {
+ return m_impl.rfind(c, start);
+ }
+ void swap(String16& other) {
+ m_impl.swap(other.m_impl);
+ std::swap(hash_code, other.hash_code);
+ }
// Convenience methods.
std::string utf8() const;
static String16 fromUTF8(const char* stringStart, size_t length);
- const std::basic_string<UChar>& impl() const { return m_impl; }
- explicit String16(const std::basic_string<UChar>& impl) : m_impl(impl) {}
-
std::size_t hash() const {
- if (!has_hash) {
- size_t hash = 0;
- for (size_t i = 0; i < length(); ++i) hash = 31 * hash + m_impl[i];
- hash_code = hash;
- has_hash = true;
+ if (!hash_code) {
+ for (char c : m_impl) hash_code = 31 * hash_code + c;
+ // Map hash code 0 to 1. This doubles the number of hash collisions for 1,
+ // but avoids recomputing the hash code.
+ if (!hash_code) ++hash_code;
}
return hash_code;
}
+ inline bool operator==(const String16& other) const {
+ return m_impl == other.m_impl;
+ }
+ inline bool operator<(const String16& other) const {
+ return m_impl < other.m_impl;
+ }
+ inline bool operator!=(const String16& other) const {
+ return m_impl != other.m_impl;
+ }
+ inline String16 operator+(const String16& other) const {
+ return String16(m_impl + other.m_impl);
+ }
+
+ // Defined later, since it uses the String16Builder.
+ template <typename... T>
+ static String16 concat(T... args);
+
private:
std::basic_string<UChar> m_impl;
- mutable bool has_hash = false;
mutable std::size_t hash_code = 0;
};
-inline bool operator==(const String16& a, const String16& b) {
- return a.impl() == b.impl();
-}
-inline bool operator<(const String16& a, const String16& b) {
- return a.impl() < b.impl();
-}
-inline bool operator!=(const String16& a, const String16& b) {
- return a.impl() != b.impl();
-}
-inline bool operator==(const String16& a, const char* b) {
- return a.impl() == String16(b).impl();
-}
-inline String16 operator+(const String16& a, const char* b) {
- return String16(a.impl() + String16(b).impl());
-}
inline String16 operator+(const char* a, const String16& b) {
- return String16(String16(a).impl() + b.impl());
-}
-inline String16 operator+(const String16& a, const String16& b) {
- return String16(a.impl() + b.impl());
+ return String16(a) + b;
}
class String16Builder {
@@ -107,13 +124,29 @@ class String16Builder {
void append(char);
void append(const UChar*, size_t);
void append(const char*, size_t);
+ void appendNumber(int);
+ void appendNumber(size_t);
String16 toString();
void reserveCapacity(size_t);
+ template <typename T, typename... R>
+ void appendAll(T first, R... rest) {
+ append(first);
+ appendAll(rest...);
+ }
+ void appendAll() {}
+
private:
std::vector<UChar> m_buffer;
};
+template <typename... T>
+String16 String16::concat(T... args) {
+ String16Builder builder;
+ builder.appendAll(args...);
+ return builder.toString();
+}
+
} // namespace v8_inspector
#if !defined(__APPLE__) || defined(_LIBCPP_VERSION)
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index e6b83a5d7d..e6ad5d0c5b 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -111,94 +111,6 @@ std::unique_ptr<protocol::Value> parseJSON(const String16& string) {
} // namespace protocol
-std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
- v8::Local<v8::Context> context,
- v8::Local<v8::Value> value,
- int maxDepth) {
- if (value.IsEmpty()) {
- UNREACHABLE();
- return nullptr;
- }
-
- if (!maxDepth) {
- *errorString = "Object reference chain is too long";
- return nullptr;
- }
- maxDepth--;
-
- if (value->IsNull() || value->IsUndefined()) return protocol::Value::null();
- if (value->IsBoolean())
- return protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
- if (value->IsNumber()) {
- double doubleValue = value.As<v8::Number>()->Value();
- int intValue = static_cast<int>(doubleValue);
- if (intValue == doubleValue)
- return protocol::FundamentalValue::create(intValue);
- return protocol::FundamentalValue::create(doubleValue);
- }
- if (value->IsString())
- return protocol::StringValue::create(
- toProtocolString(value.As<v8::String>()));
- if (value->IsArray()) {
- v8::Local<v8::Array> array = value.As<v8::Array>();
- std::unique_ptr<protocol::ListValue> inspectorArray =
- protocol::ListValue::create();
- uint32_t length = array->Length();
- for (uint32_t i = 0; i < length; i++) {
- v8::Local<v8::Value> value;
- if (!array->Get(context, i).ToLocal(&value)) {
- *errorString = "Internal error";
- return nullptr;
- }
- std::unique_ptr<protocol::Value> element =
- toProtocolValue(errorString, context, value, maxDepth);
- if (!element) return nullptr;
- inspectorArray->pushValue(std::move(element));
- }
- return std::move(inspectorArray);
- }
- if (value->IsObject()) {
- std::unique_ptr<protocol::DictionaryValue> jsonObject =
- protocol::DictionaryValue::create();
- v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
- v8::Local<v8::Array> propertyNames;
- if (!object->GetPropertyNames(context).ToLocal(&propertyNames)) {
- *errorString = "Internal error";
- return nullptr;
- }
- uint32_t length = propertyNames->Length();
- for (uint32_t i = 0; i < length; i++) {
- v8::Local<v8::Value> name;
- if (!propertyNames->Get(context, i).ToLocal(&name)) {
- *errorString = "Internal error";
- return nullptr;
- }
- // FIXME(yurys): v8::Object should support GetOwnPropertyNames
- if (name->IsString()) {
- v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
- context, v8::Local<v8::String>::Cast(name));
- if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
- continue;
- }
- v8::Local<v8::String> propertyName;
- if (!name->ToString(context).ToLocal(&propertyName)) continue;
- v8::Local<v8::Value> property;
- if (!object->Get(context, name).ToLocal(&property)) {
- *errorString = "Internal error";
- return nullptr;
- }
- std::unique_ptr<protocol::Value> propertyValue =
- toProtocolValue(errorString, context, property, maxDepth);
- if (!propertyValue) return nullptr;
- jsonObject->setValue(toProtocolString(propertyName),
- std::move(propertyValue));
- }
- return std::move(jsonObject);
- }
- *errorString = "Object couldn't be returned by value";
- return nullptr;
-}
-
// static
std::unique_ptr<StringBuffer> StringBuffer::create(const StringView& string) {
String16 owner = toString16(string);
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 30137b8b78..e1a69e8906 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -40,11 +40,6 @@ std::unique_ptr<protocol::Value> parseJSON(const String16& json);
} // namespace protocol
-std::unique_ptr<protocol::Value> toProtocolValue(protocol::String* errorString,
- v8::Local<v8::Context>,
- v8::Local<v8::Value>,
- int maxDepth = 1000);
-
v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const String16&);
v8::Local<v8::String> toV8StringInternalized(v8::Isolate*, const char*);
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.cc b/deps/v8/src/inspector/v8-console-agent-impl.cc
index 8eb883cb75..6b0e12a77b 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-console-agent-impl.cc
@@ -26,28 +26,29 @@ V8ConsoleAgentImpl::V8ConsoleAgentImpl(
V8ConsoleAgentImpl::~V8ConsoleAgentImpl() {}
-void V8ConsoleAgentImpl::enable(ErrorString* errorString) {
- if (m_enabled) return;
+Response V8ConsoleAgentImpl::enable() {
+ if (m_enabled) return Response::OK();
m_state->setBoolean(ConsoleAgentState::consoleEnabled, true);
m_enabled = true;
m_session->inspector()->enableStackCapturingIfNeeded();
reportAllMessages();
+ return Response::OK();
}
-void V8ConsoleAgentImpl::disable(ErrorString* errorString) {
- if (!m_enabled) return;
+Response V8ConsoleAgentImpl::disable() {
+ if (!m_enabled) return Response::OK();
m_session->inspector()->disableStackCapturingIfNeeded();
m_state->setBoolean(ConsoleAgentState::consoleEnabled, false);
m_enabled = false;
+ return Response::OK();
}
-void V8ConsoleAgentImpl::clearMessages(ErrorString* errorString) {}
+Response V8ConsoleAgentImpl::clearMessages() { return Response::OK(); }
void V8ConsoleAgentImpl::restore() {
if (!m_state->booleanProperty(ConsoleAgentState::consoleEnabled, false))
return;
- ErrorString ignored;
- enable(&ignored);
+ enable();
}
void V8ConsoleAgentImpl::messageAdded(V8ConsoleMessage* message) {
diff --git a/deps/v8/src/inspector/v8-console-agent-impl.h b/deps/v8/src/inspector/v8-console-agent-impl.h
index f3d598bb34..db17e54718 100644
--- a/deps/v8/src/inspector/v8-console-agent-impl.h
+++ b/deps/v8/src/inspector/v8-console-agent-impl.h
@@ -14,7 +14,7 @@ namespace v8_inspector {
class V8ConsoleMessage;
class V8InspectorSessionImpl;
-using protocol::ErrorString;
+using protocol::Response;
class V8ConsoleAgentImpl : public protocol::Console::Backend {
public:
@@ -22,9 +22,9 @@ class V8ConsoleAgentImpl : public protocol::Console::Backend {
protocol::DictionaryValue* state);
~V8ConsoleAgentImpl() override;
- void enable(ErrorString*) override;
- void disable(ErrorString*) override;
- void clearMessages(ErrorString*) override;
+ Response enable() override;
+ Response disable() override;
+ Response clearMessages() override;
void restore();
void messageAdded(V8ConsoleMessage*);
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index ddd4bf629e..fee61177e7 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -618,12 +618,11 @@ static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
if (!context) return;
InjectedScript* injectedScript = context->getInjectedScript();
if (!injectedScript) return;
- ErrorString errorString;
- std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject =
- injectedScript->wrapObject(&errorString, info[0], "",
- false /** forceValueType */,
- false /** generatePreview */);
- if (!wrappedObject || !errorString.isEmpty()) return;
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject;
+ protocol::Response response =
+ injectedScript->wrapObject(info[0], "", false /** forceValueType */,
+ false /** generatePreview */, &wrappedObject);
+ if (!response.isSuccess()) return;
std::unique_ptr<protocol::DictionaryValue> hints =
protocol::DictionaryValue::create();
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 80e261119e..224ae282c4 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -6,6 +6,7 @@
#include <algorithm>
+#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/java-script-call-frame.h"
@@ -21,6 +22,7 @@
#include "src/inspector/v8-regex.h"
#include "src/inspector/v8-runtime-agent-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/inspector/v8-value-copier.h"
#include "include/v8-inspector.h"
@@ -52,8 +54,11 @@ static const char skipAllPauses[] = "skipAllPauses";
} // namespace DebuggerAgentState
-static const int maxSkipStepFrameCount = 128;
-static const char backtraceObjectGroup[] = "backtrace";
+static const int kMaxSkipStepFrameCount = 128;
+static const char kBacktraceObjectGroup[] = "backtrace";
+static const char kDebuggerNotEnabled[] = "Debugger agent is not enabled";
+static const char kDebuggerNotPaused[] =
+ "Can only perform operation while paused.";
static String16 breakpointIdSuffix(
V8DebuggerAgentImpl::BreakpointSource source) {
@@ -71,8 +76,14 @@ static String16 breakpointIdSuffix(
static String16 generateBreakpointId(
const String16& scriptId, int lineNumber, int columnNumber,
V8DebuggerAgentImpl::BreakpointSource source) {
- return scriptId + ":" + String16::fromInteger(lineNumber) + ":" +
- String16::fromInteger(columnNumber) + breakpointIdSuffix(source);
+ String16Builder builder;
+ builder.append(scriptId);
+ builder.append(':');
+ builder.appendNumber(lineNumber);
+ builder.append(':');
+ builder.appendNumber(columnNumber);
+ builder.append(breakpointIdSuffix(source));
+ return builder.toString();
}
static bool positionComparator(const std::pair<int, int>& a,
@@ -81,11 +92,6 @@ static bool positionComparator(const std::pair<int, int>& a,
return a.second < b.second;
}
-static bool hasInternalError(ErrorString* errorString, bool hasError) {
- if (hasError) *errorString = "Internal error";
- return hasError;
-}
-
static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
const String16& scriptId, int lineNumber, int columnNumber) {
return protocol::Debugger::Location::create()
@@ -120,13 +126,7 @@ V8DebuggerAgentImpl::V8DebuggerAgentImpl(
V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
-bool V8DebuggerAgentImpl::checkEnabled(ErrorString* errorString) {
- if (enabled()) return true;
- *errorString = "Debugger agent is not enabled";
- return false;
-}
-
-void V8DebuggerAgentImpl::enable() {
+void V8DebuggerAgentImpl::enableImpl() {
// m_inspector->addListener may result in reporting all parsed scripts to
// the agent so it should already be in enabled state by then.
m_enabled = true;
@@ -145,24 +145,23 @@ void V8DebuggerAgentImpl::enable() {
bool V8DebuggerAgentImpl::enabled() { return m_enabled; }
-void V8DebuggerAgentImpl::enable(ErrorString* errorString) {
- if (enabled()) return;
+Response V8DebuggerAgentImpl::enable() {
+ if (enabled()) return Response::OK();
- if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId())) {
- *errorString = "Script execution is prohibited";
- return;
- }
+ if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
+ return Response::Error("Script execution is prohibited");
- enable();
+ enableImpl();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::disable(ErrorString*) {
- if (!enabled()) return;
+Response V8DebuggerAgentImpl::disable() {
+ if (!enabled()) return Response::OK();
m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
protocol::DictionaryValue::create());
m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
- V8Debugger::DontPauseOnExceptions);
+ v8::DebugInterface::NoBreakOnException);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
@@ -188,6 +187,7 @@ void V8DebuggerAgentImpl::disable(ErrorString*) {
m_state->remove(DebuggerAgentState::blackboxPattern);
m_enabled = false;
m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
+ return Response::OK();
}
void V8DebuggerAgentImpl::restore() {
@@ -197,13 +197,11 @@ void V8DebuggerAgentImpl::restore() {
if (!m_inspector->client()->canExecuteScripts(m_session->contextGroupId()))
return;
- enable();
- ErrorString error;
+ enableImpl();
- int pauseState = V8Debugger::DontPauseOnExceptions;
+ int pauseState = v8::DebugInterface::NoBreakOnException;
m_state->getInteger(DebuggerAgentState::pauseOnExceptionsState, &pauseState);
- setPauseOnExceptionsImpl(&error, pauseState);
- DCHECK(error.isEmpty());
+ setPauseOnExceptionsImpl(pauseState);
m_skipAllPauses =
m_state->booleanProperty(DebuggerAgentState::skipAllPauses, false);
@@ -216,19 +214,20 @@ void V8DebuggerAgentImpl::restore() {
String16 blackboxPattern;
if (m_state->getString(DebuggerAgentState::blackboxPattern,
&blackboxPattern)) {
- if (!setBlackboxPattern(&error, blackboxPattern)) UNREACHABLE();
+ setBlackboxPattern(blackboxPattern);
}
}
-void V8DebuggerAgentImpl::setBreakpointsActive(ErrorString* errorString,
- bool active) {
- if (!checkEnabled(errorString)) return;
+Response V8DebuggerAgentImpl::setBreakpointsActive(bool active) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
m_debugger->setBreakpointsActivated(active);
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setSkipAllPauses(ErrorString*, bool skip) {
+Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
m_skipAllPauses = skip;
m_state->setBoolean(DebuggerAgentState::skipAllPauses, m_skipAllPauses);
+ return Response::OK();
}
static std::unique_ptr<protocol::DictionaryValue>
@@ -254,27 +253,21 @@ static bool matches(V8InspectorImpl* inspector, const String16& url,
return url == pattern;
}
-void V8DebuggerAgentImpl::setBreakpointByUrl(
- ErrorString* errorString, int lineNumber,
- const Maybe<String16>& optionalURL, const Maybe<String16>& optionalURLRegex,
- const Maybe<int>& optionalColumnNumber,
- const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+Response V8DebuggerAgentImpl::setBreakpointByUrl(
+ int lineNumber, Maybe<String16> optionalURL,
+ Maybe<String16> optionalURLRegex, Maybe<int> optionalColumnNumber,
+ Maybe<String16> optionalCondition, String16* outBreakpointId,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
*locations = Array<protocol::Debugger::Location>::create();
- if (optionalURL.isJust() == optionalURLRegex.isJust()) {
- *errorString = "Either url or urlRegex must be specified.";
- return;
- }
+ if (optionalURL.isJust() == optionalURLRegex.isJust())
+ return Response::Error("Either url or urlRegex must be specified.");
String16 url = optionalURL.isJust() ? optionalURL.fromJust()
: optionalURLRegex.fromJust();
int columnNumber = 0;
if (optionalColumnNumber.isJust()) {
columnNumber = optionalColumnNumber.fromJust();
- if (columnNumber < 0) {
- *errorString = "Incorrect column number";
- return;
- }
+ if (columnNumber < 0) return Response::Error("Incorrect column number");
}
String16 condition = optionalCondition.fromMaybe("");
bool isRegex = optionalURLRegex.isJust();
@@ -291,10 +284,8 @@ void V8DebuggerAgentImpl::setBreakpointByUrl(
m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
std::move(newValue));
}
- if (breakpointsCookie->get(breakpointId)) {
- *errorString = "Breakpoint at specified location already exists.";
- return;
- }
+ if (breakpointsCookie->get(breakpointId))
+ return Response::Error("Breakpoint at specified location already exists.");
breakpointsCookie->setObject(
breakpointId, buildObjectForBreakpointCookie(
@@ -310,30 +301,16 @@ void V8DebuggerAgentImpl::setBreakpointByUrl(
}
*outBreakpointId = breakpointId;
+ return Response::OK();
}
-static bool parseLocation(
- ErrorString* errorString,
- std::unique_ptr<protocol::Debugger::Location> location, String16* scriptId,
- int* lineNumber, int* columnNumber) {
- *scriptId = location->getScriptId();
- *lineNumber = location->getLineNumber();
- *columnNumber = location->getColumnNumber(0);
- return true;
-}
-
-void V8DebuggerAgentImpl::setBreakpoint(
- ErrorString* errorString,
+Response V8DebuggerAgentImpl::setBreakpoint(
std::unique_ptr<protocol::Debugger::Location> location,
- const Maybe<String16>& optionalCondition, String16* outBreakpointId,
+ Maybe<String16> optionalCondition, String16* outBreakpointId,
std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
- String16 scriptId;
- int lineNumber;
- int columnNumber;
-
- if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
- &columnNumber))
- return;
+ String16 scriptId = location->getScriptId();
+ int lineNumber = location->getLineNumber();
+ int columnNumber = location->getColumnNumber(0);
String16 condition = optionalCondition.fromMaybe("");
@@ -341,28 +318,26 @@ void V8DebuggerAgentImpl::setBreakpoint(
scriptId, lineNumber, columnNumber, UserBreakpointSource);
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
- *errorString = "Breakpoint at specified location already exists.";
- return;
+ return Response::Error("Breakpoint at specified location already exists.");
}
ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
*actualLocation = resolveBreakpoint(breakpointId, scriptId, breakpoint,
UserBreakpointSource);
- if (*actualLocation)
- *outBreakpointId = breakpointId;
- else
- *errorString = "Could not resolve breakpoint";
+ if (!*actualLocation) return Response::Error("Could not resolve breakpoint");
+ *outBreakpointId = breakpointId;
+ return Response::OK();
}
-void V8DebuggerAgentImpl::removeBreakpoint(ErrorString* errorString,
- const String16& breakpointId) {
- if (!checkEnabled(errorString)) return;
+Response V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
protocol::DictionaryValue* breakpointsCookie =
m_state->getObject(DebuggerAgentState::javaScriptBreakpoints);
if (breakpointsCookie) breakpointsCookie->remove(breakpointId);
- removeBreakpoint(breakpointId);
+ removeBreakpointImpl(breakpointId);
+ return Response::OK();
}
-void V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
+void V8DebuggerAgentImpl::removeBreakpointImpl(const String16& breakpointId) {
DCHECK(enabled());
BreakpointIdToDebuggerBreakpointIdsMap::iterator
debuggerBreakpointIdsIterator =
@@ -380,27 +355,64 @@ void V8DebuggerAgentImpl::removeBreakpoint(const String16& breakpointId) {
m_breakpointIdToDebuggerBreakpointIds.erase(breakpointId);
}
-void V8DebuggerAgentImpl::continueToLocation(
- ErrorString* errorString,
+Response V8DebuggerAgentImpl::getPossibleBreakpoints(
+ std::unique_ptr<protocol::Debugger::Location> start,
+ Maybe<protocol::Debugger::Location> end,
+ std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations) {
+ String16 scriptId = start->getScriptId();
+
+ if (start->getLineNumber() < 0 || start->getColumnNumber(0) < 0)
+ return Response::Error(
+ "start.lineNumber and start.columnNumber should be >= 0");
+
+ v8::DebugInterface::Location v8Start(start->getLineNumber(),
+ start->getColumnNumber(0));
+ v8::DebugInterface::Location v8End;
+ if (end.isJust()) {
+ if (end.fromJust()->getScriptId() != scriptId)
+ return Response::Error("Locations should contain the same scriptId");
+ int line = end.fromJust()->getLineNumber();
+ int column = end.fromJust()->getColumnNumber(0);
+ if (line < 0 || column < 0)
+ return Response::Error(
+ "end.lineNumber and end.columnNumber should be >= 0");
+ v8End = v8::DebugInterface::Location(line, column);
+ }
+ auto it = m_scripts.find(scriptId);
+ if (it == m_scripts.end()) return Response::Error("Script not found");
+
+ std::vector<v8::DebugInterface::Location> v8Locations;
+ if (!it->second->getPossibleBreakpoints(v8Start, v8End, &v8Locations))
+ return Response::InternalError();
+
+ *locations = protocol::Array<protocol::Debugger::Location>::create();
+ for (size_t i = 0; i < v8Locations.size(); ++i) {
+ (*locations)
+ ->addItem(protocol::Debugger::Location::create()
+ .setScriptId(scriptId)
+ .setLineNumber(v8Locations[i].GetLineNumber())
+ .setColumnNumber(v8Locations[i].GetColumnNumber())
+ .build());
+ }
+ return Response::OK();
+}
+
+Response V8DebuggerAgentImpl::continueToLocation(
std::unique_ptr<protocol::Debugger::Location> location) {
- if (!checkEnabled(errorString)) return;
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
if (!m_continueToLocationBreakpointId.isEmpty()) {
m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
m_continueToLocationBreakpointId = "";
}
- String16 scriptId;
- int lineNumber;
- int columnNumber;
-
- if (!parseLocation(errorString, std::move(location), &scriptId, &lineNumber,
- &columnNumber))
- return;
+ String16 scriptId = location->getScriptId();
+ int lineNumber = location->getLineNumber();
+ int columnNumber = location->getColumnNumber(0);
ScriptBreakpoint breakpoint(lineNumber, columnNumber, "");
m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
scriptId, breakpoint, &lineNumber, &columnNumber);
- resume(errorString);
+ return resume();
}
bool V8DebuggerAgentImpl::isCurrentCallStackEmptyOrBlackboxed() {
@@ -471,7 +483,7 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::shouldSkipStepPause(
if (!isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
return RequestNoSkip;
- if (m_skippedStepFrameCount >= maxSkipStepFrameCount) return RequestStepOut;
+ if (m_skippedStepFrameCount >= kMaxSkipStepFrameCount) return RequestStepOut;
if (!m_skippedStepFrameCount) m_recursionLevelForStepFrame = 1;
@@ -509,17 +521,14 @@ V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
return buildProtocolLocation(scriptId, actualLineNumber, actualColumnNumber);
}
-void V8DebuggerAgentImpl::searchInContent(
- ErrorString* error, const String16& scriptId, const String16& query,
- const Maybe<bool>& optionalCaseSensitive,
- const Maybe<bool>& optionalIsRegex,
+Response V8DebuggerAgentImpl::searchInContent(
+ const String16& scriptId, const String16& query,
+ Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
std::unique_ptr<Array<protocol::Debugger::SearchMatch>>* results) {
v8::HandleScope handles(m_isolate);
ScriptsMap::iterator it = m_scripts.find(scriptId);
- if (it == m_scripts.end()) {
- *error = String16("No script for id: " + scriptId);
- return;
- }
+ if (it == m_scripts.end())
+ return Response::Error("No script for id: " + scriptId);
std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
searchInTextByLinesImpl(m_session,
@@ -529,44 +538,46 @@ void V8DebuggerAgentImpl::searchInContent(
*results = protocol::Array<protocol::Debugger::SearchMatch>::create();
for (size_t i = 0; i < matches.size(); ++i)
(*results)->addItem(std::move(matches[i]));
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setScriptSource(
- ErrorString* errorString, const String16& scriptId,
- const String16& newContent, const Maybe<bool>& dryRun,
+Response V8DebuggerAgentImpl::setScriptSource(
+ const String16& scriptId, const String16& newContent, Maybe<bool> dryRun,
Maybe<protocol::Array<protocol::Debugger::CallFrame>>* newCallFrames,
Maybe<bool>* stackChanged, Maybe<StackTrace>* asyncStackTrace,
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
- if (!checkEnabled(errorString)) return;
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
v8::HandleScope handles(m_isolate);
v8::Local<v8::String> newSource = toV8String(m_isolate, newContent);
- if (!m_debugger->setScriptSource(scriptId, newSource, dryRun.fromMaybe(false),
- errorString, optOutCompileError,
- &m_pausedCallFrames, stackChanged))
- return;
+ bool compileError = false;
+ Response response = m_debugger->setScriptSource(
+ scriptId, newSource, dryRun.fromMaybe(false), optOutCompileError,
+ &m_pausedCallFrames, stackChanged, &compileError);
+ if (!response.isSuccess() || compileError) return response;
ScriptsMap::iterator it = m_scripts.find(scriptId);
- if (it != m_scripts.end()) it->second->setSource(m_isolate, newSource);
+ if (it != m_scripts.end()) it->second->setSource(newSource);
- std::unique_ptr<Array<CallFrame>> callFrames = currentCallFrames(errorString);
- if (!callFrames) return;
+ std::unique_ptr<Array<CallFrame>> callFrames;
+ response = currentCallFrames(&callFrames);
+ if (!response.isSuccess()) return response;
*newCallFrames = std::move(callFrames);
*asyncStackTrace = currentAsyncStackTrace();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::restartFrame(
- ErrorString* errorString, const String16& callFrameId,
+Response V8DebuggerAgentImpl::restartFrame(
+ const String16& callFrameId,
std::unique_ptr<Array<CallFrame>>* newCallFrames,
Maybe<StackTrace>* asyncStackTrace) {
- if (!assertPaused(errorString)) return;
- InjectedScript::CallFrameScope scope(
- errorString, m_inspector, m_session->contextGroupId(), callFrameId);
- if (!scope.initialize()) return;
- if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
- *errorString = "Could not find call frame with given id";
- return;
- }
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
+ callFrameId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) return response;
+ if (scope.frameOrdinal() >= m_pausedCallFrames.size())
+ return Response::Error("Could not find call frame with given id");
v8::Local<v8::Value> resultValue;
v8::Local<v8::Boolean> result;
@@ -575,28 +586,26 @@ void V8DebuggerAgentImpl::restartFrame(
scope.tryCatch().HasCaught() ||
!resultValue->ToBoolean(scope.context()).ToLocal(&result) ||
!result->Value()) {
- *errorString = "Internal error";
- return;
+ return Response::InternalError();
}
JavaScriptCallFrames frames = m_debugger->currentCallFrames();
m_pausedCallFrames.swap(frames);
- *newCallFrames = currentCallFrames(errorString);
- if (!*newCallFrames) return;
+ response = currentCallFrames(newCallFrames);
+ if (!response.isSuccess()) return response;
*asyncStackTrace = currentAsyncStackTrace();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::getScriptSource(ErrorString* error,
- const String16& scriptId,
- String16* scriptSource) {
- if (!checkEnabled(error)) return;
+Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId,
+ String16* scriptSource) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
ScriptsMap::iterator it = m_scripts.find(scriptId);
- if (it == m_scripts.end()) {
- *error = "No script for id: " + scriptId;
- return;
- }
+ if (it == m_scripts.end())
+ return Response::Error("No script for id: " + scriptId);
v8::HandleScope handles(m_isolate);
*scriptSource = toProtocolString(it->second->source(m_isolate));
+ return Response::OK();
}
void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
@@ -632,103 +641,100 @@ void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
m_debugger->setPauseOnNextStatement(false);
}
-void V8DebuggerAgentImpl::pause(ErrorString* errorString) {
- if (!checkEnabled(errorString)) return;
- if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
+Response V8DebuggerAgentImpl::pause() {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (m_javaScriptPauseScheduled || m_debugger->isPaused())
+ return Response::OK();
clearBreakDetails();
m_javaScriptPauseScheduled = true;
m_scheduledDebuggerStep = NoStep;
m_skippedStepFrameCount = 0;
m_steppingFromFramework = false;
m_debugger->setPauseOnNextStatement(true);
+ return Response::OK();
}
-void V8DebuggerAgentImpl::resume(ErrorString* errorString) {
- if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::resume() {
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
m_scheduledDebuggerStep = NoStep;
m_steppingFromFramework = false;
- m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->continueProgram();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::stepOver(ErrorString* errorString) {
- if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::stepOver() {
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
// StepOver at function return point should fallback to StepInto.
JavaScriptCallFrame* frame =
!m_pausedCallFrames.empty() ? m_pausedCallFrames[0].get() : nullptr;
- if (frame && frame->isAtReturn()) {
- stepInto(errorString);
- return;
- }
+ if (frame && frame->isAtReturn()) return stepInto();
m_scheduledDebuggerStep = StepOver;
m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
- m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepOverStatement();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::stepInto(ErrorString* errorString) {
- if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::stepInto() {
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
m_scheduledDebuggerStep = StepInto;
m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
- m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepIntoStatement();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::stepOut(ErrorString* errorString) {
- if (!assertPaused(errorString)) return;
+Response V8DebuggerAgentImpl::stepOut() {
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
m_scheduledDebuggerStep = StepOut;
m_skipNextDebuggerStepOut = false;
m_recursionLevelForStepOut = 1;
m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
- m_session->releaseObjectGroup(backtraceObjectGroup);
+ m_session->releaseObjectGroup(kBacktraceObjectGroup);
m_debugger->stepOutOfFunction();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setPauseOnExceptions(
- ErrorString* errorString, const String16& stringPauseState) {
- if (!checkEnabled(errorString)) return;
- V8Debugger::PauseOnExceptionsState pauseState;
+Response V8DebuggerAgentImpl::setPauseOnExceptions(
+ const String16& stringPauseState) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ v8::DebugInterface::ExceptionBreakState pauseState;
if (stringPauseState == "none") {
- pauseState = V8Debugger::DontPauseOnExceptions;
+ pauseState = v8::DebugInterface::NoBreakOnException;
} else if (stringPauseState == "all") {
- pauseState = V8Debugger::PauseOnAllExceptions;
+ pauseState = v8::DebugInterface::BreakOnAnyException;
} else if (stringPauseState == "uncaught") {
- pauseState = V8Debugger::PauseOnUncaughtExceptions;
+ pauseState = v8::DebugInterface::BreakOnUncaughtException;
} else {
- *errorString = "Unknown pause on exceptions mode: " + stringPauseState;
- return;
+ return Response::Error("Unknown pause on exceptions mode: " +
+ stringPauseState);
}
- setPauseOnExceptionsImpl(errorString, pauseState);
+ setPauseOnExceptionsImpl(pauseState);
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(ErrorString* errorString,
- int pauseState) {
+void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(int pauseState) {
m_debugger->setPauseOnExceptionsState(
- static_cast<V8Debugger::PauseOnExceptionsState>(pauseState));
- if (m_debugger->getPauseOnExceptionsState() != pauseState)
- *errorString = "Internal error. Could not change pause on exceptions state";
- else
- m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
+ static_cast<v8::DebugInterface::ExceptionBreakState>(pauseState));
+ m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
}
-void V8DebuggerAgentImpl::evaluateOnCallFrame(
- ErrorString* errorString, const String16& callFrameId,
- const String16& expression, const Maybe<String16>& objectGroup,
- const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
- const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+Response V8DebuggerAgentImpl::evaluateOnCallFrame(
+ const String16& callFrameId, const String16& expression,
+ Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
+ Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
std::unique_ptr<RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
- if (!assertPaused(errorString)) return;
- InjectedScript::CallFrameScope scope(
- errorString, m_inspector, m_session->contextGroupId(), callFrameId);
- if (!scope.initialize()) return;
- if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
- *errorString = "Could not find call frame with given id";
- return;
- }
-
- if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
- return;
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
+ callFrameId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) return response;
+ if (scope.frameOrdinal() >= m_pausedCallFrames.size())
+ return Response::Error("Could not find call frame with given id");
+
+ if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
v8::MaybeLocal<v8::Value> maybeResultValue =
@@ -737,56 +743,52 @@ void V8DebuggerAgentImpl::evaluateOnCallFrame(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
- if (!scope.initialize()) return;
- scope.injectedScript()->wrapEvaluateResult(
- errorString, maybeResultValue, scope.tryCatch(),
- objectGroup.fromMaybe(""), returnByValue.fromMaybe(false),
- generatePreview.fromMaybe(false), result, exceptionDetails);
+ response = scope.initialize();
+ if (!response.isSuccess()) return response;
+ return scope.injectedScript()->wrapEvaluateResult(
+ maybeResultValue, scope.tryCatch(), objectGroup.fromMaybe(""),
+ returnByValue.fromMaybe(false), generatePreview.fromMaybe(false), result,
+ exceptionDetails);
}
-void V8DebuggerAgentImpl::setVariableValue(
- ErrorString* errorString, int scopeNumber, const String16& variableName,
+Response V8DebuggerAgentImpl::setVariableValue(
+ int scopeNumber, const String16& variableName,
std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
const String16& callFrameId) {
- if (!checkEnabled(errorString)) return;
- if (!assertPaused(errorString)) return;
- InjectedScript::CallFrameScope scope(
- errorString, m_inspector, m_session->contextGroupId(), callFrameId);
- if (!scope.initialize()) return;
-
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
+ if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+ InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
+ callFrameId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) return response;
v8::Local<v8::Value> newValue;
- if (!scope.injectedScript()
- ->resolveCallArgument(errorString, newValueArgument.get())
- .ToLocal(&newValue))
- return;
+ response = scope.injectedScript()->resolveCallArgument(newValueArgument.get(),
+ &newValue);
+ if (!response.isSuccess()) return response;
- if (scope.frameOrdinal() >= m_pausedCallFrames.size()) {
- *errorString = "Could not find call frame with given id";
- return;
- }
+ if (scope.frameOrdinal() >= m_pausedCallFrames.size())
+ return Response::Error("Could not find call frame with given id");
v8::MaybeLocal<v8::Value> result =
m_pausedCallFrames[scope.frameOrdinal()]->setVariableValue(
scopeNumber, toV8String(m_isolate, variableName), newValue);
- if (scope.tryCatch().HasCaught() || result.IsEmpty()) {
- *errorString = "Internal error";
- return;
- }
+ if (scope.tryCatch().HasCaught() || result.IsEmpty())
+ return Response::InternalError();
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setAsyncCallStackDepth(ErrorString* errorString,
- int depth) {
- if (!checkEnabled(errorString)) return;
+Response V8DebuggerAgentImpl::setAsyncCallStackDepth(int depth) {
+ if (!enabled()) return Response::Error(kDebuggerNotEnabled);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, depth);
m_debugger->setAsyncCallStackDepth(this, depth);
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setBlackboxPatterns(
- ErrorString* errorString,
+Response V8DebuggerAgentImpl::setBlackboxPatterns(
std::unique_ptr<protocol::Array<String16>> patterns) {
if (!patterns->length()) {
m_blackboxPattern = nullptr;
m_state->remove(DebuggerAgentState::blackboxPattern);
- return;
+ return Response::OK();
}
String16Builder patternBuilder;
@@ -798,48 +800,41 @@ void V8DebuggerAgentImpl::setBlackboxPatterns(
patternBuilder.append(patterns->get(patterns->length() - 1));
patternBuilder.append(')');
String16 pattern = patternBuilder.toString();
- if (!setBlackboxPattern(errorString, pattern)) return;
+ Response response = setBlackboxPattern(pattern);
+ if (!response.isSuccess()) return response;
m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
+ return Response::OK();
}
-bool V8DebuggerAgentImpl::setBlackboxPattern(ErrorString* errorString,
- const String16& pattern) {
+Response V8DebuggerAgentImpl::setBlackboxPattern(const String16& pattern) {
std::unique_ptr<V8Regex> regex(new V8Regex(
m_inspector, pattern, true /** caseSensitive */, false /** multiline */));
- if (!regex->isValid()) {
- *errorString = "Pattern parser error: " + regex->errorMessage();
- return false;
- }
+ if (!regex->isValid())
+ return Response::Error("Pattern parser error: " + regex->errorMessage());
m_blackboxPattern = std::move(regex);
- return true;
+ return Response::OK();
}
-void V8DebuggerAgentImpl::setBlackboxedRanges(
- ErrorString* error, const String16& scriptId,
+Response V8DebuggerAgentImpl::setBlackboxedRanges(
+ const String16& scriptId,
std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
inPositions) {
- if (m_scripts.find(scriptId) == m_scripts.end()) {
- *error = "No script with passed id.";
- return;
- }
+ if (m_scripts.find(scriptId) == m_scripts.end())
+ return Response::Error("No script with passed id.");
if (!inPositions->length()) {
m_blackboxedPositions.erase(scriptId);
- return;
+ return Response::OK();
}
std::vector<std::pair<int, int>> positions;
positions.reserve(inPositions->length());
for (size_t i = 0; i < inPositions->length(); ++i) {
protocol::Debugger::ScriptPosition* position = inPositions->get(i);
- if (position->getLineNumber() < 0) {
- *error = "Position missing 'line' or 'line' < 0.";
- return;
- }
- if (position->getColumnNumber() < 0) {
- *error = "Position missing 'column' or 'column' < 0.";
- return;
- }
+ if (position->getLineNumber() < 0)
+ return Response::Error("Position missing 'line' or 'line' < 0.");
+ if (position->getColumnNumber() < 0)
+ return Response::Error("Position missing 'column' or 'column' < 0.");
positions.push_back(
std::make_pair(position->getLineNumber(), position->getColumnNumber()));
}
@@ -849,12 +844,12 @@ void V8DebuggerAgentImpl::setBlackboxedRanges(
if (positions[i - 1].first == positions[i].first &&
positions[i - 1].second < positions[i].second)
continue;
- *error =
- "Input positions array is not sorted or contains duplicate values.";
- return;
+ return Response::Error(
+ "Input positions array is not sorted or contains duplicate values.");
}
m_blackboxedPositions[scriptId] = positions;
+ return Response::OK();
}
void V8DebuggerAgentImpl::willExecuteScript(int scriptId) {
@@ -907,14 +902,15 @@ void V8DebuggerAgentImpl::changeJavaScriptRecursionLevel(int step) {
}
}
-std::unique_ptr<Array<CallFrame>> V8DebuggerAgentImpl::currentCallFrames(
- ErrorString* errorString) {
- if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size())
- return Array<CallFrame>::create();
- ErrorString ignored;
+Response V8DebuggerAgentImpl::currentCallFrames(
+ std::unique_ptr<Array<CallFrame>>* result) {
+ if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size()) {
+ *result = Array<CallFrame>::create();
+ return Response::OK();
+ }
v8::HandleScope handles(m_isolate);
v8::Local<v8::Context> debuggerContext =
- v8::Debug::GetDebugContext(m_isolate);
+ v8::DebugInterface::GetDebugContext(m_isolate);
v8::Context::Scope contextScope(debuggerContext);
v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
@@ -925,104 +921,92 @@ std::unique_ptr<Array<CallFrame>> V8DebuggerAgentImpl::currentCallFrames(
m_pausedCallFrames[frameOrdinal];
v8::Local<v8::Object> details = currentCallFrame->details();
- if (hasInternalError(errorString, details.IsEmpty()))
- return Array<CallFrame>::create();
+ if (details.IsEmpty()) return Response::InternalError();
int contextId = currentCallFrame->contextId();
- InjectedScript* injectedScript =
- contextId ? m_session->findInjectedScript(&ignored, contextId)
- : nullptr;
+
+ InjectedScript* injectedScript = nullptr;
+ if (contextId) m_session->findInjectedScript(contextId, injectedScript);
String16 callFrameId =
RemoteCallFrameId::serialize(contextId, static_cast<int>(frameOrdinal));
- if (hasInternalError(
- errorString,
- !details
- ->Set(debuggerContext,
- toV8StringInternalized(m_isolate, "callFrameId"),
- toV8String(m_isolate, callFrameId))
- .FromMaybe(false)))
- return Array<CallFrame>::create();
+ if (!details
+ ->Set(debuggerContext,
+ toV8StringInternalized(m_isolate, "callFrameId"),
+ toV8String(m_isolate, callFrameId))
+ .FromMaybe(false)) {
+ return Response::InternalError();
+ }
if (injectedScript) {
v8::Local<v8::Value> scopeChain;
- if (hasInternalError(
- errorString,
- !details->Get(debuggerContext,
- toV8StringInternalized(m_isolate, "scopeChain"))
- .ToLocal(&scopeChain) ||
- !scopeChain->IsArray()))
- return Array<CallFrame>::create();
+ if (!details
+ ->Get(debuggerContext,
+ toV8StringInternalized(m_isolate, "scopeChain"))
+ .ToLocal(&scopeChain) ||
+ !scopeChain->IsArray()) {
+ return Response::InternalError();
+ }
v8::Local<v8::Array> scopeChainArray = scopeChain.As<v8::Array>();
- if (!injectedScript->wrapPropertyInArray(
- errorString, scopeChainArray,
- toV8StringInternalized(m_isolate, "object"),
- backtraceObjectGroup))
- return Array<CallFrame>::create();
- if (!injectedScript->wrapObjectProperty(
- errorString, details, toV8StringInternalized(m_isolate, "this"),
- backtraceObjectGroup))
- return Array<CallFrame>::create();
+ Response response = injectedScript->wrapPropertyInArray(
+ scopeChainArray, toV8StringInternalized(m_isolate, "object"),
+ kBacktraceObjectGroup);
+ if (!response.isSuccess()) return response;
+ response = injectedScript->wrapObjectProperty(
+ details, toV8StringInternalized(m_isolate, "this"),
+ kBacktraceObjectGroup);
+ if (!response.isSuccess()) return response;
if (details
->Has(debuggerContext,
toV8StringInternalized(m_isolate, "returnValue"))
.FromMaybe(false)) {
- if (!injectedScript->wrapObjectProperty(
- errorString, details,
- toV8StringInternalized(m_isolate, "returnValue"),
- backtraceObjectGroup))
- return Array<CallFrame>::create();
+ response = injectedScript->wrapObjectProperty(
+ details, toV8StringInternalized(m_isolate, "returnValue"),
+ kBacktraceObjectGroup);
+ if (!response.isSuccess()) return response;
}
} else {
- if (hasInternalError(errorString, !details
- ->Set(debuggerContext,
- toV8StringInternalized(
- m_isolate, "scopeChain"),
- v8::Array::New(m_isolate, 0))
- .FromMaybe(false)))
- return Array<CallFrame>::create();
+ if (!details
+ ->Set(debuggerContext,
+ toV8StringInternalized(m_isolate, "scopeChain"),
+ v8::Array::New(m_isolate, 0))
+ .FromMaybe(false)) {
+ return Response::InternalError();
+ }
v8::Local<v8::Object> remoteObject = v8::Object::New(m_isolate);
- if (hasInternalError(
- errorString,
- !remoteObject
- ->Set(debuggerContext,
- toV8StringInternalized(m_isolate, "type"),
- toV8StringInternalized(m_isolate, "undefined"))
- .FromMaybe(false)))
- return Array<CallFrame>::create();
- if (hasInternalError(errorString,
- !details
- ->Set(debuggerContext,
- toV8StringInternalized(m_isolate, "this"),
- remoteObject)
- .FromMaybe(false)))
- return Array<CallFrame>::create();
- if (hasInternalError(
- errorString,
- !details
- ->Delete(debuggerContext,
- toV8StringInternalized(m_isolate, "returnValue"))
- .FromMaybe(false)))
- return Array<CallFrame>::create();
+ if (!remoteObject
+ ->Set(debuggerContext, toV8StringInternalized(m_isolate, "type"),
+ toV8StringInternalized(m_isolate, "undefined"))
+ .FromMaybe(false)) {
+ return Response::InternalError();
+ }
+ if (!details
+ ->Set(debuggerContext, toV8StringInternalized(m_isolate, "this"),
+ remoteObject)
+ .FromMaybe(false)) {
+ return Response::InternalError();
+ }
+ if (!details
+ ->Delete(debuggerContext,
+ toV8StringInternalized(m_isolate, "returnValue"))
+ .FromMaybe(false)) {
+ return Response::InternalError();
+ }
}
- if (hasInternalError(
- errorString,
- !objects
- ->Set(debuggerContext, static_cast<int>(frameOrdinal), details)
- .FromMaybe(false)))
- return Array<CallFrame>::create();
+ if (!objects->Set(debuggerContext, static_cast<int>(frameOrdinal), details)
+ .FromMaybe(false)) {
+ return Response::InternalError();
+ }
}
- std::unique_ptr<protocol::Value> protocolValue =
- toProtocolValue(errorString, debuggerContext, objects);
- if (!protocolValue) return Array<CallFrame>::create();
+ std::unique_ptr<protocol::Value> protocolValue;
+ Response response = toProtocolValue(debuggerContext, objects, &protocolValue);
+ if (!response.isSuccess()) return response;
protocol::ErrorSupport errorSupport;
- std::unique_ptr<Array<CallFrame>> callFrames =
- Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
- if (hasInternalError(errorString, !callFrames))
- return Array<CallFrame>::create();
- return callFrames;
+ *result = Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
+ if (!*result) return Response::Error(errorSupport.errors());
+ return Response::OK();
}
std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
@@ -1049,8 +1033,8 @@ void V8DebuggerAgentImpl::didParseSource(
String16 scriptId = script->scriptId();
String16 scriptURL = script->sourceURL();
- const Maybe<String16>& sourceMapURLParam = script->sourceMappingURL();
- const Maybe<protocol::DictionaryValue>& executionContextAuxDataParam(
+ Maybe<String16> sourceMapURLParam = script->sourceMappingURL();
+ Maybe<protocol::DictionaryValue> executionContextAuxDataParam(
std::move(executionContextAuxData));
const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
@@ -1058,14 +1042,14 @@ void V8DebuggerAgentImpl::didParseSource(
m_frontend.scriptParsed(
scriptId, scriptURL, script->startLine(), script->startColumn(),
script->endLine(), script->endColumn(), script->executionContextId(),
- script->hash(), executionContextAuxDataParam, isLiveEditParam,
- sourceMapURLParam, hasSourceURLParam);
+ script->hash(), std::move(executionContextAuxDataParam),
+ isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam);
else
m_frontend.scriptFailedToParse(
scriptId, scriptURL, script->startLine(), script->startColumn(),
script->endLine(), script->endColumn(), script->executionContextId(),
- script->hash(), executionContextAuxDataParam, sourceMapURLParam,
- hasSourceURLParam);
+ script->hash(), std::move(executionContextAuxDataParam),
+ std::move(sourceMapURLParam), hasSourceURLParam);
m_scripts[scriptId] = std::move(script);
@@ -1100,7 +1084,8 @@ void V8DebuggerAgentImpl::didParseSource(
V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
v8::Local<v8::Context> context, v8::Local<v8::Value> exception,
- const std::vector<String16>& hitBreakpoints, bool isPromiseRejection) {
+ const std::vector<String16>& hitBreakpoints, bool isPromiseRejection,
+ bool isUncaught) {
JavaScriptCallFrames callFrames = m_debugger->currentCallFrames(1);
JavaScriptCallFrame* topCallFrame =
!callFrames.empty() ? callFrames.begin()->get() : nullptr;
@@ -1131,18 +1116,23 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
v8::HandleScope handles(m_isolate);
if (!exception.IsEmpty()) {
- ErrorString ignored;
- InjectedScript* injectedScript =
- m_session->findInjectedScript(&ignored, V8Debugger::contextId(context));
+ InjectedScript* injectedScript = nullptr;
+ m_session->findInjectedScript(V8Debugger::contextId(context),
+ injectedScript);
if (injectedScript) {
m_breakReason =
isPromiseRejection
? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
: protocol::Debugger::Paused::ReasonEnum::Exception;
- ErrorString errorString;
- auto obj = injectedScript->wrapObject(&errorString, exception,
- backtraceObjectGroup);
- m_breakAuxData = obj ? obj->serialize() : nullptr;
+ std::unique_ptr<protocol::Runtime::RemoteObject> obj;
+ injectedScript->wrapObject(exception, kBacktraceObjectGroup, false, false,
+ &obj);
+ if (obj) {
+ m_breakAuxData = obj->serialize();
+ m_breakAuxData->setBoolean("uncaught", isUncaught);
+ } else {
+ m_breakAuxData = nullptr;
+ }
// m_breakAuxData might be null after this.
}
}
@@ -1163,8 +1153,10 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
}
}
- ErrorString errorString;
- m_frontend.paused(currentCallFrames(&errorString), m_breakReason,
+ std::unique_ptr<Array<CallFrame>> protocolCallFrames;
+ Response response = currentCallFrames(&protocolCallFrames);
+ if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
+ m_frontend.paused(std::move(protocolCallFrames), m_breakReason,
std::move(m_breakAuxData), std::move(hitBreakpointIds),
currentAsyncStackTrace());
m_scheduledDebuggerStep = NoStep;
@@ -1209,19 +1201,11 @@ void V8DebuggerAgentImpl::breakProgramOnException(
std::unique_ptr<protocol::DictionaryValue> data) {
if (!enabled() ||
m_debugger->getPauseOnExceptionsState() ==
- V8Debugger::DontPauseOnExceptions)
+ v8::DebugInterface::NoBreakOnException)
return;
breakProgram(breakReason, std::move(data));
}
-bool V8DebuggerAgentImpl::assertPaused(ErrorString* errorString) {
- if (m_pausedContext.IsEmpty()) {
- *errorString = "Can only perform operation while paused.";
- return false;
- }
- return true;
-}
-
void V8DebuggerAgentImpl::clearBreakDetails() {
m_breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
m_breakAuxData = nullptr;
@@ -1240,7 +1224,7 @@ void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source) {
- removeBreakpoint(
+ removeBreakpointImpl(
generateBreakpointId(scriptId, lineNumber, columnNumber, source));
}
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 62aa67b64b..e5285f4cc3 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -24,8 +24,8 @@ class V8InspectorSessionImpl;
class V8Regex;
class V8StackTraceImpl;
-using protocol::ErrorString;
using protocol::Maybe;
+using protocol::Response;
class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
public:
@@ -49,67 +49,69 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void restore();
// Part of the protocol.
- void enable(ErrorString*) override;
- void disable(ErrorString*) override;
- void setBreakpointsActive(ErrorString*, bool active) override;
- void setSkipAllPauses(ErrorString*, bool skip) override;
- void setBreakpointByUrl(
- ErrorString*, int lineNumber, const Maybe<String16>& optionalURL,
- const Maybe<String16>& optionalURLRegex,
- const Maybe<int>& optionalColumnNumber,
- const Maybe<String16>& optionalCondition, String16*,
+ Response enable() override;
+ Response disable() override;
+ Response setBreakpointsActive(bool active) override;
+ Response setSkipAllPauses(bool skip) override;
+ Response setBreakpointByUrl(
+ int lineNumber, Maybe<String16> optionalURL,
+ Maybe<String16> optionalURLRegex, Maybe<int> optionalColumnNumber,
+ Maybe<String16> optionalCondition, String16*,
std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
override;
- void setBreakpoint(
- ErrorString*, std::unique_ptr<protocol::Debugger::Location>,
- const Maybe<String16>& optionalCondition, String16*,
+ Response setBreakpoint(
+ std::unique_ptr<protocol::Debugger::Location>,
+ Maybe<String16> optionalCondition, String16*,
std::unique_ptr<protocol::Debugger::Location>* actualLocation) override;
- void removeBreakpoint(ErrorString*, const String16& breakpointId) override;
- void continueToLocation(
- ErrorString*, std::unique_ptr<protocol::Debugger::Location>) override;
- void searchInContent(
- ErrorString*, const String16& scriptId, const String16& query,
- const Maybe<bool>& optionalCaseSensitive,
- const Maybe<bool>& optionalIsRegex,
+ Response removeBreakpoint(const String16& breakpointId) override;
+ Response continueToLocation(
+ std::unique_ptr<protocol::Debugger::Location>) override;
+ Response searchInContent(
+ const String16& scriptId, const String16& query,
+ Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
std::unique_ptr<protocol::Array<protocol::Debugger::SearchMatch>>*)
override;
- void setScriptSource(
- ErrorString*, const String16& inScriptId, const String16& inScriptSource,
- const Maybe<bool>& dryRun,
+ Response getPossibleBreakpoints(
+ std::unique_ptr<protocol::Debugger::Location> start,
+ Maybe<protocol::Debugger::Location> end,
+ std::unique_ptr<protocol::Array<protocol::Debugger::Location>>* locations)
+ override;
+ Response setScriptSource(
+ const String16& inScriptId, const String16& inScriptSource,
+ Maybe<bool> dryRun,
Maybe<protocol::Array<protocol::Debugger::CallFrame>>* optOutCallFrames,
Maybe<bool>* optOutStackChanged,
Maybe<protocol::Runtime::StackTrace>* optOutAsyncStackTrace,
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) override;
- void restartFrame(
- ErrorString*, const String16& callFrameId,
+ Response restartFrame(
+ const String16& callFrameId,
std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*
newCallFrames,
Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) override;
- void getScriptSource(ErrorString*, const String16& scriptId,
- String16* scriptSource) override;
- void pause(ErrorString*) override;
- void resume(ErrorString*) override;
- void stepOver(ErrorString*) override;
- void stepInto(ErrorString*) override;
- void stepOut(ErrorString*) override;
- void setPauseOnExceptions(ErrorString*, const String16& pauseState) override;
- void evaluateOnCallFrame(
- ErrorString*, const String16& callFrameId, const String16& expression,
- const Maybe<String16>& objectGroup,
- const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
- const Maybe<bool>& returnByValue, const Maybe<bool>& generatePreview,
+ Response getScriptSource(const String16& scriptId,
+ String16* scriptSource) override;
+ Response pause() override;
+ Response resume() override;
+ Response stepOver() override;
+ Response stepInto() override;
+ Response stepOut() override;
+ Response setPauseOnExceptions(const String16& pauseState) override;
+ Response evaluateOnCallFrame(
+ const String16& callFrameId, const String16& expression,
+ Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
+ Maybe<bool> silent, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview,
std::unique_ptr<protocol::Runtime::RemoteObject>* result,
Maybe<protocol::Runtime::ExceptionDetails>*) override;
- void setVariableValue(
- ErrorString*, int scopeNumber, const String16& variableName,
+ Response setVariableValue(
+ int scopeNumber, const String16& variableName,
std::unique_ptr<protocol::Runtime::CallArgument> newValue,
const String16& callFrame) override;
- void setAsyncCallStackDepth(ErrorString*, int depth) override;
- void setBlackboxPatterns(
- ErrorString*,
+ Response setAsyncCallStackDepth(int depth) override;
+ Response setBlackboxPatterns(
std::unique_ptr<protocol::Array<String16>> patterns) override;
- void setBlackboxedRanges(
- ErrorString*, const String16& scriptId,
+ Response setBlackboxedRanges(
+ const String16& scriptId,
std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
positions) override;
@@ -135,7 +137,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
SkipPauseRequest didPause(v8::Local<v8::Context>,
v8::Local<v8::Value> exception,
const std::vector<String16>& hitBreakpoints,
- bool isPromiseRejection);
+ bool isPromiseRejection, bool isUncaught);
void didContinue();
void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
void willExecuteScript(int scriptId);
@@ -144,27 +146,25 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
v8::Isolate* isolate() { return m_isolate; }
private:
- bool checkEnabled(ErrorString*);
- void enable();
+ void enableImpl();
SkipPauseRequest shouldSkipExceptionPause(JavaScriptCallFrame* topCallFrame);
SkipPauseRequest shouldSkipStepPause(JavaScriptCallFrame* topCallFrame);
void schedulePauseOnNextStatementIfSteppingInto();
- std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>
- currentCallFrames(ErrorString*);
+ Response currentCallFrames(
+ std::unique_ptr<protocol::Array<protocol::Debugger::CallFrame>>*);
std::unique_ptr<protocol::Runtime::StackTrace> currentAsyncStackTrace();
void changeJavaScriptRecursionLevel(int step);
- void setPauseOnExceptionsImpl(ErrorString*, int);
+ void setPauseOnExceptionsImpl(int);
std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
const String16& breakpointId, const String16& scriptId,
const ScriptBreakpoint&, BreakpointSource);
- void removeBreakpoint(const String16& breakpointId);
- bool assertPaused(ErrorString*);
+ void removeBreakpointImpl(const String16& breakpointId);
void clearBreakDetails();
bool isCurrentCallStackEmptyOrBlackboxed();
@@ -174,7 +174,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void internalSetAsyncCallStackDepth(int);
void increaseCachedSkipStackGeneration();
- bool setBlackboxPattern(ErrorString*, const String16& pattern);
+ Response setBlackboxPattern(const String16& pattern);
using ScriptsMap =
protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index 485188a48f..ed0c0d63de 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -67,50 +67,66 @@ static String16 calculateHash(const String16& str) {
return hash.toString();
}
-static v8::Local<v8::Value> GetChecked(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object,
- const char* name) {
- return object
- ->Get(context, toV8StringInternalized(context->GetIsolate(), name))
- .ToLocalChecked();
-}
+V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate,
+ v8::Local<v8::DebugInterface::Script> script,
+ bool isLiveEdit) {
+ m_isolate = script->GetIsolate();
+ m_id = String16::fromInteger(script->Id());
+ v8::Local<v8::String> tmp;
+ if (script->Name().ToLocal(&tmp)) m_url = toProtocolString(tmp);
+ if (script->SourceURL().ToLocal(&tmp)) {
+ m_sourceURL = toProtocolString(tmp);
+ if (m_url.isEmpty()) m_url = toProtocolString(tmp);
+ }
+ if (script->SourceMappingURL().ToLocal(&tmp))
+ m_sourceMappingURL = toProtocolString(tmp);
+ m_startLine = script->LineOffset();
+ m_startColumn = script->ColumnOffset();
+ std::vector<int> lineEnds = script->LineEnds();
+ CHECK(lineEnds.size());
+ int source_length = lineEnds[lineEnds.size() - 1];
+ if (lineEnds.size()) {
+ m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
+ if (lineEnds.size() > 1) {
+ m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
+ } else {
+ m_endColumn = source_length + m_startColumn;
+ }
+ } else {
+ m_endLine = m_startLine;
+ m_endColumn = m_startColumn;
+ }
-static int GetCheckedInt(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object, const char* name) {
- return static_cast<int>(GetChecked(context, object, name)
- ->ToInteger(context)
- .ToLocalChecked()
- ->Value());
-}
+ if (script->ContextData().ToLocal(&tmp)) {
+ String16 contextData = toProtocolString(tmp);
+ size_t firstComma = contextData.find(",", 0);
+ size_t secondComma = firstComma != String16::kNotFound
+ ? contextData.find(",", firstComma + 1)
+ : String16::kNotFound;
+ if (secondComma != String16::kNotFound) {
+ String16 executionContextId =
+ contextData.substring(firstComma + 1, secondComma - firstComma - 1);
+ bool isOk = false;
+ m_executionContextId = executionContextId.toInteger(&isOk);
+ if (!isOk) m_executionContextId = 0;
+ m_executionContextAuxData = contextData.substring(secondComma + 1);
+ }
+ }
-V8DebuggerScript::V8DebuggerScript(v8::Local<v8::Context> context,
- v8::Local<v8::Object> object,
- bool isLiveEdit) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::Local<v8::Value> idValue = GetChecked(context, object, "id");
- DCHECK(!idValue.IsEmpty() && idValue->IsInt32());
- m_id = String16::fromInteger(idValue->Int32Value(context).FromJust());
-
- m_url = toProtocolStringWithTypeCheck(GetChecked(context, object, "name"));
- m_sourceURL =
- toProtocolStringWithTypeCheck(GetChecked(context, object, "sourceURL"));
- m_sourceMappingURL = toProtocolStringWithTypeCheck(
- GetChecked(context, object, "sourceMappingURL"));
- m_startLine = GetCheckedInt(context, object, "startLine");
- m_startColumn = GetCheckedInt(context, object, "startColumn");
- m_endLine = GetCheckedInt(context, object, "endLine");
- m_endColumn = GetCheckedInt(context, object, "endColumn");
- m_executionContextAuxData = toProtocolStringWithTypeCheck(
- GetChecked(context, object, "executionContextAuxData"));
- m_executionContextId = GetCheckedInt(context, object, "executionContextId");
m_isLiveEdit = isLiveEdit;
- v8::Local<v8::Value> sourceValue;
- if (!object->Get(context, toV8StringInternalized(isolate, "source"))
- .ToLocal(&sourceValue) ||
- !sourceValue->IsString())
- return;
- setSource(isolate, sourceValue.As<v8::String>());
+ if (script->Source().ToLocal(&tmp)) {
+ m_source.Reset(m_isolate, tmp);
+ String16 source = toProtocolString(tmp);
+ m_hash = calculateHash(source);
+ // V8 will not count last line if script source ends with \n.
+ if (source.length() > 1 && source[source.length() - 1] == '\n') {
+ m_endLine++;
+ m_endColumn = 0;
+ }
+ }
+
+ m_script.Reset(m_isolate, script);
}
V8DebuggerScript::~V8DebuggerScript() {}
@@ -131,10 +147,18 @@ void V8DebuggerScript::setSourceMappingURL(const String16& sourceMappingURL) {
m_sourceMappingURL = sourceMappingURL;
}
-void V8DebuggerScript::setSource(v8::Isolate* isolate,
- v8::Local<v8::String> source) {
- m_source.Reset(isolate, source);
+void V8DebuggerScript::setSource(v8::Local<v8::String> source) {
+ m_source.Reset(m_isolate, source);
m_hash = calculateHash(toProtocolString(source));
}
+bool V8DebuggerScript::getPossibleBreakpoints(
+ const v8::DebugInterface::Location& start,
+ const v8::DebugInterface::Location& end,
+ std::vector<v8::DebugInterface::Location>* locations) {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::DebugInterface::Script> script = m_script.Get(m_isolate);
+ return script->GetPossibleBreakpoints(start, end, locations);
+}
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 78c44b5eb9..97b5ba9e51 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -34,12 +34,14 @@
#include "src/inspector/string-16.h"
#include "include/v8.h"
+#include "src/debug/debug-interface.h"
namespace v8_inspector {
class V8DebuggerScript {
public:
- V8DebuggerScript(v8::Local<v8::Context>, v8::Local<v8::Object>,
+ V8DebuggerScript(v8::Isolate* isolate,
+ v8::Local<v8::DebugInterface::Script> script,
bool isLiveEdit);
~V8DebuggerScript();
@@ -62,7 +64,12 @@ class V8DebuggerScript {
void setSourceURL(const String16&);
void setSourceMappingURL(const String16&);
- void setSource(v8::Isolate*, v8::Local<v8::String>);
+ void setSource(v8::Local<v8::String>);
+
+ bool getPossibleBreakpoints(
+ const v8::DebugInterface::Location& start,
+ const v8::DebugInterface::Location& end,
+ std::vector<v8::DebugInterface::Location>* locations);
private:
String16 m_id;
@@ -79,6 +86,9 @@ class V8DebuggerScript {
String16 m_executionContextAuxData;
bool m_isLiveEdit;
+ v8::Isolate* m_isolate;
+ v8::Global<v8::DebugInterface::Script> m_script;
+
DISALLOW_COPY_AND_ASSIGN(V8DebuggerScript);
};
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index d393f81ad4..b3657e577c 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -14,11 +14,11 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-copier.h"
+#include "include/v8-util.h"
+
namespace v8_inspector {
namespace {
-const char stepIntoV8MethodName[] = "stepIntoStatement";
-const char stepOutV8MethodName[] = "stepOutOfFunction";
static const char v8AsyncTaskEventEnqueue[] = "enqueue";
static const char v8AsyncTaskEventEnqueueRecurring[] = "enqueueRecurring";
static const char v8AsyncTaskEventWillHandle[] = "willHandle";
@@ -55,7 +55,8 @@ V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
m_breakpointsActivated(true),
m_runningNestedMessageLoop(false),
m_ignoreScriptParsedEventsCounter(0),
- m_maxAsyncCallStackDepth(0) {}
+ m_maxAsyncCallStackDepth(0),
+ m_pauseOnExceptionsState(v8::DebugInterface::NoBreakOnException) {}
V8Debugger::~V8Debugger() {}
@@ -63,9 +64,14 @@ void V8Debugger::enable() {
if (m_enableCount++) return;
DCHECK(!enabled());
v8::HandleScope scope(m_isolate);
- v8::Debug::SetDebugEventListener(m_isolate, &V8Debugger::v8DebugEventCallback,
- v8::External::New(m_isolate, this));
- m_debuggerContext.Reset(m_isolate, v8::Debug::GetDebugContext(m_isolate));
+ v8::DebugInterface::SetDebugEventListener(m_isolate,
+ &V8Debugger::v8DebugEventCallback,
+ v8::External::New(m_isolate, this));
+ m_debuggerContext.Reset(m_isolate,
+ v8::DebugInterface::GetDebugContext(m_isolate));
+ v8::DebugInterface::ChangeBreakOnException(
+ m_isolate, v8::DebugInterface::NoBreakOnException);
+ m_pauseOnExceptionsState = v8::DebugInterface::NoBreakOnException;
compileDebuggerScript();
}
@@ -76,7 +82,7 @@ void V8Debugger::disable() {
m_debuggerScript.Reset();
m_debuggerContext.Reset();
allAsyncTasksCanceled();
- v8::Debug::SetDebugEventListener(m_isolate, nullptr);
+ v8::DebugInterface::SetDebugEventListener(m_isolate, nullptr);
}
bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
@@ -112,29 +118,20 @@ void V8Debugger::getCompiledScripts(
int contextGroupId,
std::vector<std::unique_ptr<V8DebuggerScript>>& result) {
v8::HandleScope scope(m_isolate);
- v8::MicrotasksScope microtasks(m_isolate,
- v8::MicrotasksScope::kDoNotRunMicrotasks);
- v8::Local<v8::Context> context = debuggerContext();
- v8::Local<v8::Object> debuggerScript = m_debuggerScript.Get(m_isolate);
- DCHECK(!debuggerScript->IsUndefined());
- v8::Local<v8::Function> getScriptsFunction = v8::Local<v8::Function>::Cast(
- debuggerScript
- ->Get(context, toV8StringInternalized(m_isolate, "getScripts"))
- .ToLocalChecked());
- v8::Local<v8::Value> argv[] = {v8::Integer::New(m_isolate, contextGroupId)};
- v8::Local<v8::Value> value;
- if (!getScriptsFunction->Call(context, debuggerScript, arraysize(argv), argv)
- .ToLocal(&value))
- return;
- DCHECK(value->IsArray());
- v8::Local<v8::Array> scriptsArray = v8::Local<v8::Array>::Cast(value);
- result.reserve(scriptsArray->Length());
- for (unsigned i = 0; i < scriptsArray->Length(); ++i) {
- v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(
- scriptsArray->Get(context, v8::Integer::New(m_isolate, i))
- .ToLocalChecked());
- result.push_back(wrapUnique(
- new V8DebuggerScript(context, scriptObject, inLiveEditScope)));
+ v8::PersistentValueVector<v8::DebugInterface::Script> scripts(m_isolate);
+ v8::DebugInterface::GetLoadedScripts(m_isolate, scripts);
+ String16 contextPrefix = String16::fromInteger(contextGroupId) + ",";
+ for (size_t i = 0; i < scripts.Size(); ++i) {
+ v8::Local<v8::DebugInterface::Script> script = scripts.Get(i);
+ if (!script->WasCompiled()) continue;
+ v8::ScriptOriginOptions origin = script->OriginOptions();
+ if (origin.IsEmbedderDebugScript()) continue;
+ v8::Local<v8::String> v8ContextData;
+ if (!script->ContextData().ToLocal(&v8ContextData)) continue;
+ String16 contextData = toProtocolString(v8ContextData);
+ if (contextData.find(contextPrefix) != 0) continue;
+ result.push_back(
+ wrapUnique(new V8DebuggerScript(m_isolate, script, false)));
}
}
@@ -171,7 +168,7 @@ String16 V8Debugger::setBreakpoint(const String16& sourceID,
->Get(context, toV8StringInternalized(m_isolate, "setBreakpoint"))
.ToLocalChecked());
v8::Local<v8::Value> breakpointId =
- v8::Debug::Call(debuggerContext(), setBreakpointFunction, info)
+ v8::DebugInterface::Call(debuggerContext(), setBreakpointFunction, info)
.ToLocalChecked();
if (!breakpointId->IsString()) return "";
*actualLineNumber =
@@ -206,7 +203,7 @@ void V8Debugger::removeBreakpoint(const String16& breakpointId) {
->Get(context,
toV8StringInternalized(m_isolate, "removeBreakpoint"))
.ToLocalChecked());
- v8::Debug::Call(debuggerContext(), removeBreakpointFunction, info)
+ v8::DebugInterface::Call(debuggerContext(), removeBreakpointFunction, info)
.ToLocalChecked();
}
@@ -219,7 +216,8 @@ void V8Debugger::clearBreakpoints() {
m_debuggerScript.Get(m_isolate)
->Get(context, toV8StringInternalized(m_isolate, "clearBreakpoints"))
.ToLocalChecked());
- v8::Debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
+ v8::DebugInterface::Call(debuggerContext(), clearBreakpoints)
+ .ToLocalChecked();
}
void V8Debugger::setBreakpointsActivated(bool activated) {
@@ -243,42 +241,32 @@ void V8Debugger::setBreakpointsActivated(bool activated) {
->Get(context, toV8StringInternalized(m_isolate,
"setBreakpointsActivated"))
.ToLocalChecked());
- v8::Debug::Call(debuggerContext(), setBreakpointsActivated, info)
+ v8::DebugInterface::Call(debuggerContext(), setBreakpointsActivated, info)
.ToLocalChecked();
m_breakpointsActivated = activated;
}
-V8Debugger::PauseOnExceptionsState V8Debugger::getPauseOnExceptionsState() {
+v8::DebugInterface::ExceptionBreakState
+V8Debugger::getPauseOnExceptionsState() {
DCHECK(enabled());
- v8::HandleScope scope(m_isolate);
- v8::Local<v8::Context> context = debuggerContext();
- v8::Context::Scope contextScope(context);
-
- v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
- v8::Local<v8::Value> result =
- callDebuggerMethod("pauseOnExceptionsState", 0, argv).ToLocalChecked();
- return static_cast<V8Debugger::PauseOnExceptionsState>(
- result->Int32Value(context).FromJust());
+ return m_pauseOnExceptionsState;
}
void V8Debugger::setPauseOnExceptionsState(
- PauseOnExceptionsState pauseOnExceptionsState) {
+ v8::DebugInterface::ExceptionBreakState pauseOnExceptionsState) {
DCHECK(enabled());
- v8::HandleScope scope(m_isolate);
- v8::Context::Scope contextScope(debuggerContext());
-
- v8::Local<v8::Value> argv[] = {
- v8::Int32::New(m_isolate, pauseOnExceptionsState)};
- callDebuggerMethod("setPauseOnExceptionsState", 1, argv);
+ if (m_pauseOnExceptionsState == pauseOnExceptionsState) return;
+ v8::DebugInterface::ChangeBreakOnException(m_isolate, pauseOnExceptionsState);
+ m_pauseOnExceptionsState = pauseOnExceptionsState;
}
void V8Debugger::setPauseOnNextStatement(bool pause) {
if (m_runningNestedMessageLoop) return;
if (pause)
- v8::Debug::DebugBreak(m_isolate);
+ v8::DebugInterface::DebugBreak(m_isolate);
else
- v8::Debug::CancelDebugBreak(m_isolate);
+ v8::DebugInterface::CancelDebugBreak(m_isolate);
}
bool V8Debugger::canBreakProgram() {
@@ -306,7 +294,7 @@ void V8Debugger::breakProgram() {
v8::ConstructorBehavior::kThrow)
.ToLocal(&breakFunction))
return;
- v8::Debug::Call(debuggerContext(), breakFunction).ToLocalChecked();
+ v8::DebugInterface::Call(debuggerContext(), breakFunction).ToLocalChecked();
}
void V8Debugger::continueProgram() {
@@ -318,52 +306,42 @@ void V8Debugger::continueProgram() {
void V8Debugger::stepIntoStatement() {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
- v8::HandleScope handleScope(m_isolate);
- v8::Local<v8::Value> argv[] = {m_executionState};
- callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+ v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
continueProgram();
}
void V8Debugger::stepOverStatement() {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
- v8::HandleScope handleScope(m_isolate);
- v8::Local<v8::Value> argv[] = {m_executionState};
- callDebuggerMethod("stepOverStatement", 1, argv);
+ v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepNext);
continueProgram();
}
void V8Debugger::stepOutOfFunction() {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
- v8::HandleScope handleScope(m_isolate);
- v8::Local<v8::Value> argv[] = {m_executionState};
- callDebuggerMethod(stepOutV8MethodName, 1, argv);
+ v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
continueProgram();
}
void V8Debugger::clearStepping() {
DCHECK(enabled());
- v8::HandleScope scope(m_isolate);
- v8::Context::Scope contextScope(debuggerContext());
-
- v8::Local<v8::Value> argv[] = {v8::Undefined(m_isolate)};
- callDebuggerMethod("clearStepping", 0, argv);
+ v8::DebugInterface::ClearStepping(m_isolate);
}
-bool V8Debugger::setScriptSource(
+Response V8Debugger::setScriptSource(
const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
- ErrorString* error,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
- JavaScriptCallFrames* newCallFrames, Maybe<bool>* stackChanged) {
+ JavaScriptCallFrames* newCallFrames, Maybe<bool>* stackChanged,
+ bool* compileError) {
class EnableLiveEditScope {
public:
explicit EnableLiveEditScope(v8::Isolate* isolate) : m_isolate(isolate) {
- v8::Debug::SetLiveEditEnabled(m_isolate, true);
+ v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
inLiveEditScope = true;
}
~EnableLiveEditScope() {
- v8::Debug::SetLiveEditEnabled(m_isolate, false);
+ v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
inLiveEditScope = false;
}
@@ -371,6 +349,7 @@ bool V8Debugger::setScriptSource(
v8::Isolate* m_isolate;
};
+ *compileError = false;
DCHECK(enabled());
v8::HandleScope scope(m_isolate);
@@ -391,10 +370,9 @@ bool V8Debugger::setScriptSource(
if (tryCatch.HasCaught()) {
v8::Local<v8::Message> message = tryCatch.Message();
if (!message.IsEmpty())
- *error = toProtocolStringWithTypeCheck(message->Get());
+ return Response::Error(toProtocolStringWithTypeCheck(message->Get()));
else
- *error = "Unknown error.";
- return false;
+ return Response::InternalError();
}
v8result = maybeResult.ToLocalChecked();
}
@@ -419,7 +397,7 @@ bool V8Debugger::setScriptSource(
JavaScriptCallFrames frames = currentCallFrames();
newCallFrames->swap(frames);
}
- return true;
+ return Response::OK();
}
// Compile error.
case 1: {
@@ -441,11 +419,11 @@ bool V8Debugger::setScriptSource(
->Value()) -
1)
.build();
- return false;
+ *compileError = true;
+ return Response::OK();
}
}
- *error = "Unknown error.";
- return false;
+ return Response::InternalError();
}
JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
@@ -459,8 +437,8 @@ JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
toV8StringInternalized(m_isolate, "currentCallFrames"))
.ToLocalChecked());
currentCallFramesV8 =
- v8::Debug::Call(debuggerContext(), currentCallFramesFunction,
- v8::Integer::New(m_isolate, limit))
+ v8::DebugInterface::Call(debuggerContext(), currentCallFramesFunction,
+ v8::Integer::New(m_isolate, limit))
.ToLocalChecked();
} else {
v8::Local<v8::Value> argv[] = {m_executionState,
@@ -508,7 +486,7 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
v8::Local<v8::Object> executionState,
v8::Local<v8::Value> exception,
v8::Local<v8::Array> hitBreakpointNumbers,
- bool isPromiseRejection) {
+ bool isPromiseRejection, bool isUncaught) {
// Don't allow nested breaks.
if (m_runningNestedMessageLoop) return;
@@ -531,7 +509,7 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
m_pausedContext = pausedContext;
m_executionState = executionState;
V8DebuggerAgentImpl::SkipPauseRequest result = agent->didPause(
- pausedContext, exception, breakpointIds, isPromiseRejection);
+ pausedContext, exception, breakpointIds, isPromiseRejection, isUncaught);
if (result == V8DebuggerAgentImpl::RequestNoSkip) {
m_runningNestedMessageLoop = true;
int groupId = getGroupId(pausedContext);
@@ -547,19 +525,16 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
m_executionState.Clear();
if (result == V8DebuggerAgentImpl::RequestStepFrame) {
- v8::Local<v8::Value> argv[] = {executionState};
- callDebuggerMethod("stepFrameStatement", 1, argv);
+ v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepFrame);
} else if (result == V8DebuggerAgentImpl::RequestStepInto) {
- v8::Local<v8::Value> argv[] = {executionState};
- callDebuggerMethod(stepIntoV8MethodName, 1, argv);
+ v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
} else if (result == V8DebuggerAgentImpl::RequestStepOut) {
- v8::Local<v8::Value> argv[] = {executionState};
- callDebuggerMethod(stepOutV8MethodName, 1, argv);
+ v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
}
}
void V8Debugger::v8DebugEventCallback(
- const v8::Debug::EventDetails& eventDetails) {
+ const v8::DebugInterface::EventDetails& eventDetails) {
V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
thisPtr->handleV8DebugEvent(eventDetails);
}
@@ -575,12 +550,12 @@ v8::Local<v8::Value> V8Debugger::callInternalGetterFunction(
.ToLocalChecked();
DCHECK(!getterValue.IsEmpty() && getterValue->IsFunction());
return v8::Local<v8::Function>::Cast(getterValue)
- ->Call(m_isolate->GetCurrentContext(), object, 0, 0)
+ ->Call(m_isolate->GetCurrentContext(), object, 0, nullptr)
.ToLocalChecked();
}
void V8Debugger::handleV8DebugEvent(
- const v8::Debug::EventDetails& eventDetails) {
+ const v8::DebugInterface::EventDetails& eventDetails) {
if (!enabled()) return;
v8::DebugEvent event = eventDetails.GetEvent();
if (event != v8::AsyncTaskEvent && event != v8::Break &&
@@ -604,26 +579,35 @@ void V8Debugger::handleV8DebugEvent(
v8::HandleScope scope(m_isolate);
if (m_ignoreScriptParsedEventsCounter == 0 &&
(event == v8::AfterCompile || event == v8::CompileError)) {
- v8::Context::Scope contextScope(debuggerContext());
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Context::Scope contextScope(context);
v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
v8::Local<v8::Value> value =
callDebuggerMethod("getAfterCompileScript", 1, argv).ToLocalChecked();
if (value->IsNull()) return;
DCHECK(value->IsObject());
v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(value);
+ v8::Local<v8::DebugInterface::Script> script;
+ if (!v8::DebugInterface::Script::Wrap(m_isolate, scriptObject)
+ .ToLocal(&script))
+ return;
agent->didParseSource(
- wrapUnique(new V8DebuggerScript(debuggerContext(), scriptObject,
- inLiveEditScope)),
+ wrapUnique(new V8DebuggerScript(m_isolate, script, inLiveEditScope)),
event == v8::AfterCompile);
} else if (event == v8::Exception) {
+ v8::Local<v8::Context> context = debuggerContext();
v8::Local<v8::Object> eventData = eventDetails.GetEventData();
v8::Local<v8::Value> exception =
callInternalGetterFunction(eventData, "exception");
v8::Local<v8::Value> promise =
callInternalGetterFunction(eventData, "promise");
bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
+ v8::Local<v8::Value> uncaught =
+ callInternalGetterFunction(eventData, "uncaught");
+ bool isUncaught = uncaught->BooleanValue(context).FromJust();
handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
- exception, v8::Local<v8::Array>(), isPromiseRejection);
+ exception, v8::Local<v8::Array>(), isPromiseRejection,
+ isUncaught);
} else if (event == v8::Break) {
v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
v8::Local<v8::Value> hitBreakpoints =
@@ -729,7 +713,8 @@ v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
v8::Local<v8::Array> properties;
- if (!v8::Debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
+ if (!v8::DebugInterface::GetInternalProperties(m_isolate, value)
+ .ToLocal(&properties))
return v8::MaybeLocal<v8::Array>();
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 83c1b21b02..4c7477899a 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -8,12 +8,12 @@
#include <vector>
#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/java-script-call-frame.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
#include "src/inspector/v8-debugger-script.h"
-#include "include/v8-debug.h"
#include "include/v8-inspector.h"
namespace v8_inspector {
@@ -23,7 +23,7 @@ class V8DebuggerAgentImpl;
class V8InspectorImpl;
class V8StackTraceImpl;
-using protocol::ErrorString;
+using protocol::Response;
class V8Debugger {
public:
@@ -42,13 +42,8 @@ class V8Debugger {
void setBreakpointsActivated(bool);
bool breakpointsActivated() const { return m_breakpointsActivated; }
- enum PauseOnExceptionsState {
- DontPauseOnExceptions,
- PauseOnAllExceptions,
- PauseOnUncaughtExceptions
- };
- PauseOnExceptionsState getPauseOnExceptionsState();
- void setPauseOnExceptionsState(PauseOnExceptionsState);
+ v8::DebugInterface::ExceptionBreakState getPauseOnExceptionsState();
+ void setPauseOnExceptionsState(v8::DebugInterface::ExceptionBreakState);
void setPauseOnNextStatement(bool);
bool canBreakProgram();
void breakProgram();
@@ -58,12 +53,11 @@ class V8Debugger {
void stepOutOfFunction();
void clearStepping();
- bool setScriptSource(const String16& sourceID,
- v8::Local<v8::String> newSource, bool dryRun,
- ErrorString*,
- protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
- JavaScriptCallFrames* newCallFrames,
- protocol::Maybe<bool>* stackChanged);
+ Response setScriptSource(
+ const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
+ protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
+ JavaScriptCallFrames* newCallFrames, protocol::Maybe<bool>* stackChanged,
+ bool* compileError);
JavaScriptCallFrames currentCallFrames(int limit = 0);
// Each script inherits debug data from v8::Context where it has been
@@ -113,11 +107,12 @@ class V8Debugger {
v8::Local<v8::Object> executionState,
v8::Local<v8::Value> exception,
v8::Local<v8::Array> hitBreakpoints,
- bool isPromiseRejection = false);
- static void v8DebugEventCallback(const v8::Debug::EventDetails&);
+ bool isPromiseRejection = false,
+ bool isUncaught = false);
+ static void v8DebugEventCallback(const v8::DebugInterface::EventDetails&);
v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
const char* functionName);
- void handleV8DebugEvent(const v8::Debug::EventDetails&);
+ void handleV8DebugEvent(const v8::DebugInterface::EventDetails&);
void handleV8AsyncTaskEvent(v8::Local<v8::Context>,
v8::Local<v8::Object> executionState,
v8::Local<v8::Object> eventData);
@@ -152,6 +147,8 @@ class V8Debugger {
std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+ v8::DebugInterface::ExceptionBreakState m_pauseOnExceptionsState;
+
DISALLOW_COPY_AND_ASSIGN(V8Debugger);
};
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index 84c890bf3f..0ff04e75b9 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -164,39 +164,42 @@ void V8HeapProfilerAgentImpl::restore() {
HeapProfilerAgentState::allocationTrackingEnabled, false));
if (m_state->booleanProperty(
HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
- ErrorString error;
double samplingInterval = m_state->doubleProperty(
HeapProfilerAgentState::samplingHeapProfilerInterval, -1);
DCHECK_GE(samplingInterval, 0);
- startSampling(&error, Maybe<double>(samplingInterval));
+ startSampling(Maybe<double>(samplingInterval));
}
}
-void V8HeapProfilerAgentImpl::collectGarbage(ErrorString*) {
+Response V8HeapProfilerAgentImpl::collectGarbage() {
m_isolate->LowMemoryNotification();
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::startTrackingHeapObjects(
- ErrorString*, const protocol::Maybe<bool>& trackAllocations) {
+Response V8HeapProfilerAgentImpl::startTrackingHeapObjects(
+ Maybe<bool> trackAllocations) {
m_state->setBoolean(HeapProfilerAgentState::heapObjectsTrackingEnabled, true);
bool allocationTrackingEnabled = trackAllocations.fromMaybe(false);
m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled,
allocationTrackingEnabled);
startTrackingHeapObjectsInternal(allocationTrackingEnabled);
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
- ErrorString* error, const protocol::Maybe<bool>& reportProgress) {
+Response V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
+ Maybe<bool> reportProgress) {
requestHeapStatsUpdate();
- takeHeapSnapshot(error, reportProgress);
+ takeHeapSnapshot(std::move(reportProgress));
stopTrackingHeapObjectsInternal();
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::enable(ErrorString*) {
+Response V8HeapProfilerAgentImpl::enable() {
m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, true);
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::disable(ErrorString* error) {
+Response V8HeapProfilerAgentImpl::disable() {
stopTrackingHeapObjectsInternal();
if (m_state->booleanProperty(
HeapProfilerAgentState::samplingHeapProfilerEnabled, false)) {
@@ -205,15 +208,12 @@ void V8HeapProfilerAgentImpl::disable(ErrorString* error) {
}
m_isolate->GetHeapProfiler()->ClearObjectIds();
m_state->setBoolean(HeapProfilerAgentState::heapProfilerEnabled, false);
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::takeHeapSnapshot(
- ErrorString* errorString, const protocol::Maybe<bool>& reportProgress) {
+Response V8HeapProfilerAgentImpl::takeHeapSnapshot(Maybe<bool> reportProgress) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- if (!profiler) {
- *errorString = "Cannot access v8 heap profiler";
- return;
- }
+ if (!profiler) return Response::Error("Cannot access v8 heap profiler");
std::unique_ptr<HeapSnapshotProgress> progress;
if (reportProgress.fromMaybe(false))
progress = wrapUnique(new HeapSnapshotProgress(&m_frontend));
@@ -221,80 +221,62 @@ void V8HeapProfilerAgentImpl::takeHeapSnapshot(
GlobalObjectNameResolver resolver(m_session);
const v8::HeapSnapshot* snapshot =
profiler->TakeHeapSnapshot(progress.get(), &resolver);
- if (!snapshot) {
- *errorString = "Failed to take heap snapshot";
- return;
- }
+ if (!snapshot) return Response::Error("Failed to take heap snapshot");
HeapSnapshotOutputStream stream(&m_frontend);
snapshot->Serialize(&stream);
const_cast<v8::HeapSnapshot*>(snapshot)->Delete();
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
- ErrorString* error, const String16& heapSnapshotObjectId,
- const protocol::Maybe<String16>& objectGroup,
+Response V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
+ const String16& heapSnapshotObjectId, Maybe<String16> objectGroup,
std::unique_ptr<protocol::Runtime::RemoteObject>* result) {
bool ok;
int id = heapSnapshotObjectId.toInteger(&ok);
- if (!ok) {
- *error = "Invalid heap snapshot object id";
- return;
- }
+ if (!ok) return Response::Error("Invalid heap snapshot object id");
v8::HandleScope handles(m_isolate);
v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
- if (heapObject.IsEmpty()) {
- *error = "Object is not available";
- return;
- }
+ if (heapObject.IsEmpty()) return Response::Error("Object is not available");
- if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
- *error = "Object is not available";
- return;
- }
+ if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
+ return Response::Error("Object is not available");
*result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
objectGroup.fromMaybe(""), false);
- if (!result) *error = "Object is not available";
+ if (!result) return Response::Error("Object is not available");
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::addInspectedHeapObject(
- ErrorString* errorString, const String16& inspectedHeapObjectId) {
+Response V8HeapProfilerAgentImpl::addInspectedHeapObject(
+ const String16& inspectedHeapObjectId) {
bool ok;
int id = inspectedHeapObjectId.toInteger(&ok);
- if (!ok) {
- *errorString = "Invalid heap snapshot object id";
- return;
- }
+ if (!ok) return Response::Error("Invalid heap snapshot object id");
v8::HandleScope handles(m_isolate);
v8::Local<v8::Object> heapObject = objectByHeapObjectId(m_isolate, id);
- if (heapObject.IsEmpty()) {
- *errorString = "Object is not available";
- return;
- }
-
- if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject)) {
- *errorString = "Object is not available";
- return;
- }
+ if (heapObject.IsEmpty()) return Response::Error("Object is not available");
+ if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
+ return Response::Error("Object is not available");
m_session->addInspectedObject(wrapUnique(new InspectableHeapObject(id)));
+ return Response::OK();
}
-void V8HeapProfilerAgentImpl::getHeapObjectId(ErrorString* errorString,
- const String16& objectId,
- String16* heapSnapshotObjectId) {
+Response V8HeapProfilerAgentImpl::getHeapObjectId(
+ const String16& objectId, String16* heapSnapshotObjectId) {
v8::HandleScope handles(m_isolate);
v8::Local<v8::Value> value;
v8::Local<v8::Context> context;
- if (!m_session->unwrapObject(errorString, objectId, &value, &context,
- nullptr) ||
- value->IsUndefined())
- return;
+ Response response =
+ m_session->unwrapObject(objectId, &value, &context, nullptr);
+ if (!response.isSuccess()) return response;
+ if (value->IsUndefined()) return Response::InternalError();
v8::SnapshotObjectId id = m_isolate->GetHeapProfiler()->GetObjectId(value);
*heapSnapshotObjectId = String16::fromInteger(static_cast<size_t>(id));
+ return Response::OK();
}
void V8HeapProfilerAgentImpl::requestHeapStatsUpdate() {
@@ -332,13 +314,10 @@ void V8HeapProfilerAgentImpl::stopTrackingHeapObjectsInternal() {
m_state->setBoolean(HeapProfilerAgentState::allocationTrackingEnabled, false);
}
-void V8HeapProfilerAgentImpl::startSampling(
- ErrorString* errorString, const Maybe<double>& samplingInterval) {
+Response V8HeapProfilerAgentImpl::startSampling(
+ Maybe<double> samplingInterval) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- if (!profiler) {
- *errorString = "Cannot access v8 heap profiler";
- return;
- }
+ if (!profiler) return Response::Error("Cannot access v8 heap profiler");
const unsigned defaultSamplingInterval = 1 << 15;
double samplingIntervalValue =
samplingInterval.fromMaybe(defaultSamplingInterval);
@@ -349,6 +328,7 @@ void V8HeapProfilerAgentImpl::startSampling(
profiler->StartSamplingHeapProfiler(
static_cast<uint64_t>(samplingIntervalValue), 128,
v8::HeapProfiler::kSamplingForceGC);
+ return Response::OK();
}
namespace {
@@ -379,14 +359,10 @@ buildSampingHeapProfileNode(const v8::AllocationProfile::Node* node) {
}
} // namespace
-void V8HeapProfilerAgentImpl::stopSampling(
- ErrorString* errorString,
+Response V8HeapProfilerAgentImpl::stopSampling(
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>* profile) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
- if (!profiler) {
- *errorString = "Cannot access v8 heap profiler";
- return;
- }
+ if (!profiler) return Response::Error("Cannot access v8 heap profiler");
v8::HandleScope scope(
m_isolate); // Allocation profile contains Local handles.
std::unique_ptr<v8::AllocationProfile> v8Profile(
@@ -394,14 +370,13 @@ void V8HeapProfilerAgentImpl::stopSampling(
profiler->StopSamplingHeapProfiler();
m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
false);
- if (!v8Profile) {
- *errorString = "Cannot access v8 sampled heap profile.";
- return;
- }
+ if (!v8Profile)
+ return Response::Error("Cannot access v8 sampled heap profile.");
v8::AllocationProfile::Node* root = v8Profile->GetRootNode();
*profile = protocol::HeapProfiler::SamplingHeapProfile::create()
.setHead(buildSampingHeapProfileNode(root))
.build();
+ return Response::OK();
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index caa969870b..e0e244715f 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -15,8 +15,8 @@ namespace v8_inspector {
class V8InspectorSessionImpl;
-using protocol::ErrorString;
using protocol::Maybe;
+using protocol::Response;
class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
public:
@@ -25,32 +25,26 @@ class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
~V8HeapProfilerAgentImpl() override;
void restore();
- void collectGarbage(ErrorString*) override;
+ Response collectGarbage() override;
- void enable(ErrorString*) override;
- void startTrackingHeapObjects(ErrorString*,
- const Maybe<bool>& trackAllocations) override;
- void stopTrackingHeapObjects(ErrorString*,
- const Maybe<bool>& reportProgress) override;
+ Response enable() override;
+ Response startTrackingHeapObjects(Maybe<bool> trackAllocations) override;
+ Response stopTrackingHeapObjects(Maybe<bool> reportProgress) override;
- void disable(ErrorString*) override;
+ Response disable() override;
- void takeHeapSnapshot(ErrorString*,
- const Maybe<bool>& reportProgress) override;
+ Response takeHeapSnapshot(Maybe<bool> reportProgress) override;
- void getObjectByHeapObjectId(
- ErrorString*, const String16& heapSnapshotObjectId,
- const Maybe<String16>& objectGroup,
+ Response getObjectByHeapObjectId(
+ const String16& heapSnapshotObjectId, Maybe<String16> objectGroup,
std::unique_ptr<protocol::Runtime::RemoteObject>* result) override;
- void addInspectedHeapObject(ErrorString*,
- const String16& inspectedHeapObjectId) override;
- void getHeapObjectId(ErrorString*, const String16& objectId,
- String16* heapSnapshotObjectId) override;
-
- void startSampling(ErrorString*,
- const Maybe<double>& samplingInterval) override;
- void stopSampling(
- ErrorString*,
+ Response addInspectedHeapObject(
+ const String16& inspectedHeapObjectId) override;
+ Response getHeapObjectId(const String16& objectId,
+ String16* heapSnapshotObjectId) override;
+
+ Response startSampling(Maybe<double> samplingInterval) override;
+ Response stopSampling(
std::unique_ptr<protocol::HeapProfiler::SamplingHeapProfile>*) override;
private:
diff --git a/deps/v8/src/inspector/v8-injected-script-host.cc b/deps/v8/src/inspector/v8-injected-script-host.cc
index dc41ef8631..3748ec9aa3 100644
--- a/deps/v8/src/inspector/v8-injected-script-host.cc
+++ b/deps/v8/src/inspector/v8-injected-script-host.cc
@@ -166,12 +166,69 @@ void V8InjectedScriptHost::subtypeCallback(
void V8InjectedScriptHost::getInternalPropertiesCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
if (info.Length() < 1) return;
- v8::Local<v8::Array> properties;
- if (unwrapInspector(info)
- ->debugger()
- ->internalProperties(info.GetIsolate()->GetCurrentContext(), info[0])
- .ToLocal(&properties))
+
+ std::unordered_set<String16> allowedProperties;
+ if (info[0]->IsBooleanObject() || info[0]->IsNumberObject() ||
+ info[0]->IsStringObject() || info[0]->IsSymbolObject()) {
+ allowedProperties.insert(String16("[[PrimitiveValue]]"));
+ } else if (info[0]->IsPromise()) {
+ allowedProperties.insert(String16("[[PromiseStatus]]"));
+ allowedProperties.insert(String16("[[PromiseValue]]"));
+ } else if (info[0]->IsGeneratorObject()) {
+ allowedProperties.insert(String16("[[GeneratorStatus]]"));
+ } else if (info[0]->IsMapIterator() || info[0]->IsSetIterator()) {
+ allowedProperties.insert(String16("[[IteratorHasMore]]"));
+ allowedProperties.insert(String16("[[IteratorIndex]]"));
+ allowedProperties.insert(String16("[[IteratorKind]]"));
+ allowedProperties.insert(String16("[[Entries]]"));
+ } else if (info[0]->IsMap() || info[0]->IsWeakMap() || info[0]->IsSet() ||
+ info[0]->IsWeakSet()) {
+ allowedProperties.insert(String16("[[Entries]]"));
+ }
+ if (!allowedProperties.size()) return;
+
+ v8::Isolate* isolate = info.GetIsolate();
+ v8::Local<v8::Array> allProperties;
+ if (!unwrapInspector(info)
+ ->debugger()
+ ->internalProperties(isolate->GetCurrentContext(), info[0])
+ .ToLocal(&allProperties) ||
+ !allProperties->IsArray() || allProperties->Length() % 2 != 0)
+ return;
+
+ {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8::TryCatch tryCatch(isolate);
+ v8::Isolate::DisallowJavascriptExecutionScope throwJs(
+ isolate,
+ v8::Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE);
+
+ v8::Local<v8::Array> properties = v8::Array::New(isolate);
+ if (tryCatch.HasCaught()) return;
+
+ uint32_t outputIndex = 0;
+ for (uint32_t i = 0; i < allProperties->Length(); i += 2) {
+ v8::Local<v8::Value> key;
+ if (!allProperties->Get(context, i).ToLocal(&key)) continue;
+ if (tryCatch.HasCaught()) {
+ tryCatch.Reset();
+ continue;
+ }
+ String16 keyString = toProtocolStringWithTypeCheck(key);
+ if (keyString.isEmpty() ||
+ allowedProperties.find(keyString) == allowedProperties.end())
+ continue;
+ v8::Local<v8::Value> value;
+ if (!allProperties->Get(context, i + 1).ToLocal(&value)) continue;
+ if (tryCatch.HasCaught()) {
+ tryCatch.Reset();
+ continue;
+ }
+ createDataProperty(context, properties, outputIndex++, key);
+ createDataProperty(context, properties, outputIndex++, value);
+ }
info.GetReturnValue().Set(properties);
+ }
}
void V8InjectedScriptHost::objectHasOwnPropertyCallback(
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index c3d3f48f00..e415575304 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -104,12 +104,11 @@ V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
}
V8InspectorSessionImpl::~V8InspectorSessionImpl() {
- ErrorString errorString;
- m_consoleAgent->disable(&errorString);
- m_profilerAgent->disable(&errorString);
- m_heapProfilerAgent->disable(&errorString);
- m_debuggerAgent->disable(&errorString);
- m_runtimeAgent->disable(&errorString);
+ m_consoleAgent->disable();
+ m_profilerAgent->disable();
+ m_heapProfilerAgent->disable();
+ m_debuggerAgent->disable();
+ m_runtimeAgent->disable();
discardInjectedScripts();
m_inspector->disconnect(this);
@@ -165,42 +164,35 @@ void V8InspectorSessionImpl::discardInjectedScripts() {
}
}
-InjectedScript* V8InspectorSessionImpl::findInjectedScript(
- ErrorString* errorString, int contextId) {
- if (!contextId) {
- *errorString = "Cannot find context with specified id";
- return nullptr;
- }
+Response V8InspectorSessionImpl::findInjectedScript(
+ int contextId, InjectedScript*& injectedScript) {
+ injectedScript = nullptr;
+ if (!contextId)
+ return Response::Error("Cannot find context with specified id");
const V8InspectorImpl::ContextByIdMap* contexts =
m_inspector->contextGroup(m_contextGroupId);
- if (!contexts) {
- *errorString = "Cannot find context with specified id";
- return nullptr;
- }
+ if (!contexts)
+ return Response::Error("Cannot find context with specified id");
auto contextsIt = contexts->find(contextId);
- if (contextsIt == contexts->end()) {
- *errorString = "Cannot find context with specified id";
- return nullptr;
- }
+ if (contextsIt == contexts->end())
+ return Response::Error("Cannot find context with specified id");
const std::unique_ptr<InspectedContext>& context = contextsIt->second;
if (!context->getInjectedScript()) {
- if (!context->createInjectedScript()) {
- *errorString = "Cannot access specified execution context";
- return nullptr;
- }
+ if (!context->createInjectedScript())
+ return Response::Error("Cannot access specified execution context");
if (m_customObjectFormatterEnabled)
context->getInjectedScript()->setCustomObjectFormatterEnabled(true);
}
- return context->getInjectedScript();
+ injectedScript = context->getInjectedScript();
+ return Response::OK();
}
-InjectedScript* V8InspectorSessionImpl::findInjectedScript(
- ErrorString* errorString, RemoteObjectIdBase* objectId) {
- return objectId ? findInjectedScript(errorString, objectId->contextId())
- : nullptr;
+Response V8InspectorSessionImpl::findInjectedScript(
+ RemoteObjectIdBase* objectId, InjectedScript*& injectedScript) {
+ return findInjectedScript(objectId->contextId(), injectedScript);
}
void V8InspectorSessionImpl::releaseObjectGroup(const StringView& objectGroup) {
@@ -230,31 +222,35 @@ bool V8InspectorSessionImpl::unwrapObject(
std::unique_ptr<StringBuffer>* error, const StringView& objectId,
v8::Local<v8::Value>* object, v8::Local<v8::Context>* context,
std::unique_ptr<StringBuffer>* objectGroup) {
- ErrorString errorString;
String16 objectGroupString;
- bool result =
- unwrapObject(&errorString, toString16(objectId), object, context,
- objectGroup ? &objectGroupString : nullptr);
- if (error) *error = StringBufferImpl::adopt(errorString);
+ Response response = unwrapObject(toString16(objectId), object, context,
+ objectGroup ? &objectGroupString : nullptr);
+ if (!response.isSuccess()) {
+ if (error) {
+ String16 errorMessage = response.errorMessage();
+ *error = StringBufferImpl::adopt(errorMessage);
+ }
+ return false;
+ }
if (objectGroup) *objectGroup = StringBufferImpl::adopt(objectGroupString);
- return result;
+ return true;
}
-bool V8InspectorSessionImpl::unwrapObject(ErrorString* errorString,
- const String16& objectId,
- v8::Local<v8::Value>* object,
- v8::Local<v8::Context>* context,
- String16* objectGroup) {
- std::unique_ptr<RemoteObjectId> remoteId =
- RemoteObjectId::parse(errorString, objectId);
- if (!remoteId) return false;
- InjectedScript* injectedScript =
- findInjectedScript(errorString, remoteId.get());
- if (!injectedScript) return false;
- if (!injectedScript->findObject(errorString, *remoteId, object)) return false;
+Response V8InspectorSessionImpl::unwrapObject(const String16& objectId,
+ v8::Local<v8::Value>* object,
+ v8::Local<v8::Context>* context,
+ String16* objectGroup) {
+ std::unique_ptr<RemoteObjectId> remoteId;
+ Response response = RemoteObjectId::parse(objectId, &remoteId);
+ if (!response.isSuccess()) return response;
+ InjectedScript* injectedScript = nullptr;
+ response = findInjectedScript(remoteId.get(), injectedScript);
+ if (!response.isSuccess()) return response;
+ response = injectedScript->findObject(*remoteId, object);
+ if (!response.isSuccess()) return response;
*context = injectedScript->context()->context();
if (objectGroup) *objectGroup = injectedScript->objectGroupName(*remoteId);
- return true;
+ return Response::OK();
}
std::unique_ptr<protocol::Runtime::API::RemoteObject>
@@ -269,21 +265,20 @@ V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
v8::Local<v8::Value> value,
const String16& groupName,
bool generatePreview) {
- ErrorString errorString;
- InjectedScript* injectedScript =
- findInjectedScript(&errorString, V8Debugger::contextId(context));
+ InjectedScript* injectedScript = nullptr;
+ findInjectedScript(V8Debugger::contextId(context), injectedScript);
if (!injectedScript) return nullptr;
- return injectedScript->wrapObject(&errorString, value, groupName, false,
- generatePreview);
+ std::unique_ptr<protocol::Runtime::RemoteObject> result;
+ injectedScript->wrapObject(value, groupName, false, generatePreview, &result);
+ return result;
}
std::unique_ptr<protocol::Runtime::RemoteObject>
V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
v8::Local<v8::Value> table,
v8::Local<v8::Value> columns) {
- ErrorString errorString;
- InjectedScript* injectedScript =
- findInjectedScript(&errorString, V8Debugger::contextId(context));
+ InjectedScript* injectedScript = nullptr;
+ findInjectedScript(V8Debugger::contextId(context), injectedScript);
if (!injectedScript) return nullptr;
return injectedScript->wrapTable(table, columns);
}
@@ -386,19 +381,12 @@ void V8InspectorSessionImpl::breakProgram(const StringView& breakReason,
}
void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
- ErrorString errorString;
- m_debuggerAgent->setSkipAllPauses(&errorString, skip);
+ m_debuggerAgent->setSkipAllPauses(skip);
}
-void V8InspectorSessionImpl::resume() {
- ErrorString errorString;
- m_debuggerAgent->resume(&errorString);
-}
+void V8InspectorSessionImpl::resume() { m_debuggerAgent->resume(); }
-void V8InspectorSessionImpl::stepOver() {
- ErrorString errorString;
- m_debuggerAgent->stepOver(&errorString);
-}
+void V8InspectorSessionImpl::stepOver() { m_debuggerAgent->stepOver(); }
std::vector<std::unique_ptr<protocol::Debugger::API::SearchMatch>>
V8InspectorSessionImpl::searchInTextByLines(const StringView& text,
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index e84e8c99a7..af65aa3c93 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -26,7 +26,7 @@ class V8ProfilerAgentImpl;
class V8RuntimeAgentImpl;
class V8SchemaAgentImpl;
-using protocol::ErrorString;
+using protocol::Response;
class V8InspectorSessionImpl : public V8InspectorSession,
public protocol::FrontendChannel {
@@ -44,8 +44,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
V8RuntimeAgentImpl* runtimeAgent() { return m_runtimeAgent.get(); }
int contextGroupId() const { return m_contextGroupId; }
- InjectedScript* findInjectedScript(ErrorString*, int contextId);
- InjectedScript* findInjectedScript(ErrorString*, RemoteObjectIdBase*);
+ Response findInjectedScript(int contextId, InjectedScript*&);
+ Response findInjectedScript(RemoteObjectIdBase*, InjectedScript*&);
void reset();
void discardInjectedScripts();
void reportAllContexts(V8RuntimeAgentImpl*);
@@ -57,9 +57,8 @@ class V8InspectorSessionImpl : public V8InspectorSession,
v8::Local<v8::Context>, v8::Local<v8::Value> table,
v8::Local<v8::Value> columns);
std::vector<std::unique_ptr<protocol::Schema::Domain>> supportedDomainsImpl();
- bool unwrapObject(ErrorString*, const String16& objectId,
- v8::Local<v8::Value>*, v8::Local<v8::Context>*,
- String16* objectGroup);
+ Response unwrapObject(const String16& objectId, v8::Local<v8::Value>*,
+ v8::Local<v8::Context>*, String16* objectGroup);
void releaseObjectGroup(const String16& objectGroup);
// V8InspectorSession implementation.
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 0511ca39b5..8b888a066b 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -201,34 +201,34 @@ void V8ProfilerAgentImpl::consoleProfileEnd(const String16& title) {
resolvedTitle);
}
-void V8ProfilerAgentImpl::enable(ErrorString*) {
- if (m_enabled) return;
+Response V8ProfilerAgentImpl::enable() {
+ if (m_enabled) return Response::OK();
m_enabled = true;
DCHECK(!m_profiler);
m_profiler = v8::CpuProfiler::New(m_isolate);
m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
+ return Response::OK();
}
-void V8ProfilerAgentImpl::disable(ErrorString* errorString) {
- if (!m_enabled) return;
+Response V8ProfilerAgentImpl::disable() {
+ if (!m_enabled) return Response::OK();
for (size_t i = m_startedProfiles.size(); i > 0; --i)
stopProfiling(m_startedProfiles[i - 1].m_id, false);
m_startedProfiles.clear();
- stop(nullptr, nullptr);
+ stop(nullptr);
m_profiler->Dispose();
m_profiler = nullptr;
m_enabled = false;
m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
+ return Response::OK();
}
-void V8ProfilerAgentImpl::setSamplingInterval(ErrorString* error,
- int interval) {
- if (m_recordingCPUProfile) {
- *error = "Cannot change sampling interval when profiling.";
- return;
- }
+Response V8ProfilerAgentImpl::setSamplingInterval(int interval) {
+ if (m_recordingCPUProfile)
+ return Response::Error("Cannot change sampling interval when profiling.");
m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
m_profiler->SetSamplingInterval(interval);
+ return Response::OK();
}
void V8ProfilerAgentImpl::restore() {
@@ -243,39 +243,34 @@ void V8ProfilerAgentImpl::restore() {
if (interval) m_profiler->SetSamplingInterval(interval);
if (m_state->booleanProperty(ProfilerAgentState::userInitiatedProfiling,
false)) {
- ErrorString error;
- start(&error);
+ start();
}
}
-void V8ProfilerAgentImpl::start(ErrorString* error) {
- if (m_recordingCPUProfile) return;
- if (!m_enabled) {
- *error = "Profiler is not enabled";
- return;
- }
+Response V8ProfilerAgentImpl::start() {
+ if (m_recordingCPUProfile) return Response::OK();
+ if (!m_enabled) return Response::Error("Profiler is not enabled");
m_recordingCPUProfile = true;
m_frontendInitiatedProfileId = nextProfileId();
startProfiling(m_frontendInitiatedProfileId);
m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, true);
+ return Response::OK();
}
-void V8ProfilerAgentImpl::stop(
- ErrorString* errorString,
+Response V8ProfilerAgentImpl::stop(
std::unique_ptr<protocol::Profiler::Profile>* profile) {
- if (!m_recordingCPUProfile) {
- if (errorString) *errorString = "No recording profiles found";
- return;
- }
+ if (!m_recordingCPUProfile)
+ return Response::Error("No recording profiles found");
m_recordingCPUProfile = false;
std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
stopProfiling(m_frontendInitiatedProfileId, !!profile);
if (profile) {
*profile = std::move(cpuProfile);
- if (!profile->get() && errorString) *errorString = "Profile is not found";
+ if (!profile->get()) return Response::Error("Profile is not found");
}
m_frontendInitiatedProfileId = String16();
m_state->setBoolean(ProfilerAgentState::userInitiatedProfiling, false);
+ return Response::OK();
}
String16 V8ProfilerAgentImpl::nextProfileId() {
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index ee8997653a..a634ff3cd9 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -20,7 +20,7 @@ namespace v8_inspector {
class V8InspectorSessionImpl;
-using protocol::ErrorString;
+using protocol::Response;
class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
public:
@@ -31,12 +31,11 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
bool enabled() const { return m_enabled; }
void restore();
- void enable(ErrorString*) override;
- void disable(ErrorString*) override;
- void setSamplingInterval(ErrorString*, int) override;
- void start(ErrorString*) override;
- void stop(ErrorString*,
- std::unique_ptr<protocol::Profiler::Profile>*) override;
+ Response enable() override;
+ Response disable() override;
+ Response setSamplingInterval(int) override;
+ Response start() override;
+ Response stop(std::unique_ptr<protocol::Profiler::Profile>*) override;
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 640ec317d2..4dbe60f8f3 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -41,6 +41,7 @@
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
+#include "src/tracing/trace-event.h"
#include "include/v8-inspector.h"
@@ -54,11 +55,6 @@ static const char runtimeEnabled[] = "runtimeEnabled";
using protocol::Runtime::RemoteObject;
-static bool hasInternalError(ErrorString* errorString, bool hasError) {
- if (hasError) *errorString = "Internal error";
- return hasError;
-}
-
namespace {
template <typename Callback>
@@ -71,11 +67,11 @@ class ProtocolPromiseHandler {
bool returnByValue, bool generatePreview,
std::unique_ptr<Callback> callback) {
if (value.IsEmpty()) {
- callback->sendFailure("Internal error");
+ callback->sendFailure(Response::InternalError());
return;
}
if (!value.ToLocalChecked()->IsPromise()) {
- callback->sendFailure(notPromiseError);
+ callback->sendFailure(Response::Error(notPromiseError));
return;
}
v8::MicrotasksScope microtasks_scope(inspector->isolate(),
@@ -93,7 +89,7 @@ class ProtocolPromiseHandler {
v8::ConstructorBehavior::kThrow)
.ToLocalChecked();
if (promise->Then(context, thenCallbackFunction).IsEmpty()) {
- rawCallback->sendFailure("Internal error");
+ rawCallback->sendFailure(Response::InternalError());
return;
}
v8::Local<v8::Function> catchCallbackFunction =
@@ -101,7 +97,7 @@ class ProtocolPromiseHandler {
v8::ConstructorBehavior::kThrow)
.ToLocalChecked();
if (promise->Catch(context, catchCallbackFunction).IsEmpty()) {
- rawCallback->sendFailure("Internal error");
+ rawCallback->sendFailure(Response::InternalError());
return;
}
}
@@ -179,25 +175,27 @@ class ProtocolPromiseHandler {
data.GetParameter()->m_wrapper.Reset();
data.SetSecondPassCallback(cleanup);
} else {
- data.GetParameter()->m_callback->sendFailure("Promise was collected");
+ data.GetParameter()->m_callback->sendFailure(
+ Response::Error("Promise was collected"));
delete data.GetParameter();
}
}
std::unique_ptr<protocol::Runtime::RemoteObject> wrapObject(
v8::Local<v8::Value> value) {
- ErrorString errorString;
- InjectedScript::ContextScope scope(&errorString, m_inspector,
- m_contextGroupId, m_executionContextId);
- if (!scope.initialize()) {
- m_callback->sendFailure(errorString);
+ InjectedScript::ContextScope scope(m_inspector, m_contextGroupId,
+ m_executionContextId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) {
+ m_callback->sendFailure(response);
return nullptr;
}
- std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue =
- scope.injectedScript()->wrapObject(&errorString, value, m_objectGroup,
- m_returnByValue, m_generatePreview);
- if (!wrappedValue) {
- m_callback->sendFailure(errorString);
+ std::unique_ptr<protocol::Runtime::RemoteObject> wrappedValue;
+ response = scope.injectedScript()->wrapObject(
+ value, m_objectGroup, m_returnByValue, m_generatePreview,
+ &wrappedValue);
+ if (!response.isSuccess()) {
+ m_callback->sendFailure(response);
return nullptr;
}
return wrappedValue;
@@ -222,34 +220,30 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
std::unique_ptr<RemoteObject> result;
Maybe<protocol::Runtime::ExceptionDetails> exceptionDetails;
- ErrorString errorString;
- injectedScript->wrapEvaluateResult(
- &errorString, maybeResultValue, tryCatch, objectGroup, returnByValue,
- generatePreview, &result, &exceptionDetails);
- if (errorString.isEmpty()) {
- callback->sendSuccess(std::move(result), exceptionDetails);
+ Response response = injectedScript->wrapEvaluateResult(
+ maybeResultValue, tryCatch, objectGroup, returnByValue, generatePreview,
+ &result, &exceptionDetails);
+ if (response.isSuccess()) {
+ callback->sendSuccess(std::move(result), std::move(exceptionDetails));
return true;
}
- callback->sendFailure(errorString);
+ callback->sendFailure(response);
return false;
}
-int ensureContext(ErrorString* errorString, V8InspectorImpl* inspector,
- int contextGroupId, const Maybe<int>& executionContextId) {
- int contextId;
+Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
+ Maybe<int> executionContextId, int* contextId) {
if (executionContextId.isJust()) {
- contextId = executionContextId.fromJust();
+ *contextId = executionContextId.fromJust();
} else {
v8::HandleScope handles(inspector->isolate());
v8::Local<v8::Context> defaultContext =
inspector->client()->ensureDefaultContextInGroup(contextGroupId);
- if (defaultContext.IsEmpty()) {
- *errorString = "Cannot find default execution context";
- return 0;
- }
- contextId = V8Debugger::contextId(defaultContext);
+ if (defaultContext.IsEmpty())
+ return Response::Error("Cannot find default execution context");
+ *contextId = V8Debugger::contextId(defaultContext);
}
- return contextId;
+ return Response::OK();
}
} // namespace
@@ -266,36 +260,33 @@ V8RuntimeAgentImpl::V8RuntimeAgentImpl(
V8RuntimeAgentImpl::~V8RuntimeAgentImpl() {}
void V8RuntimeAgentImpl::evaluate(
- const String16& expression, const Maybe<String16>& objectGroup,
- const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& silent,
- const Maybe<int>& executionContextId, const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
- const Maybe<bool>& awaitPromise,
- std::unique_ptr<EvaluateCallback> callback) {
- ErrorString errorString;
- int contextId =
- ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
- executionContextId);
- if (!errorString.isEmpty()) {
- callback->sendFailure(errorString);
+ const String16& expression, Maybe<String16> objectGroup,
+ Maybe<bool> includeCommandLineAPI, Maybe<bool> silent,
+ Maybe<int> executionContextId, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview, Maybe<bool> userGesture,
+ Maybe<bool> awaitPromise, std::unique_ptr<EvaluateCallback> callback) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
+ "EvaluateScript");
+ int contextId = 0;
+ Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+ std::move(executionContextId), &contextId);
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
- InjectedScript::ContextScope scope(&errorString, m_inspector,
- m_session->contextGroupId(), contextId);
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
+ contextId);
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
- if (includeCommandLineAPI.fromMaybe(false) &&
- !scope.installCommandLineAPI()) {
- callback->sendFailure(errorString);
- return;
- }
+ if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
bool evalIsDisabled = !scope.context()->IsCodeGenerationFromStringsAllowed();
// Temporarily enable allow evals for inspector.
@@ -312,8 +303,9 @@ void V8RuntimeAgentImpl::evaluate(
// Re-initialize after running client's code, as it could have destroyed
// context or session.
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
@@ -333,14 +325,14 @@ void V8RuntimeAgentImpl::evaluate(
}
void V8RuntimeAgentImpl::awaitPromise(
- const String16& promiseObjectId, const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview,
+ const String16& promiseObjectId, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview,
std::unique_ptr<AwaitPromiseCallback> callback) {
- ErrorString errorString;
- InjectedScript::ObjectScope scope(
- &errorString, m_inspector, m_session->contextGroupId(), promiseObjectId);
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+ promiseObjectId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
ProtocolPromiseHandler<AwaitPromiseCallback>::add(
@@ -353,17 +345,15 @@ void V8RuntimeAgentImpl::awaitPromise(
void V8RuntimeAgentImpl::callFunctionOn(
const String16& objectId, const String16& expression,
- const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
- optionalArguments,
- const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
- const Maybe<bool>& awaitPromise,
+ Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
+ Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
+ Maybe<bool> userGesture, Maybe<bool> awaitPromise,
std::unique_ptr<CallFunctionOnCallback> callback) {
- ErrorString errorString;
- InjectedScript::ObjectScope scope(&errorString, m_inspector,
- m_session->contextGroupId(), objectId);
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+ objectId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
@@ -376,10 +366,10 @@ void V8RuntimeAgentImpl::callFunctionOn(
argv.reset(new v8::Local<v8::Value>[argc]);
for (int i = 0; i < argc; ++i) {
v8::Local<v8::Value> argumentValue;
- if (!scope.injectedScript()
- ->resolveCallArgument(&errorString, arguments->get(i))
- .ToLocal(&argumentValue)) {
- callback->sendFailure(errorString);
+ response = scope.injectedScript()->resolveCallArgument(arguments->get(i),
+ &argumentValue);
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
argv[i] = argumentValue;
@@ -395,8 +385,9 @@ void V8RuntimeAgentImpl::callFunctionOn(
toV8String(m_inspector->isolate(), "(" + expression + ")"));
// Re-initialize after running client's code, as it could have destroyed
// context or session.
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
@@ -410,7 +401,8 @@ void V8RuntimeAgentImpl::callFunctionOn(
v8::Local<v8::Value> functionValue;
if (!maybeFunctionValue.ToLocal(&functionValue) ||
!functionValue->IsFunction()) {
- callback->sendFailure("Given expression does not evaluate to a function");
+ callback->sendFailure(
+ Response::Error("Given expression does not evaluate to a function"));
return;
}
@@ -419,8 +411,9 @@ void V8RuntimeAgentImpl::callFunctionOn(
argv.get());
// Re-initialize after running client's code, as it could have destroyed
// context or session.
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
@@ -441,10 +434,9 @@ void V8RuntimeAgentImpl::callFunctionOn(
std::move(callback));
}
-void V8RuntimeAgentImpl::getProperties(
- ErrorString* errorString, const String16& objectId,
- const Maybe<bool>& ownProperties, const Maybe<bool>& accessorPropertiesOnly,
- const Maybe<bool>& generatePreview,
+Response V8RuntimeAgentImpl::getProperties(
+ const String16& objectId, Maybe<bool> ownProperties,
+ Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
@@ -452,105 +444,103 @@ void V8RuntimeAgentImpl::getProperties(
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
using protocol::Runtime::InternalPropertyDescriptor;
- InjectedScript::ObjectScope scope(errorString, m_inspector,
- m_session->contextGroupId(), objectId);
- if (!scope.initialize()) return;
+ InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+ objectId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) return response;
scope.ignoreExceptionsAndMuteConsole();
- if (!scope.object()->IsObject()) {
- *errorString = "Value with given id is not an object";
- return;
- }
+ if (!scope.object()->IsObject())
+ return Response::Error("Value with given id is not an object");
v8::Local<v8::Object> object = scope.object().As<v8::Object>();
- scope.injectedScript()->getProperties(
- errorString, object, scope.objectGroupName(),
- ownProperties.fromMaybe(false), accessorPropertiesOnly.fromMaybe(false),
- generatePreview.fromMaybe(false), result, exceptionDetails);
- if (!errorString->isEmpty() || exceptionDetails->isJust() ||
- accessorPropertiesOnly.fromMaybe(false))
- return;
+ response = scope.injectedScript()->getProperties(
+ object, scope.objectGroupName(), ownProperties.fromMaybe(false),
+ accessorPropertiesOnly.fromMaybe(false), generatePreview.fromMaybe(false),
+ result, exceptionDetails);
+ if (!response.isSuccess()) return response;
+ if (exceptionDetails->isJust() || accessorPropertiesOnly.fromMaybe(false))
+ return Response::OK();
v8::Local<v8::Array> propertiesArray;
- if (hasInternalError(errorString, !m_inspector->debugger()
- ->internalProperties(scope.context(),
- scope.object())
- .ToLocal(&propertiesArray)))
- return;
+ if (!m_inspector->debugger()
+ ->internalProperties(scope.context(), scope.object())
+ .ToLocal(&propertiesArray)) {
+ return Response::InternalError();
+ }
std::unique_ptr<protocol::Array<InternalPropertyDescriptor>>
propertiesProtocolArray =
protocol::Array<InternalPropertyDescriptor>::create();
for (uint32_t i = 0; i < propertiesArray->Length(); i += 2) {
v8::Local<v8::Value> name;
- if (hasInternalError(
- errorString,
- !propertiesArray->Get(scope.context(), i).ToLocal(&name)) ||
- !name->IsString())
- return;
+ if (!propertiesArray->Get(scope.context(), i).ToLocal(&name) ||
+ !name->IsString()) {
+ return Response::InternalError();
+ }
v8::Local<v8::Value> value;
- if (hasInternalError(
- errorString,
- !propertiesArray->Get(scope.context(), i + 1).ToLocal(&value)))
- return;
- std::unique_ptr<RemoteObject> wrappedValue =
- scope.injectedScript()->wrapObject(errorString, value,
- scope.objectGroupName());
- if (!wrappedValue) return;
+ if (!propertiesArray->Get(scope.context(), i + 1).ToLocal(&value))
+ return Response::InternalError();
+ std::unique_ptr<RemoteObject> wrappedValue;
+ protocol::Response response = scope.injectedScript()->wrapObject(
+ value, scope.objectGroupName(), false, false, &wrappedValue);
+ if (!response.isSuccess()) return response;
propertiesProtocolArray->addItem(
InternalPropertyDescriptor::create()
.setName(toProtocolString(name.As<v8::String>()))
.setValue(std::move(wrappedValue))
.build());
}
- if (!propertiesProtocolArray->length()) return;
- *internalProperties = std::move(propertiesProtocolArray);
+ if (propertiesProtocolArray->length())
+ *internalProperties = std::move(propertiesProtocolArray);
+ return Response::OK();
}
-void V8RuntimeAgentImpl::releaseObject(ErrorString* errorString,
- const String16& objectId) {
- InjectedScript::ObjectScope scope(errorString, m_inspector,
- m_session->contextGroupId(), objectId);
- if (!scope.initialize()) return;
+Response V8RuntimeAgentImpl::releaseObject(const String16& objectId) {
+ InjectedScript::ObjectScope scope(m_inspector, m_session->contextGroupId(),
+ objectId);
+ Response response = scope.initialize();
+ if (!response.isSuccess()) return response;
scope.injectedScript()->releaseObject(objectId);
+ return Response::OK();
}
-void V8RuntimeAgentImpl::releaseObjectGroup(ErrorString*,
- const String16& objectGroup) {
+Response V8RuntimeAgentImpl::releaseObjectGroup(const String16& objectGroup) {
m_session->releaseObjectGroup(objectGroup);
+ return Response::OK();
}
-void V8RuntimeAgentImpl::runIfWaitingForDebugger(ErrorString* errorString) {
+Response V8RuntimeAgentImpl::runIfWaitingForDebugger() {
m_inspector->client()->runIfWaitingForDebugger(m_session->contextGroupId());
+ return Response::OK();
}
-void V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(ErrorString*,
- bool enabled) {
+Response V8RuntimeAgentImpl::setCustomObjectFormatterEnabled(bool enabled) {
m_state->setBoolean(V8RuntimeAgentImplState::customObjectFormatterEnabled,
enabled);
m_session->setCustomObjectFormatterEnabled(enabled);
+ return Response::OK();
}
-void V8RuntimeAgentImpl::discardConsoleEntries(ErrorString*) {
+Response V8RuntimeAgentImpl::discardConsoleEntries() {
V8ConsoleMessageStorage* storage =
m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
storage->clear();
+ return Response::OK();
}
-void V8RuntimeAgentImpl::compileScript(
- ErrorString* errorString, const String16& expression,
- const String16& sourceURL, bool persistScript,
- const Maybe<int>& executionContextId, Maybe<String16>* scriptId,
+Response V8RuntimeAgentImpl::compileScript(
+ const String16& expression, const String16& sourceURL, bool persistScript,
+ Maybe<int> executionContextId, Maybe<String16>* scriptId,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
- if (!m_enabled) {
- *errorString = "Runtime agent is not enabled";
- return;
- }
- int contextId =
- ensureContext(errorString, m_inspector, m_session->contextGroupId(),
- executionContextId);
- if (!errorString->isEmpty()) return;
- InjectedScript::ContextScope scope(errorString, m_inspector,
- m_session->contextGroupId(), contextId);
- if (!scope.initialize()) return;
+ if (!m_enabled) return Response::Error("Runtime agent is not enabled");
+
+ int contextId = 0;
+ Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+ std::move(executionContextId), &contextId);
+ if (!response.isSuccess()) return response;
+ InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
+ contextId);
+ response = scope.initialize();
+ if (!response.isSuccess()) return response;
if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
v8::Local<v8::Script> script = m_inspector->compileScript(
@@ -558,15 +548,17 @@ void V8RuntimeAgentImpl::compileScript(
sourceURL, false);
if (!persistScript) m_inspector->debugger()->unmuteScriptParsedEvents();
if (script.IsEmpty()) {
- if (scope.tryCatch().HasCaught())
- *exceptionDetails = scope.injectedScript()->createExceptionDetails(
- errorString, scope.tryCatch(), String16(), false);
- else
- *errorString = "Script compilation failed";
- return;
+ if (scope.tryCatch().HasCaught()) {
+ response = scope.injectedScript()->createExceptionDetails(
+ scope.tryCatch(), String16(), false, exceptionDetails);
+ if (!response.isSuccess()) return response;
+ return Response::OK();
+ } else {
+ return Response::Error("Script compilation failed");
+ }
}
- if (!persistScript) return;
+ if (!persistScript) return Response::OK();
String16 scriptValueId =
String16::fromInteger(script->GetUnboundScript()->GetId());
@@ -574,38 +566,39 @@ void V8RuntimeAgentImpl::compileScript(
new v8::Global<v8::Script>(m_inspector->isolate(), script));
m_compiledScripts[scriptValueId] = std::move(global);
*scriptId = scriptValueId;
+ return Response::OK();
}
void V8RuntimeAgentImpl::runScript(
- const String16& scriptId, const Maybe<int>& executionContextId,
- const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
- const Maybe<bool>& includeCommandLineAPI, const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview, const Maybe<bool>& awaitPromise,
+ const String16& scriptId, Maybe<int> executionContextId,
+ Maybe<String16> objectGroup, Maybe<bool> silent,
+ Maybe<bool> includeCommandLineAPI, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview, Maybe<bool> awaitPromise,
std::unique_ptr<RunScriptCallback> callback) {
if (!m_enabled) {
- callback->sendFailure("Runtime agent is not enabled");
+ callback->sendFailure(Response::Error("Runtime agent is not enabled"));
return;
}
auto it = m_compiledScripts.find(scriptId);
if (it == m_compiledScripts.end()) {
- callback->sendFailure("No script with given id");
+ callback->sendFailure(Response::Error("No script with given id"));
return;
}
- ErrorString errorString;
- int contextId =
- ensureContext(&errorString, m_inspector, m_session->contextGroupId(),
- executionContextId);
- if (!errorString.isEmpty()) {
- callback->sendFailure(errorString);
+ int contextId = 0;
+ Response response = ensureContext(m_inspector, m_session->contextGroupId(),
+ std::move(executionContextId), &contextId);
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
- InjectedScript::ContextScope scope(&errorString, m_inspector,
- m_session->contextGroupId(), contextId);
- if (!scope.initialize()) {
- callback->sendFailure(errorString);
+ InjectedScript::ContextScope scope(m_inspector, m_session->contextGroupId(),
+ contextId);
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
return;
}
@@ -615,19 +608,22 @@ void V8RuntimeAgentImpl::runScript(
m_compiledScripts.erase(it);
v8::Local<v8::Script> script = scriptWrapper->Get(m_inspector->isolate());
if (script.IsEmpty()) {
- callback->sendFailure("Script execution failed");
+ callback->sendFailure(Response::Error("Script execution failed"));
return;
}
- if (includeCommandLineAPI.fromMaybe(false) && !scope.installCommandLineAPI())
- return;
+ if (includeCommandLineAPI.fromMaybe(false)) scope.installCommandLineAPI();
v8::MaybeLocal<v8::Value> maybeResultValue =
m_inspector->runCompiledScript(scope.context(), script);
// Re-initialize after running client's code, as it could have destroyed
// context or session.
- if (!scope.initialize()) return;
+ response = scope.initialize();
+ if (!response.isSuccess()) {
+ callback->sendFailure(response);
+ return;
+ }
if (!awaitPromise.fromMaybe(false) || scope.tryCatch().HasCaught()) {
wrapEvaluateResultAsync(scope.injectedScript(), maybeResultValue,
@@ -649,15 +645,14 @@ void V8RuntimeAgentImpl::restore() {
if (!m_state->booleanProperty(V8RuntimeAgentImplState::runtimeEnabled, false))
return;
m_frontend.executionContextsCleared();
- ErrorString error;
- enable(&error);
+ enable();
if (m_state->booleanProperty(
V8RuntimeAgentImplState::customObjectFormatterEnabled, false))
m_session->setCustomObjectFormatterEnabled(true);
}
-void V8RuntimeAgentImpl::enable(ErrorString* errorString) {
- if (m_enabled) return;
+Response V8RuntimeAgentImpl::enable() {
+ if (m_enabled) return Response::OK();
m_inspector->client()->beginEnsureAllContextsInGroup(
m_session->contextGroupId());
m_enabled = true;
@@ -667,12 +662,13 @@ void V8RuntimeAgentImpl::enable(ErrorString* errorString) {
V8ConsoleMessageStorage* storage =
m_inspector->ensureConsoleMessageStorage(m_session->contextGroupId());
for (const auto& message : storage->messages()) {
- if (!reportMessage(message.get(), false)) return;
+ if (!reportMessage(message.get(), false)) break;
}
+ return Response::OK();
}
-void V8RuntimeAgentImpl::disable(ErrorString* errorString) {
- if (!m_enabled) return;
+Response V8RuntimeAgentImpl::disable() {
+ if (!m_enabled) return Response::OK();
m_enabled = false;
m_state->setBoolean(V8RuntimeAgentImplState::runtimeEnabled, false);
m_inspector->disableStackCapturingIfNeeded();
@@ -680,6 +676,7 @@ void V8RuntimeAgentImpl::disable(ErrorString* errorString) {
reset();
m_inspector->client()->endEnsureAllContextsInGroup(
m_session->contextGroupId());
+ return Response::OK();
}
void V8RuntimeAgentImpl::reset() {
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index edeeed47ed..9caa1fba47 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -46,7 +46,7 @@ class V8ConsoleMessage;
class V8InspectorImpl;
class V8InspectorSessionImpl;
-using protocol::ErrorString;
+using protocol::Response;
using protocol::Maybe;
class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
@@ -57,51 +57,45 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
void restore();
// Part of the protocol.
- void enable(ErrorString*) override;
- void disable(ErrorString*) override;
- void evaluate(const String16& expression, const Maybe<String16>& objectGroup,
- const Maybe<bool>& includeCommandLineAPI,
- const Maybe<bool>& silent, const Maybe<int>& executionContextId,
- const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview,
- const Maybe<bool>& userGesture, const Maybe<bool>& awaitPromise,
+ Response enable() override;
+ Response disable() override;
+ void evaluate(const String16& expression, Maybe<String16> objectGroup,
+ Maybe<bool> includeCommandLineAPI, Maybe<bool> silent,
+ Maybe<int> executionContextId, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview, Maybe<bool> userGesture,
+ Maybe<bool> awaitPromise,
std::unique_ptr<EvaluateCallback>) override;
- void awaitPromise(const String16& promiseObjectId,
- const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview,
+ void awaitPromise(const String16& promiseObjectId, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview,
std::unique_ptr<AwaitPromiseCallback>) override;
void callFunctionOn(
const String16& objectId, const String16& expression,
- const Maybe<protocol::Array<protocol::Runtime::CallArgument>>&
- optionalArguments,
- const Maybe<bool>& silent, const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview, const Maybe<bool>& userGesture,
- const Maybe<bool>& awaitPromise,
+ Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
+ Maybe<bool> silent, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview, Maybe<bool> userGesture,
+ Maybe<bool> awaitPromise,
std::unique_ptr<CallFunctionOnCallback>) override;
- void releaseObject(ErrorString*, const String16& objectId) override;
- void getProperties(
- ErrorString*, const String16& objectId, const Maybe<bool>& ownProperties,
- const Maybe<bool>& accessorPropertiesOnly,
- const Maybe<bool>& generatePreview,
+ Response releaseObject(const String16& objectId) override;
+ Response getProperties(
+ const String16& objectId, Maybe<bool> ownProperties,
+ Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
internalProperties,
Maybe<protocol::Runtime::ExceptionDetails>*) override;
- void releaseObjectGroup(ErrorString*, const String16& objectGroup) override;
- void runIfWaitingForDebugger(ErrorString*) override;
- void setCustomObjectFormatterEnabled(ErrorString*, bool) override;
- void discardConsoleEntries(ErrorString*) override;
- void compileScript(ErrorString*, const String16& expression,
- const String16& sourceURL, bool persistScript,
- const Maybe<int>& executionContextId, Maybe<String16>*,
- Maybe<protocol::Runtime::ExceptionDetails>*) override;
- void runScript(const String16&, const Maybe<int>& executionContextId,
- const Maybe<String16>& objectGroup, const Maybe<bool>& silent,
- const Maybe<bool>& includeCommandLineAPI,
- const Maybe<bool>& returnByValue,
- const Maybe<bool>& generatePreview,
- const Maybe<bool>& awaitPromise,
+ Response releaseObjectGroup(const String16& objectGroup) override;
+ Response runIfWaitingForDebugger() override;
+ Response setCustomObjectFormatterEnabled(bool) override;
+ Response discardConsoleEntries() override;
+ Response compileScript(const String16& expression, const String16& sourceURL,
+ bool persistScript, Maybe<int> executionContextId,
+ Maybe<String16>*,
+ Maybe<protocol::Runtime::ExceptionDetails>*) override;
+ void runScript(const String16&, Maybe<int> executionContextId,
+ Maybe<String16> objectGroup, Maybe<bool> silent,
+ Maybe<bool> includeCommandLineAPI, Maybe<bool> returnByValue,
+ Maybe<bool> generatePreview, Maybe<bool> awaitPromise,
std::unique_ptr<RunScriptCallback>) override;
void reset();
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.cc b/deps/v8/src/inspector/v8-schema-agent-impl.cc
index 9eed5bdf81..d7b6cdcb01 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.cc
@@ -16,14 +16,14 @@ V8SchemaAgentImpl::V8SchemaAgentImpl(V8InspectorSessionImpl* session,
V8SchemaAgentImpl::~V8SchemaAgentImpl() {}
-void V8SchemaAgentImpl::getDomains(
- ErrorString*,
+Response V8SchemaAgentImpl::getDomains(
std::unique_ptr<protocol::Array<protocol::Schema::Domain>>* result) {
std::vector<std::unique_ptr<protocol::Schema::Domain>> domains =
m_session->supportedDomainsImpl();
*result = protocol::Array<protocol::Schema::Domain>::create();
for (size_t i = 0; i < domains.size(); ++i)
(*result)->addItem(std::move(domains[i]));
+ return Response::OK();
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-schema-agent-impl.h b/deps/v8/src/inspector/v8-schema-agent-impl.h
index 6150201f8b..e733aa0d5a 100644
--- a/deps/v8/src/inspector/v8-schema-agent-impl.h
+++ b/deps/v8/src/inspector/v8-schema-agent-impl.h
@@ -13,7 +13,7 @@ namespace v8_inspector {
class V8InspectorSessionImpl;
-using protocol::ErrorString;
+using protocol::Response;
class V8SchemaAgentImpl : public protocol::Schema::Backend {
public:
@@ -21,8 +21,7 @@ class V8SchemaAgentImpl : public protocol::Schema::Backend {
protocol::DictionaryValue* state);
~V8SchemaAgentImpl() override;
- void getDomains(
- ErrorString*,
+ Response getDomains(
std::unique_ptr<protocol::Array<protocol::Schema::Domain>>*) override;
private:
diff --git a/deps/v8/src/inspector/v8-value-copier.cc b/deps/v8/src/inspector/v8-value-copier.cc
index 09d86b7b98..fcaeb618ca 100644
--- a/deps/v8/src/inspector/v8-value-copier.cc
+++ b/deps/v8/src/inspector/v8-value-copier.cc
@@ -73,6 +73,96 @@ class V8ValueCopier {
int m_calls;
};
+protocol::Response toProtocolValue(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value, int maxDepth,
+ std::unique_ptr<protocol::Value>* result) {
+ using protocol::Response;
+ if (value.IsEmpty()) {
+ UNREACHABLE();
+ return Response::InternalError();
+ }
+
+ if (!maxDepth) return Response::Error("Object reference chain is too long");
+ maxDepth--;
+
+ if (value->IsNull() || value->IsUndefined()) {
+ *result = protocol::Value::null();
+ return Response::OK();
+ }
+ if (value->IsBoolean()) {
+ *result =
+ protocol::FundamentalValue::create(value.As<v8::Boolean>()->Value());
+ return Response::OK();
+ }
+ if (value->IsNumber()) {
+ double doubleValue = value.As<v8::Number>()->Value();
+ int intValue = static_cast<int>(doubleValue);
+ if (intValue == doubleValue) {
+ *result = protocol::FundamentalValue::create(intValue);
+ return Response::OK();
+ }
+ *result = protocol::FundamentalValue::create(doubleValue);
+ return Response::OK();
+ }
+ if (value->IsString()) {
+ *result =
+ protocol::StringValue::create(toProtocolString(value.As<v8::String>()));
+ return Response::OK();
+ }
+ if (value->IsArray()) {
+ v8::Local<v8::Array> array = value.As<v8::Array>();
+ std::unique_ptr<protocol::ListValue> inspectorArray =
+ protocol::ListValue::create();
+ uint32_t length = array->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> value;
+ if (!array->Get(context, i).ToLocal(&value))
+ return Response::InternalError();
+ std::unique_ptr<protocol::Value> element;
+ Response response = toProtocolValue(context, value, maxDepth, &element);
+ if (!response.isSuccess()) return response;
+ inspectorArray->pushValue(std::move(element));
+ }
+ *result = std::move(inspectorArray);
+ return Response::OK();
+ }
+ if (value->IsObject()) {
+ std::unique_ptr<protocol::DictionaryValue> jsonObject =
+ protocol::DictionaryValue::create();
+ v8::Local<v8::Object> object = v8::Local<v8::Object>::Cast(value);
+ v8::Local<v8::Array> propertyNames;
+ if (!object->GetPropertyNames(context).ToLocal(&propertyNames))
+ return Response::InternalError();
+ uint32_t length = propertyNames->Length();
+ for (uint32_t i = 0; i < length; i++) {
+ v8::Local<v8::Value> name;
+ if (!propertyNames->Get(context, i).ToLocal(&name))
+ return Response::InternalError();
+ // FIXME(yurys): v8::Object should support GetOwnPropertyNames
+ if (name->IsString()) {
+ v8::Maybe<bool> hasRealNamedProperty = object->HasRealNamedProperty(
+ context, v8::Local<v8::String>::Cast(name));
+ if (!hasRealNamedProperty.IsJust() || !hasRealNamedProperty.FromJust())
+ continue;
+ }
+ v8::Local<v8::String> propertyName;
+ if (!name->ToString(context).ToLocal(&propertyName)) continue;
+ v8::Local<v8::Value> property;
+ if (!object->Get(context, name).ToLocal(&property))
+ return Response::InternalError();
+ std::unique_ptr<protocol::Value> propertyValue;
+ Response response =
+ toProtocolValue(context, property, maxDepth, &propertyValue);
+ if (!response.isSuccess()) return response;
+ jsonObject->setValue(toProtocolString(propertyName),
+ std::move(propertyValue));
+ }
+ *result = std::move(jsonObject);
+ return Response::OK();
+ }
+ return Response::Error("Object couldn't be returned by value");
+}
+
} // namespace
v8::MaybeLocal<v8::Value> copyValueFromDebuggerContext(
@@ -107,4 +197,10 @@ v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
return array->CreateDataProperty(context, index, value);
}
+protocol::Response toProtocolValue(v8::Local<v8::Context> context,
+ v8::Local<v8::Value> value,
+ std::unique_ptr<protocol::Value>* result) {
+ return toProtocolValue(context, value, 1000, result);
+}
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-value-copier.h b/deps/v8/src/inspector/v8-value-copier.h
index c24a5648a2..ee887e5ad4 100644
--- a/deps/v8/src/inspector/v8-value-copier.h
+++ b/deps/v8/src/inspector/v8-value-copier.h
@@ -5,6 +5,8 @@
#ifndef V8_INSPECTOR_V8VALUECOPIER_H_
#define V8_INSPECTOR_V8VALUECOPIER_H_
+#include "src/inspector/protocol/Protocol.h"
+
#include "include/v8.h"
namespace v8_inspector {
@@ -19,6 +21,9 @@ v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>, v8::Local<v8::Array>,
int index, v8::Local<v8::Value>);
+protocol::Response toProtocolValue(v8::Local<v8::Context>, v8::Local<v8::Value>,
+ std::unique_ptr<protocol::Value>* result);
+
} // namespace v8_inspector
#endif // V8_INSPECTOR_V8VALUECOPIER_H_
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 2628b9fb6f..d14b1a1011 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -183,22 +183,6 @@ void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
-void StoreGlobalViaContextDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kSlot, kValue
- MachineType machine_types[] = {MachineType::Int32(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {SlotRegister(), ValueRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void StringCompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {LeftRegister(), RightRegister()};
@@ -233,7 +217,6 @@ void LoadWithVectorDescriptor::InitializePlatformIndependent(
machine_types);
}
-
void LoadWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
@@ -241,6 +224,24 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void LoadICProtoArrayDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kName, kSlot, kVector, kHandler
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void LoadICProtoArrayDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
+ VectorRegister(), HandlerRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void StoreWithVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kValue, kSlot, kVector
@@ -378,14 +379,35 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformIndependent(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kFunction, kSlot, kVector
- MachineType machine_types[] = {MachineType::TaggedPointer(),
- MachineType::TaggedSigned(),
- MachineType::AnyTagged()};
+ // kFunction, kActualArgumentsCount, kSlot, kVector
+ MachineType machine_types[] = {
+ MachineType::TaggedPointer(), MachineType::Int32(),
+ MachineType::TaggedSigned(), MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void BuiltinDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
+void BuiltinDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {NewTargetRegister(), ArgumentsCountRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+const Register BuiltinDescriptor::ArgumentsCountRegister() {
+ return kJavaScriptCallArgCountRegister;
+}
+const Register BuiltinDescriptor::NewTargetRegister() {
+ return kJavaScriptCallNewTargetRegister;
+}
+
void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 09dc377338..3b49041a09 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -8,6 +8,7 @@
#include <memory>
#include "src/assembler.h"
+#include "src/globals.h"
#include "src/macro-assembler.h"
namespace v8 {
@@ -20,6 +21,7 @@ class PlatformInterfaceDescriptor;
V(ContextOnly) \
V(Load) \
V(LoadWithVector) \
+ V(LoadICProtoArray) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
V(Store) \
@@ -48,7 +50,6 @@ class PlatformInterfaceDescriptor;
V(ConstructStub) \
V(ConstructTrampoline) \
V(RegExpExec) \
- V(RegExpConstructResult) \
V(CopyFastSmiOrObjectElements) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
@@ -62,6 +63,7 @@ class PlatformInterfaceDescriptor;
V(AllocateInt8x16) \
V(AllocateUint8x16) \
V(AllocateBool8x16) \
+ V(Builtin) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -82,7 +84,6 @@ class PlatformInterfaceDescriptor;
V(ArgumentAdaptor) \
V(ApiCallback) \
V(ApiGetter) \
- V(StoreGlobalViaContext) \
V(MathPowTagged) \
V(MathPowInteger) \
V(GrowArrayElements) \
@@ -93,7 +94,7 @@ class PlatformInterfaceDescriptor;
V(InterpreterCEntry) \
V(ResumeGenerator)
-class CallInterfaceDescriptorData {
+class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
CallInterfaceDescriptorData() : register_param_count_(-1), param_count_(-1) {}
@@ -389,6 +390,15 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
static const Register VectorRegister();
};
+class LoadICProtoArrayDescriptor : public LoadWithVectorDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kName, kSlot, kVector, kHandler)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadICProtoArrayDescriptor,
+ LoadWithVectorDescriptor)
+
+ static const Register HandlerRegister();
+};
+
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
public:
DEFINE_PARAMETERS(kSlot, kVector)
@@ -553,7 +563,7 @@ class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
class CallFunctionWithFeedbackAndVectorDescriptor
: public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kFunction, kSlot, kVector)
+ DEFINE_PARAMETERS(kFunction, kActualArgumentsCount, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
CallFunctionWithFeedbackAndVectorDescriptor, CallInterfaceDescriptor)
};
@@ -571,23 +581,6 @@ class RegExpExecDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
-class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kLength, kIndex, kInput)
- DECLARE_DESCRIPTOR(RegExpConstructResultDescriptor, CallInterfaceDescriptor)
-};
-
-
-class StoreGlobalViaContextDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kSlot, kValue)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StoreGlobalViaContextDescriptor,
- CallInterfaceDescriptor)
-
- static const Register SlotRegister();
- static const Register ValueRegister();
-};
-
class CopyFastSmiOrObjectElementsDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject)
@@ -615,6 +608,15 @@ class AllocateHeapNumberDescriptor : public CallInterfaceDescriptor {
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
+class BuiltinDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kNewTarget, kArgumentsCount)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BuiltinDescriptor,
+ CallInterfaceDescriptor)
+ static const Register ArgumentsCountRegister();
+ static const Register NewTargetRegister();
+};
+
class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kAllocationSite, kActualArgumentsCount,
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index dfa395095a..904a8e021d 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -31,7 +31,8 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
register_allocator_(fixed_register_count()),
bytecode_array_writer_(zone, &constant_array_builder_,
source_position_mode),
- pipeline_(&bytecode_array_writer_) {
+ pipeline_(&bytecode_array_writer_),
+ register_optimizer_(nullptr) {
DCHECK_GE(parameter_count_, 0);
DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
@@ -45,14 +46,12 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
}
if (FLAG_ignition_reo) {
- pipeline_ = new (zone) BytecodeRegisterOptimizer(
+ register_optimizer_ = new (zone) BytecodeRegisterOptimizer(
zone, &register_allocator_, fixed_register_count(), parameter_count,
pipeline_);
}
- return_position_ =
- literal ? std::max(literal->start_position(), literal->end_position() - 1)
- : kNoSourcePosition;
+ return_position_ = literal ? literal->return_position() : kNoSourcePosition;
}
Register BytecodeArrayBuilder::first_context_register() const {
@@ -75,108 +74,222 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
+ int register_count = total_register_count();
+
+ if (register_optimizer_) {
+ register_optimizer_->Flush();
+ register_count = register_optimizer_->maxiumum_register_index() + 1;
+ }
+
Handle<FixedArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
- return pipeline_->ToBytecodeArray(isolate, total_register_count(),
- parameter_count(), handler_table);
-}
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2,
- uint32_t operand3) {
- DCHECK(OperandsAreValid(bytecode, 4, operand0, operand1, operand2, operand3));
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
- &latest_source_info_);
- pipeline()->Write(&node);
+ return pipeline_->ToBytecodeArray(isolate, register_count, parameter_count(),
+ handler_table);
+}
+
+BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
+ Bytecode bytecode) {
+ BytecodeSourceInfo source_position;
+ if (latest_source_info_.is_valid()) {
+ // Statement positions need to be emitted immediately. Expression
+ // positions can be pushed back until a bytecode is found that can
+ // throw (if expression position filtering is turned on). We only
+ // invalidate the existing source position information if it is used.
+ if (latest_source_info_.is_statement() ||
+ !FLAG_ignition_filter_expression_positions ||
+ !Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+ source_position = latest_source_info_;
+ latest_source_info_.set_invalid();
+ }
+ }
+ return source_position;
}
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2) {
- DCHECK(OperandsAreValid(bytecode, 3, operand0, operand1, operand2));
- BytecodeNode node(bytecode, operand0, operand1, operand2,
- &latest_source_info_);
- pipeline()->Write(&node);
-}
+namespace {
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0,
- uint32_t operand1) {
- DCHECK(OperandsAreValid(bytecode, 2, operand0, operand1));
- BytecodeNode node(bytecode, operand0, operand1, &latest_source_info_);
- pipeline()->Write(&node);
-}
+template <OperandTypeInfo type_info>
+class UnsignedOperandHelper {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, size_t value)) {
+ DCHECK(IsValid(value));
+ return static_cast<uint32_t>(value);
+ }
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
- DCHECK(OperandsAreValid(bytecode, 1, operand0));
- BytecodeNode node(bytecode, operand0, &latest_source_info_);
- pipeline()->Write(&node);
-}
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+ DCHECK_GE(value, 0);
+ return Convert(builder, static_cast<size_t>(value));
+ }
-void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- DCHECK(OperandsAreValid(bytecode, 0));
- BytecodeNode node(bytecode, &latest_source_info_);
- pipeline()->Write(&node);
-}
+ private:
+ static bool IsValid(size_t value) {
+ switch (type_info) {
+ case OperandTypeInfo::kFixedUnsignedByte:
+ return value <= kMaxUInt8;
+ case OperandTypeInfo::kFixedUnsignedShort:
+ return value <= kMaxUInt16;
+ case OperandTypeInfo::kScalableUnsignedByte:
+ return value <= kMaxUInt32;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+};
+
+template <OperandType>
+class OperandHelper {};
+
+#define DEFINE_UNSIGNED_OPERAND_HELPER(Name, Type) \
+ template <> \
+ class OperandHelper<OperandType::k##Name> \
+ : public UnsignedOperandHelper<Type> {};
+UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+#undef DEFINE_UNSIGNED_OPERAND_HELPER
+
+template <>
+class OperandHelper<OperandType::kImm> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, int value)) {
+ return static_cast<uint32_t>(value);
+ }
+};
-void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, BytecodeLabel* label) {
- BytecodeNode node(bytecode, 0, &latest_source_info_);
- pipeline_->WriteJump(&node, label);
- LeaveBasicBlock();
-}
+template <>
+class OperandHelper<OperandType::kReg> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+ return builder->GetInputRegisterOperand(reg);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegList> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ return builder->GetInputRegisterListOperand(reg_list);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegPair> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ DCHECK_EQ(reg_list.register_count(), 2);
+ return builder->GetInputRegisterListOperand(reg_list);
+ }
+};
-void BytecodeArrayBuilder::OutputJump(Bytecode bytecode, uint32_t operand0,
- BytecodeLabel* label) {
- BytecodeNode node(bytecode, 0, operand0, &latest_source_info_);
- pipeline_->WriteJump(&node, label);
- LeaveBasicBlock();
-}
+template <>
+class OperandHelper<OperandType::kRegOut> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder, Register reg)) {
+ return builder->GetOutputRegisterOperand(reg);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOutPair> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ DCHECK_EQ(2, reg_list.register_count());
+ return builder->GetOutputRegisterListOperand(reg_list);
+ }
+};
+
+template <>
+class OperandHelper<OperandType::kRegOutTriple> {
+ public:
+ INLINE(static uint32_t Convert(BytecodeArrayBuilder* builder,
+ RegisterList reg_list)) {
+ DCHECK_EQ(3, reg_list.register_count());
+ return builder->GetOutputRegisterListOperand(reg_list);
+ }
+};
+
+} // namespace
+
+template <OperandType... operand_types>
+class BytecodeNodeBuilder {
+ public:
+ template <typename... Operands>
+ INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
+ BytecodeSourceInfo source_info,
+ Bytecode bytecode, Operands... operands)) {
+ builder->PrepareToOutputBytecode(bytecode);
+ // The "OperandHelper<operand_types>::Convert(builder, operands)..." will
+ // expand both the OperandType... and Operands... parameter packs e.g. for:
+ // BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
+ // Register, int>(..., Register reg, int immediate)
+ // the code will expand into:
+ // OperandHelper<OperandType::kReg>::Convert(builder, reg),
+ // OperandHelper<OperandType::kImm>::Convert(builder, immediate),
+ return BytecodeNode(
+ bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
+ source_info);
+ }
+};
+
+#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...) \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
+ BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
+ this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
+ operands...)); \
+ pipeline()->Write(&node); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
+ Operands... operands) { \
+ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
+ this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
+ operands...)); \
+ pipeline()->WriteJump(&node, label); \
+ LeaveBasicBlock(); \
+ }
+BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
+#undef DEFINE_BYTECODE_OUTPUT
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg,
int feedback_slot) {
switch (op) {
case Token::Value::ADD:
- Output(Bytecode::kAdd, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputAdd(reg, feedback_slot);
break;
case Token::Value::SUB:
- Output(Bytecode::kSub, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputSub(reg, feedback_slot);
break;
case Token::Value::MUL:
- Output(Bytecode::kMul, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputMul(reg, feedback_slot);
break;
case Token::Value::DIV:
- Output(Bytecode::kDiv, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputDiv(reg, feedback_slot);
break;
case Token::Value::MOD:
- Output(Bytecode::kMod, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputMod(reg, feedback_slot);
break;
case Token::Value::BIT_OR:
- Output(Bytecode::kBitwiseOr, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputBitwiseOr(reg, feedback_slot);
break;
case Token::Value::BIT_XOR:
- Output(Bytecode::kBitwiseXor, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputBitwiseXor(reg, feedback_slot);
break;
case Token::Value::BIT_AND:
- Output(Bytecode::kBitwiseAnd, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputBitwiseAnd(reg, feedback_slot);
break;
case Token::Value::SHL:
- Output(Bytecode::kShiftLeft, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputShiftLeft(reg, feedback_slot);
break;
case Token::Value::SAR:
- Output(Bytecode::kShiftRight, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputShiftRight(reg, feedback_slot);
break;
case Token::Value::SHR:
- Output(Bytecode::kShiftRightLogical, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputShiftRightLogical(reg, feedback_slot);
break;
default:
UNREACHABLE();
@@ -187,21 +300,21 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
int feedback_slot) {
if (op == Token::Value::ADD) {
- Output(Bytecode::kInc, UnsignedOperand(feedback_slot));
+ OutputInc(feedback_slot);
} else {
DCHECK_EQ(op, Token::Value::SUB);
- Output(Bytecode::kDec, UnsignedOperand(feedback_slot));
+ OutputDec(feedback_slot);
}
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LogicalNot() {
- Output(Bytecode::kToBooleanLogicalNot);
+ OutputToBooleanLogicalNot();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
- Output(Bytecode::kTypeOf);
+ OutputTypeOf();
return *this;
}
@@ -209,38 +322,31 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
Token::Value op, Register reg, int feedback_slot) {
switch (op) {
case Token::Value::EQ:
- Output(Bytecode::kTestEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestEqual(reg, feedback_slot);
break;
case Token::Value::NE:
- Output(Bytecode::kTestNotEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestNotEqual(reg, feedback_slot);
break;
case Token::Value::EQ_STRICT:
- Output(Bytecode::kTestEqualStrict, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestEqualStrict(reg, feedback_slot);
break;
case Token::Value::LT:
- Output(Bytecode::kTestLessThan, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestLessThan(reg, feedback_slot);
break;
case Token::Value::GT:
- Output(Bytecode::kTestGreaterThan, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestGreaterThan(reg, feedback_slot);
break;
case Token::Value::LTE:
- Output(Bytecode::kTestLessThanOrEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestLessThanOrEqual(reg, feedback_slot);
break;
case Token::Value::GTE:
- Output(Bytecode::kTestGreaterThanOrEqual, RegisterOperand(reg),
- UnsignedOperand(feedback_slot));
+ OutputTestGreaterThanOrEqual(reg, feedback_slot);
break;
case Token::Value::INSTANCEOF:
- Output(Bytecode::kTestInstanceOf, RegisterOperand(reg));
+ OutputTestInstanceOf(reg);
break;
case Token::Value::IN:
- Output(Bytecode::kTestIn, RegisterOperand(reg));
+ OutputTestIn(reg);
break;
default:
UNREACHABLE();
@@ -250,7 +356,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadConstantPoolEntry(
size_t entry) {
- Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+ OutputLdaConstant(entry);
return *this;
}
@@ -258,70 +364,82 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
v8::internal::Smi* smi) {
int32_t raw_smi = smi->value();
if (raw_smi == 0) {
- Output(Bytecode::kLdaZero);
+ OutputLdaZero();
} else {
- Output(Bytecode::kLdaSmi, SignedOperand(raw_smi));
+ OutputLdaSmi(raw_smi);
}
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
size_t entry = GetConstantPoolEntry(object);
- Output(Bytecode::kLdaConstant, UnsignedOperand(entry));
+ OutputLdaConstant(entry);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
- Output(Bytecode::kLdaUndefined);
+ OutputLdaUndefined();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNull() {
- Output(Bytecode::kLdaNull);
+ OutputLdaNull();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTheHole() {
- Output(Bytecode::kLdaTheHole);
+ OutputLdaTheHole();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadTrue() {
- Output(Bytecode::kLdaTrue);
+ OutputLdaTrue();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadFalse() {
- Output(Bytecode::kLdaFalse);
+ OutputLdaFalse();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
- Output(Bytecode::kLdar, RegisterOperand(reg));
+ if (register_optimizer_) {
+ register_optimizer_->DoLdar(reg, CurrentSourcePosition(Bytecode::kLdar));
+ } else {
+ OutputLdar(reg);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
- Output(Bytecode::kStar, RegisterOperand(reg));
+ if (register_optimizer_) {
+ register_optimizer_->DoStar(reg, CurrentSourcePosition(Bytecode::kStar));
+ } else {
+ OutputStar(reg);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
Register to) {
DCHECK(from != to);
- Output(Bytecode::kMov, RegisterOperand(from), RegisterOperand(to));
+ if (register_optimizer_) {
+ register_optimizer_->DoMov(from, to, CurrentSourcePosition(Bytecode::kMov));
+ } else {
+ OutputMov(from, to);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
TypeofMode typeof_mode) {
if (typeof_mode == INSIDE_TYPEOF) {
- Output(Bytecode::kLdaGlobalInsideTypeof, feedback_slot);
+ OutputLdaGlobalInsideTypeof(feedback_slot);
} else {
DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- Output(Bytecode::kLdaGlobal, UnsignedOperand(feedback_slot));
+ OutputLdaGlobal(feedback_slot);
}
return *this;
}
@@ -330,12 +448,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaGlobalSloppy, UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ OutputStaGlobalSloppy(name_index, feedback_slot);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaGlobalStrict, UnsignedOperand(name_index),
- UnsignedOperand(feedback_slot));
+ OutputStaGlobalStrict(name_index, feedback_slot);
}
return *this;
}
@@ -343,16 +459,22 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
int slot_index,
int depth) {
- Output(Bytecode::kLdaContextSlot, RegisterOperand(context),
- UnsignedOperand(slot_index), UnsignedOperand(depth));
+ if (context.is_current_context() && depth == 0) {
+ OutputLdaCurrentContextSlot(slot_index);
+ } else {
+ OutputLdaContextSlot(context, slot_index, depth);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
int slot_index,
int depth) {
- Output(Bytecode::kStaContextSlot, RegisterOperand(context),
- UnsignedOperand(slot_index), UnsignedOperand(depth));
+ if (context.is_current_context() && depth == 0) {
+ OutputStaCurrentContextSlot(slot_index);
+ } else {
+ OutputStaContextSlot(context, slot_index, depth);
+ }
return *this;
}
@@ -360,10 +482,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
const Handle<String> name, TypeofMode typeof_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
- Output(Bytecode::kLdaLookupSlotInsideTypeof, UnsignedOperand(name_index));
+ OutputLdaLookupSlotInsideTypeof(name_index);
} else {
DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- Output(Bytecode::kLdaLookupSlot, UnsignedOperand(name_index));
+ OutputLdaLookupSlot(name_index);
}
return *this;
}
@@ -371,24 +493,26 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
const Handle<String> name, TypeofMode typeof_mode, int slot_index,
int depth) {
- Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
- ? Bytecode::kLdaLookupContextSlotInsideTypeof
- : Bytecode::kLdaLookupContextSlot;
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(slot_index),
- UnsignedOperand(depth));
+ if (typeof_mode == INSIDE_TYPEOF) {
+ OutputLdaLookupContextSlotInsideTypeof(name_index, slot_index, depth);
+ } else {
+ DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
+ OutputLdaLookupContextSlot(name_index, slot_index, depth);
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
int depth) {
- Bytecode bytecode = (typeof_mode == INSIDE_TYPEOF)
- ? Bytecode::kLdaLookupGlobalSlotInsideTypeof
- : Bytecode::kLdaLookupGlobalSlot;
size_t name_index = GetConstantPoolEntry(name);
- Output(bytecode, UnsignedOperand(name_index), UnsignedOperand(feedback_slot),
- UnsignedOperand(depth));
+ if (typeof_mode == INSIDE_TYPEOF) {
+ OutputLdaLookupGlobalSlotInsideTypeof(name_index, feedback_slot, depth);
+ } else {
+ DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
+ OutputLdaLookupGlobalSlot(name_index, feedback_slot, depth);
+ }
return *this;
}
@@ -396,10 +520,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
const Handle<String> name, LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaLookupSlotSloppy, UnsignedOperand(name_index));
+ OutputStaLookupSlotSloppy(name_index);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaLookupSlotStrict, UnsignedOperand(name_index));
+ OutputStaLookupSlotStrict(name_index);
}
return *this;
}
@@ -407,15 +531,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
Register object, const Handle<Name> name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
- Output(Bytecode::kLdaNamedProperty, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ OutputLdaNamedProperty(object, name_index, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
Register object, int feedback_slot) {
- Output(Bytecode::kLdaKeyedProperty, RegisterOperand(object),
- UnsignedOperand(feedback_slot));
+ OutputLdaKeyedProperty(object, feedback_slot);
return *this;
}
@@ -424,12 +546,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
LanguageMode language_mode) {
size_t name_index = GetConstantPoolEntry(name);
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaNamedPropertySloppy, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ OutputStaNamedPropertySloppy(object, name_index, feedback_slot);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaNamedPropertyStrict, RegisterOperand(object),
- UnsignedOperand(name_index), UnsignedOperand(feedback_slot));
+ OutputStaNamedPropertyStrict(object, name_index, feedback_slot);
}
return *this;
}
@@ -438,27 +558,24 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
Register object, Register key, int feedback_slot,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
- Output(Bytecode::kStaKeyedPropertySloppy, RegisterOperand(object),
- RegisterOperand(key), UnsignedOperand(feedback_slot));
+ OutputStaKeyedPropertySloppy(object, key, feedback_slot);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kStaKeyedPropertyStrict, RegisterOperand(object),
- RegisterOperand(key), UnsignedOperand(feedback_slot));
+ OutputStaKeyedPropertyStrict(object, key, feedback_slot);
}
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
int flags) {
- Output(Bytecode::kCreateClosure, UnsignedOperand(entry),
- UnsignedOperand(flags));
+ OutputCreateClosure(entry, flags);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
Handle<ScopeInfo> scope_info) {
size_t entry = GetConstantPoolEntry(scope_info);
- Output(Bytecode::kCreateBlockContext, UnsignedOperand(entry));
+ OutputCreateBlockContext(entry);
return *this;
}
@@ -466,21 +583,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
size_t name_index = GetConstantPoolEntry(name);
size_t scope_info_index = GetConstantPoolEntry(scope_info);
- Output(Bytecode::kCreateCatchContext, RegisterOperand(exception),
- UnsignedOperand(name_index), UnsignedOperand(scope_info_index));
+ OutputCreateCatchContext(exception, name_index, scope_info_index);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
- Output(Bytecode::kCreateFunctionContext, UnsignedOperand(slots));
+ OutputCreateFunctionContext(slots);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
Register object, Handle<ScopeInfo> scope_info) {
size_t scope_info_index = GetConstantPoolEntry(scope_info);
- Output(Bytecode::kCreateWithContext, RegisterOperand(object),
- UnsignedOperand(scope_info_index));
+ OutputCreateWithContext(object, scope_info_index);
return *this;
}
@@ -488,13 +603,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
CreateArgumentsType type) {
switch (type) {
case CreateArgumentsType::kMappedArguments:
- Output(Bytecode::kCreateMappedArguments);
+ OutputCreateMappedArguments();
break;
case CreateArgumentsType::kUnmappedArguments:
- Output(Bytecode::kCreateUnmappedArguments);
+ OutputCreateUnmappedArguments();
break;
case CreateArgumentsType::kRestParameter:
- Output(Bytecode::kCreateRestParameter);
+ OutputCreateRestParameter();
break;
default:
UNREACHABLE();
@@ -505,17 +620,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArguments(
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
Handle<String> pattern, int literal_index, int flags) {
size_t pattern_entry = GetConstantPoolEntry(pattern);
- Output(Bytecode::kCreateRegExpLiteral, UnsignedOperand(pattern_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ OutputCreateRegExpLiteral(pattern_entry, literal_index, flags);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
Handle<FixedArray> constant_elements, int literal_index, int flags) {
size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
- Output(Bytecode::kCreateArrayLiteral,
- UnsignedOperand(constant_elements_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags));
+ OutputCreateArrayLiteral(constant_elements_entry, literal_index, flags);
return *this;
}
@@ -523,42 +635,43 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
Handle<FixedArray> constant_properties, int literal_index, int flags,
Register output) {
size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
- Output(Bytecode::kCreateObjectLiteral,
- UnsignedOperand(constant_properties_entry),
- UnsignedOperand(literal_index), UnsignedOperand(flags),
- RegisterOperand(output));
+ OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
+ output);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
- Output(Bytecode::kPushContext, RegisterOperand(context));
+ OutputPushContext(context);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
- Output(Bytecode::kPopContext, RegisterOperand(context));
+ OutputPopContext(context);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToObject(
Register out) {
- Output(Bytecode::kToObject, RegisterOperand(out));
+ OutputToObject(out);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToName(
Register out) {
- Output(Bytecode::kToName, RegisterOperand(out));
+ OutputToName(out);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ConvertAccumulatorToNumber(
Register out) {
- Output(Bytecode::kToNumber, RegisterOperand(out));
+ OutputToNumber(out);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+ // Flush the register optimizer when binding a label to ensure all
+ // expected registers are valid when jumping to this label.
+ if (register_optimizer_) register_optimizer_->Flush();
pipeline_->BindLabel(label);
LeaveBasicBlock();
return *this;
@@ -572,42 +685,42 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
- OutputJump(Bytecode::kJump, label);
+ OutputJump(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
// The peephole optimizer attempts to simplify JumpIfToBooleanTrue
// to JumpIfTrue.
- OutputJump(Bytecode::kJumpIfToBooleanTrue, label);
+ OutputJumpIfToBooleanTrue(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfToBooleanFalse, label);
+ OutputJumpIfToBooleanFalse(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfNull, label);
+ OutputJumpIfNull(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfUndefined, label);
+ OutputJumpIfUndefined(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
BytecodeLabel* label) {
- OutputJump(Bytecode::kJumpIfNotHole, label);
+ OutputJumpIfNotHole(label, 0);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
int loop_depth) {
- OutputJump(Bytecode::kJumpLoop, UnsignedOperand(loop_depth), label);
+ OutputJumpLoop(label, 0, loop_depth);
return *this;
}
@@ -625,44 +738,42 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
// statement's position.
latest_source_info_.ForceExpressionPosition(position);
}
- Output(Bytecode::kStackCheck);
+ OutputStackCheck();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
- Output(Bytecode::kThrow);
+ OutputThrow();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
- Output(Bytecode::kReThrow);
+ OutputReThrow();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
SetReturnPosition();
- Output(Bytecode::kReturn);
+ OutputReturn();
return_seen_in_block_ = true;
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
- Output(Bytecode::kDebugger);
+ OutputDebugger();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
Register receiver, RegisterList cache_info_triple) {
DCHECK_EQ(3, cache_info_triple.register_count());
- Output(Bytecode::kForInPrepare, RegisterOperand(receiver),
- RegisterOperand(cache_info_triple.first_register()));
+ OutputForInPrepare(receiver, cache_info_triple);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInContinue(
Register index, Register cache_length) {
- Output(Bytecode::kForInContinue, RegisterOperand(index),
- RegisterOperand(cache_length));
+ OutputForInContinue(index, cache_length);
return *this;
}
@@ -670,27 +781,36 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
Register receiver, Register index, RegisterList cache_type_array_pair,
int feedback_slot) {
DCHECK_EQ(2, cache_type_array_pair.register_count());
- Output(Bytecode::kForInNext, RegisterOperand(receiver),
- RegisterOperand(index),
- RegisterOperand(cache_type_array_pair.first_register()),
- UnsignedOperand(feedback_slot));
+ OutputForInNext(receiver, index, cache_type_array_pair, feedback_slot);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
- Output(Bytecode::kForInStep, RegisterOperand(index));
+ OutputForInStep(index);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreModuleVariable(int cell_index,
+ int depth) {
+ OutputStaModuleVariable(cell_index, depth);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadModuleVariable(int cell_index,
+ int depth) {
+ OutputLdaModuleVariable(cell_index, depth);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::SuspendGenerator(
Register generator) {
- Output(Bytecode::kSuspendGenerator, RegisterOperand(generator));
+ OutputSuspendGenerator(generator);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ResumeGenerator(
Register generator) {
- Output(Bytecode::kResumeGenerator, RegisterOperand(generator));
+ OutputResumeGenerator(generator);
return *this;
}
@@ -722,18 +842,18 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
RegisterList args,
int feedback_slot,
+ Call::CallType call_type,
TailCallMode tail_call_mode) {
if (tail_call_mode == TailCallMode::kDisallow) {
- Output(Bytecode::kCall, RegisterOperand(callable),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- UnsignedOperand(feedback_slot));
+ if (call_type == Call::NAMED_PROPERTY_CALL ||
+ call_type == Call::KEYED_PROPERTY_CALL) {
+ OutputCallProperty(callable, args, args.register_count(), feedback_slot);
+ } else {
+ OutputCall(callable, args, args.register_count(), feedback_slot);
+ }
} else {
DCHECK(tail_call_mode == TailCallMode::kAllow);
- Output(Bytecode::kTailCall, RegisterOperand(callable),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- UnsignedOperand(feedback_slot));
+ OutputTailCall(callable, args, args.register_count(), feedback_slot);
}
return *this;
}
@@ -741,10 +861,7 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
RegisterList args,
int feedback_slot_id) {
- Output(Bytecode::kNew, RegisterOperand(constructor),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- UnsignedOperand(feedback_slot_id));
+ OutputNew(constructor, args, args.register_count(), feedback_slot_id);
return *this;
}
@@ -752,17 +869,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, RegisterList args) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
- Bytecode bytecode;
- uint32_t id;
if (IntrinsicsHelper::IsSupported(function_id)) {
- bytecode = Bytecode::kInvokeIntrinsic;
- id = static_cast<uint32_t>(IntrinsicsHelper::FromRuntimeId(function_id));
+ IntrinsicsHelper::IntrinsicId intrinsic_id =
+ IntrinsicsHelper::FromRuntimeId(function_id);
+ OutputInvokeIntrinsic(static_cast<int>(intrinsic_id), args,
+ args.register_count());
} else {
- bytecode = Bytecode::kCallRuntime;
- id = static_cast<uint32_t>(function_id);
+ OutputCallRuntime(static_cast<int>(function_id), args,
+ args.register_count());
}
- Output(bytecode, id, RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()));
return *this;
}
@@ -782,10 +897,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
DCHECK(Bytecodes::SizeForUnsignedOperand(function_id) <= OperandSize::kShort);
DCHECK_EQ(2, return_pair.register_count());
- Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()),
- RegisterOperand(return_pair.first_register()));
+ OutputCallRuntimeForPair(static_cast<uint16_t>(function_id), args,
+ args.register_count(), return_pair);
return *this;
}
@@ -797,19 +910,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
RegisterList args) {
- Output(Bytecode::kCallJSRuntime, UnsignedOperand(context_index),
- RegisterOperand(args.first_register()),
- UnsignedOperand(args.register_count()));
+ OutputCallJSRuntime(context_index, args, args.register_count());
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
- Output(Bytecode::kDeletePropertySloppy, RegisterOperand(object));
+ OutputDeletePropertySloppy(object);
} else {
DCHECK_EQ(language_mode, STRICT);
- Output(Bytecode::kDeletePropertyStrict, RegisterOperand(object));
+ OutputDeletePropertyStrict(object);
}
return *this;
}
@@ -850,88 +961,50 @@ bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
}
}
-bool BytecodeArrayBuilder::OperandsAreValid(
- Bytecode bytecode, int operand_count, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3) const {
- if (Bytecodes::NumberOfOperands(bytecode) != operand_count) {
- return false;
- }
-
- uint32_t operands[] = {operand0, operand1, operand2, operand3};
- const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
- for (int i = 0; i < operand_count; ++i) {
- switch (operand_types[i]) {
- case OperandType::kNone:
+bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
+ if (reg_list.register_count() == 0) {
+ return reg_list.first_register() == Register(0);
+ } else {
+ int first_reg_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); i++) {
+ if (!RegisterIsValid(Register(first_reg_index + i))) {
return false;
- case OperandType::kFlag8:
- case OperandType::kIntrinsicId:
- if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
- OperandSize::kByte) {
- return false;
- }
- break;
- case OperandType::kRuntimeId:
- if (Bytecodes::SizeForUnsignedOperand(operands[i]) >
- OperandSize::kShort) {
- return false;
- }
- break;
- case OperandType::kIdx:
- // TODO(leszeks): Possibly split this up into constant pool indices and
- // other indices, for checking.
- break;
- case OperandType::kUImm:
- case OperandType::kImm:
- break;
- case OperandType::kRegList: {
- CHECK_LT(i, operand_count - 1);
- CHECK(operand_types[i + 1] == OperandType::kRegCount);
- int reg_count = static_cast<int>(operands[i + 1]);
- if (reg_count == 0) {
- return Register::FromOperand(operands[i]) == Register(0);
- } else {
- Register start = Register::FromOperand(operands[i]);
- Register end(start.index() + reg_count - 1);
- if (!RegisterIsValid(start) || !RegisterIsValid(end) || start > end) {
- return false;
- }
- }
- i++; // Skip past kRegCount operand.
- break;
- }
- case OperandType::kReg:
- case OperandType::kRegOut: {
- Register reg = Register::FromOperand(operands[i]);
- if (!RegisterIsValid(reg)) {
- return false;
- }
- break;
}
- case OperandType::kRegOutPair:
- case OperandType::kRegPair: {
- Register reg0 = Register::FromOperand(operands[i]);
- Register reg1 = Register(reg0.index() + 1);
- if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1)) {
- return false;
- }
- break;
- }
- case OperandType::kRegOutTriple: {
- Register reg0 = Register::FromOperand(operands[i]);
- Register reg1 = Register(reg0.index() + 1);
- Register reg2 = Register(reg0.index() + 2);
- if (!RegisterIsValid(reg0) || !RegisterIsValid(reg1) ||
- !RegisterIsValid(reg2)) {
- return false;
- }
- break;
- }
- case OperandType::kRegCount:
- UNREACHABLE(); // Dealt with in kRegList above.
}
+ return true;
}
+}
+
+void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
+ if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
+}
+
+uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
+ DCHECK(RegisterIsValid(reg));
+ if (register_optimizer_) reg = register_optimizer_->GetInputRegister(reg);
+ return static_cast<uint32_t>(reg.ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetOutputRegisterOperand(Register reg) {
+ DCHECK(RegisterIsValid(reg));
+ if (register_optimizer_) register_optimizer_->PrepareOutputRegister(reg);
+ return static_cast<uint32_t>(reg.ToOperand());
+}
+
+uint32_t BytecodeArrayBuilder::GetInputRegisterListOperand(
+ RegisterList reg_list) {
+ DCHECK(RegisterListIsValid(reg_list));
+ if (register_optimizer_)
+ reg_list = register_optimizer_->GetInputRegisterList(reg_list);
+ return static_cast<uint32_t>(reg_list.first_register().ToOperand());
+}
- return true;
+uint32_t BytecodeArrayBuilder::GetOutputRegisterListOperand(
+ RegisterList reg_list) {
+ DCHECK(RegisterListIsValid(reg_list));
+ if (register_optimizer_)
+ register_optimizer_->PrepareOutputRegisterList(reg_list);
+ return static_cast<uint32_t>(reg_list.first_register().ToOperand());
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index a9fa7a7bb5..cc5b5e782b 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -6,6 +6,8 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#include "src/ast/ast.h"
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
@@ -24,9 +26,11 @@ namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
class BytecodePipelineStage;
+class BytecodeRegisterOptimizer;
class Register;
-class BytecodeArrayBuilder final : public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
+ : public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeArrayBuilder(
Isolate* isolate, Zone* zone, int parameter_count, int context_count,
@@ -95,6 +99,14 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeArrayBuilder& StoreContextSlot(Register context, int slot_index,
int depth);
+ // Load from a module variable into the accumulator. |depth| is the depth of
+ // the current context relative to the module context.
+ BytecodeArrayBuilder& LoadModuleVariable(int cell_index, int depth);
+
+ // Store from the accumulator into a module variable. |depth| is the depth of
+ // the current context relative to the module context.
+ BytecodeArrayBuilder& StoreModuleVariable(int cell_index, int depth);
+
// Register-accumulator transfers.
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
@@ -183,10 +195,11 @@ class BytecodeArrayBuilder final : public ZoneObject {
// Call a JS function. The JSFunction or Callable to be called should be in
// |callable|. The arguments should be in |args|, with the receiver in
- // |args[0]|. Type feedback is recorded in the |feedback_slot| in the type
- // feedback vector.
+ // |args[0]|. The call type of the expression is in |call_type|. Type feedback
+ // is recorded in the |feedback_slot| in the type feedback vector.
BytecodeArrayBuilder& Call(
Register callable, RegisterList args, int feedback_slot,
+ Call::CallType call_type,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
// Call the new operator. The accumulator holds the |new_target|.
@@ -317,6 +330,12 @@ class BytecodeArrayBuilder final : public ZoneObject {
bool RequiresImplicitReturn() const { return !return_seen_in_block_; }
+ // Returns the raw operand value for the given register or register list.
+ uint32_t GetInputRegisterOperand(Register reg);
+ uint32_t GetOutputRegisterOperand(Register reg);
+ uint32_t GetInputRegisterListOperand(RegisterList reg_list);
+ uint32_t GetOutputRegisterListOperand(RegisterList reg_list);
+
// Accessors
BytecodeRegisterAllocator* register_allocator() {
return &register_allocator_;
@@ -328,41 +347,22 @@ class BytecodeArrayBuilder final : public ZoneObject {
private:
friend class BytecodeRegisterAllocator;
+ template <OperandType... operand_types>
+ friend class BytecodeNodeBuilder;
- INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3));
- INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2));
- INLINE(void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1));
- INLINE(void Output(Bytecode bytecode, uint32_t operand0));
- INLINE(void Output(Bytecode bytecode));
+ // Returns the current source position for the given |bytecode|.
+ INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
- INLINE(void OutputJump(Bytecode bytecode, BytecodeLabel* label));
- INLINE(void OutputJump(Bytecode bytecode, uint32_t operand0,
- BytecodeLabel* label));
+#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
+ template <typename... Operands> \
+ INLINE(void Output##Name(Operands... operands)); \
+ template <typename... Operands> \
+ INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
+ BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
+#undef DECLARE_OPERAND_TYPE_INFO
bool RegisterIsValid(Register reg) const;
- bool OperandsAreValid(Bytecode bytecode, int operand_count,
- uint32_t operand0 = 0, uint32_t operand1 = 0,
- uint32_t operand2 = 0, uint32_t operand3 = 0) const;
-
- static uint32_t RegisterOperand(Register reg) {
- return static_cast<uint32_t>(reg.ToOperand());
- }
-
- static uint32_t SignedOperand(int value) {
- return static_cast<uint32_t>(value);
- }
-
- static uint32_t UnsignedOperand(int value) {
- DCHECK_GE(value, 0);
- return static_cast<uint32_t>(value);
- }
-
- static uint32_t UnsignedOperand(size_t value) {
- DCHECK_LE(value, kMaxUInt32);
- return static_cast<uint32_t>(value);
- }
+ bool RegisterListIsValid(RegisterList reg_list) const;
// Set position for return.
void SetReturnPosition();
@@ -375,6 +375,8 @@ class BytecodeArrayBuilder final : public ZoneObject {
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
+ void PrepareToOutputBytecode(Bytecode bytecode);
+
void LeaveBasicBlock() { return_seen_in_block_ = false; }
BytecodeArrayWriter* bytecode_array_writer() {
@@ -403,6 +405,7 @@ class BytecodeArrayBuilder final : public ZoneObject {
BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
BytecodePipelineStage* pipeline_;
+ BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
static int const kNoFeedbackSlot = 0;
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 09226252cc..03279cbd43 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+#include "src/globals.h"
#include "src/handles.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
@@ -15,7 +16,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayIterator {
+class V8_EXPORT_PRIVATE BytecodeArrayIterator {
public:
explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index fb3876819e..28f997b534 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -94,9 +94,9 @@ void BytecodeArrayWriter::UpdateSourcePositionTable(
int bytecode_offset = static_cast<int>(bytecodes()->size());
const BytecodeSourceInfo& source_info = node->source_info();
if (source_info.is_valid()) {
- source_position_table_builder()->AddPosition(bytecode_offset,
- source_info.source_position(),
- source_info.is_statement());
+ source_position_table_builder()->AddPosition(
+ bytecode_offset, SourcePosition(source_info.source_position()),
+ source_info.is_statement());
}
}
@@ -211,8 +211,6 @@ void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
// and update the jump instruction and operand.
size_t entry = constant_array_builder()->CommitReservedEntry(
OperandSize::kShort, Smi::FromInt(delta));
- DCHECK_EQ(Bytecodes::SizeForUnsignedOperand(static_cast<uint32_t>(entry)),
- OperandSize::kShort);
jump_bytecode = GetJumpWithConstantOperand(jump_bytecode);
bytecodes()->at(jump_location) = Bytecodes::ToByte(jump_bytecode);
WriteUnalignedUInt16(operand_bytes, static_cast<uint16_t>(entry));
@@ -275,7 +273,7 @@ void BytecodeArrayWriter::PatchJump(size_t jump_target, size_t jump_location) {
void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
DCHECK(Bytecodes::IsJump(node->bytecode()));
- DCHECK_EQ(0, node->operand(0));
+ DCHECK_EQ(0u, node->operand(0));
size_t current_offset = bytecodes()->size();
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 712fcb9837..3810ca0847 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_WRITER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-pipeline.h"
#include "src/source-position-table.h"
@@ -20,7 +22,8 @@ class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
-class BytecodeArrayWriter final : public BytecodePipelineStage {
+class V8_EXPORT_PRIVATE BytecodeArrayWriter final
+ : public NON_EXPORTED_BASE(BytecodePipelineStage) {
public:
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
diff --git a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
index 188d610890..7350981c73 100644
--- a/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-dead-code-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_DEAD_CODE_OPTIMIZER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-pipeline.h"
namespace v8 {
@@ -13,8 +15,9 @@ namespace interpreter {
// An optimization stage for eliminating obviously dead code in bytecode
// generation.
-class BytecodeDeadCodeOptimizer final : public BytecodePipelineStage,
- public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeDeadCodeOptimizer final
+ : public NON_EXPORTED_BASE(BytecodePipelineStage),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit BytecodeDeadCodeOptimizer(BytecodePipelineStage* next_stage);
diff --git a/deps/v8/src/interpreter/bytecode-decoder.h b/deps/v8/src/interpreter/bytecode-decoder.h
index d1749efb7f..51d0e41ff7 100644
--- a/deps/v8/src/interpreter/bytecode-decoder.h
+++ b/deps/v8/src/interpreter/bytecode-decoder.h
@@ -7,6 +7,7 @@
#include <iosfwd>
+#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
@@ -14,7 +15,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeDecoder final {
+class V8_EXPORT_PRIVATE BytecodeDecoder final {
public:
// Decodes a register operand in a byte array.
static Register DecodeRegisterOperand(const uint8_t* operand_start,
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index db5a596b85..99e76725d5 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -14,6 +14,7 @@
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
#include "src/objects.h"
+#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
namespace v8 {
@@ -361,7 +362,7 @@ void BytecodeGenerator::ControlScope::PerformCommand(Command command,
return;
}
current = current->outer();
- if (current->context() != context) {
+ if (current->context() != context && context->ShouldPopContext()) {
// Pop context to the expected depth.
// TODO(rmcilroy): Only emit a single context pop.
generator()->builder()->PopContext(current->context()->reg());
@@ -571,7 +572,11 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
generator_state_(),
loop_depth_(0),
home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
- prototype_string_(info->isolate()->factory()->prototype_string()) {
+ empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()) {
+ AstValueFactory* ast_value_factory = info->parse_info()->ast_value_factory();
+ const AstRawString* prototype_string = ast_value_factory->prototype_string();
+ ast_value_factory->Internalize(info->isolate());
+ prototype_string_ = prototype_string->string();
}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
@@ -678,6 +683,9 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
+ // Emit initializing assignments for module namespace imports (if any).
+ VisitModuleNamespaceImports();
+
// Perform a stack-check before the body.
builder()->StackCheck(info()->literal()->start_position());
@@ -826,8 +834,9 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::MODULE:
if (variable->IsExport() && variable->binding_needs_init()) {
builder()->LoadTheHole();
- VisitVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
// Nothing to do for imports.
break;
@@ -846,8 +855,9 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
- VisitVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
break;
}
case VariableLocation::CONTEXT: {
@@ -871,19 +881,38 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(variable->mode(), LET);
DCHECK(variable->IsExport());
VisitForAccumulatorValue(decl->fun());
- VisitVariableAssignment(variable, Token::INIT,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
break;
}
}
-void BytecodeGenerator::VisitDeclarations(
- ZoneList<Declaration*>* declarations) {
+void BytecodeGenerator::VisitModuleNamespaceImports() {
+ if (!scope()->is_module_scope()) return;
+
+ RegisterAllocationScope register_scope(this);
+ Register module_request = register_allocator()->NewRegister();
+
+ ModuleDescriptor* descriptor = scope()->AsModuleScope()->module();
+ for (auto entry : descriptor->namespace_imports()) {
+ builder()
+ ->LoadLiteral(Smi::FromInt(entry->module_request))
+ .StoreAccumulatorInRegister(module_request)
+ .CallRuntime(Runtime::kGetModuleNamespace, module_request);
+ Variable* var = scope()->LookupLocal(entry->local_name);
+ DCHECK_NOT_NULL(var);
+ BuildVariableAssignment(var, Token::INIT, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
+ }
+}
+
+void BytecodeGenerator::VisitDeclarations(Declaration::List* declarations) {
RegisterAllocationScope register_scope(this);
DCHECK(globals_builder()->empty());
- for (int i = 0; i < declarations->length(); i++) {
+ for (Declaration* decl : *declarations) {
RegisterAllocationScope register_scope(this);
- Visit(declarations->at(i));
+ Visit(decl);
}
if (globals_builder()->empty()) return;
@@ -1126,8 +1155,9 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
LhsKind assign_type = Property::GetAssignType(property);
switch (assign_type) {
case VARIABLE: {
- Variable* variable = expr->AsVariableProxy()->var();
- VisitVariableAssignment(variable, Token::ASSIGN, slot);
+ VariableProxy* proxy = expr->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), Token::ASSIGN, slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -1206,7 +1236,7 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Set up loop counter
Register index = register_allocator()->NewRegister();
- builder()->LoadLiteral(Smi::FromInt(0));
+ builder()->LoadLiteral(Smi::kZero);
builder()->StoreAccumulatorInRegister(index);
// The loop
@@ -1374,11 +1404,12 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
builder()->CallRuntime(Runtime::kToFastProperties, literal);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
- Variable* var = expr->class_variable_proxy()->var();
+ VariableProxy* proxy = expr->class_variable_proxy();
FeedbackVectorSlot slot = expr->NeedsProxySlot()
? expr->ProxySlot()
: FeedbackVectorSlot::Invalid();
- VisitVariableAssignment(var, Token::INIT, slot);
+ BuildVariableAssignment(proxy->var(), Token::INIT, slot,
+ HoleCheckMode::kElided);
}
}
@@ -1541,11 +1572,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
FastCloneShallowObjectStub::IsSupported(expr),
FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
expr->ComputeFlags());
- // Allocate in the outer scope since this register is used to return the
- // expression's results to the caller.
+ // If constant properties is an empty fixed array, use our cached
+ // empty_fixed_array to ensure it's only added to the constant pool once.
+ Handle<FixedArray> constant_properties = expr->properties_count() == 0
+ ? empty_fixed_array()
+ : expr->constant_properties();
Register literal = register_allocator()->NewRegister();
- builder()->CreateObjectLiteral(expr->constant_properties(),
- expr->literal_index(), flags, literal);
+ builder()->CreateObjectLiteral(constant_properties, expr->literal_index(),
+ flags, literal);
// Store computed values into the literal.
int property_index = 0;
@@ -1752,17 +1786,13 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
builder()->SetExpressionPosition(proxy);
- VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+ BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
}
-void BytecodeGenerator::BuildHoleCheckForVariableLoad(Variable* variable) {
- if (variable->binding_needs_init()) {
- BuildThrowIfHole(variable->name());
- }
-}
-
-void BytecodeGenerator::VisitVariableLoad(Variable* variable,
+void BytecodeGenerator::BuildVariableLoad(Variable* variable,
FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode,
TypeofMode typeof_mode) {
switch (variable->location()) {
case VariableLocation::LOCAL: {
@@ -1771,7 +1801,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// VisitForRegisterScope, in order to avoid register aliasing if
// subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case VariableLocation::PARAMETER: {
@@ -1782,7 +1814,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// VisitForRegisterScope, in order to avoid register aliasing if
// subsequent expressions assign to the same variable.
builder()->LoadAccumulatorWithRegister(source);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case VariableLocation::UNALLOCATED: {
@@ -1801,7 +1835,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
}
builder()->LoadContextSlot(context_reg, variable->index(), depth);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case VariableLocation::LOOKUP: {
@@ -1812,7 +1848,9 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
execution_context()->ContextChainDepth(local_variable->scope());
builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
local_variable->index(), depth);
- BuildHoleCheckForVariableLoad(variable);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
+ }
break;
}
case DYNAMIC_GLOBAL: {
@@ -1827,36 +1865,21 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
break;
}
case VariableLocation::MODULE: {
- ModuleDescriptor* descriptor = scope()->GetModuleScope()->module();
- if (variable->IsExport()) {
- auto it = descriptor->regular_exports().find(variable->raw_name());
- DCHECK(it != descriptor->regular_exports().end());
- Register export_name = register_allocator()->NewRegister();
- builder()
- ->LoadLiteral(it->second->export_name->string())
- .StoreAccumulatorInRegister(export_name)
- .CallRuntime(Runtime::kLoadModuleExport, export_name);
- } else {
- auto it = descriptor->regular_imports().find(variable->raw_name());
- DCHECK(it != descriptor->regular_imports().end());
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->LoadLiteral(it->second->import_name->string())
- .StoreAccumulatorInRegister(args[0])
- .LoadLiteral(Smi::FromInt(it->second->module_request))
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kLoadModuleImport, args);
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ builder()->LoadModuleVariable(variable->index(), depth);
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ BuildThrowIfHole(variable->name());
}
- BuildHoleCheckForVariableLoad(variable);
break;
}
}
}
-void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
- Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
+void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
+ Variable* variable, FeedbackVectorSlot slot, HoleCheckMode hole_check_mode,
+ TypeofMode typeof_mode) {
ValueResultScope accumulator_result(this);
- VisitVariableLoad(variable, slot, typeof_mode);
+ BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
}
void BytecodeGenerator::BuildReturn() {
@@ -1911,29 +1934,26 @@ void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
- if (op != Token::INIT) {
- // Perform an initialization check for let/const declared variables.
- // E.g. let x = (x = 20); is not allowed.
- BuildThrowIfHole(variable->name());
- } else {
- DCHECK(variable->is_this() && variable->mode() == CONST &&
- op == Token::INIT);
+ if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
BuildThrowIfNotHole(variable->name());
+ } else {
+ // Perform an initialization check for let/const declared variables.
+ // E.g. let x = (x = 20); is not allowed.
+ DCHECK(IsLexicalVariableMode(variable->mode()));
+ BuildThrowIfHole(variable->name());
}
}
-void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
Token::Value op,
- FeedbackVectorSlot slot) {
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode) {
VariableMode mode = variable->mode();
RegisterAllocationScope assignment_register_scope(this);
BytecodeLabel end_label;
- bool hole_check_required =
- variable->binding_needs_init() &&
- (op != Token::INIT || (mode == CONST && variable->is_this()));
switch (variable->location()) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
@@ -1944,7 +1964,7 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
destination = Register(variable->index());
}
- if (hole_check_required) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
builder()
@@ -1979,7 +1999,7 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
context_reg = execution_context()->reg();
}
- if (hole_check_required) {
+ if (hole_check_mode == HoleCheckMode::kRequired) {
// Load destination to check for hole.
Register value_temp = register_allocator()->NewRegister();
builder()
@@ -2014,18 +2034,16 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
// assignments for them.
DCHECK(variable->IsExport());
- ModuleDescriptor* mod = scope()->GetModuleScope()->module();
- // There may be several export names for this local name, but it doesn't
- // matter which one we pick, as they all map to the same cell.
- auto it = mod->regular_exports().find(variable->raw_name());
- DCHECK(it != mod->regular_exports().end());
-
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->StoreAccumulatorInRegister(args[1])
- .LoadLiteral(it->second->export_name->string())
- .StoreAccumulatorInRegister(args[0])
- .CallRuntime(Runtime::kStoreModuleExport, args);
+ int depth = execution_context()->ContextChainDepth(variable->scope());
+ if (hole_check_mode == HoleCheckMode::kRequired) {
+ Register value_temp = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(value_temp)
+ .LoadModuleVariable(variable->index(), depth);
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+ builder()->StoreModuleVariable(variable->index(), depth);
break;
}
}
@@ -2087,7 +2105,8 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->target()->AsVariableProxy();
- VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
+ BuildVariableLoad(proxy->var(), proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
builder()->StoreAccumulatorInRegister(old_value);
break;
}
@@ -2136,10 +2155,11 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
FeedbackVectorSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
- // TODO(oth): The VisitVariableAssignment() call is hard to reason about.
+ // TODO(oth): The BuildVariableAssignment() call is hard to reason about.
// Is the value in the accumulator safe? Yes, but scary.
- Variable* variable = expr->target()->AsVariableProxy()->var();
- VisitVariableAssignment(variable, expr->op(), slot);
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), expr->op(), slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY:
@@ -2273,10 +2293,12 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
}
}
-void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
- Property* expr) {
+void BytecodeGenerator::VisitPropertyLoadForRegister(Register obj,
+ Property* expr,
+ Register destination) {
ValueResultScope result_scope(this);
VisitPropertyLoad(obj, expr);
+ builder()->StoreAccumulatorInRegister(destination);
}
void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
@@ -2325,11 +2347,10 @@ void BytecodeGenerator::VisitProperty(Property* expr) {
}
void BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args,
- RegisterList arg_regs,
- size_t first_argument_register) {
+ RegisterList* arg_regs) {
// Visit arguments.
for (int i = 0; i < static_cast<int>(args->length()); i++) {
- VisitForRegisterValue(args->at(i), arg_regs[first_argument_register + i]);
+ VisitAndPushIntoRegisterList(args->at(i), arg_regs);
}
}
@@ -2342,11 +2363,11 @@ void BytecodeGenerator::VisitCall(Call* expr) {
}
Register callee = register_allocator()->NewRegister();
-
- // Add an argument register for the receiver.
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
- Register receiver = args[0];
+ // Grow the args list as we visit receiver / arguments to avoid allocating all
+ // the registers up-front. Otherwise these registers are unavailable during
+ // receiver / argument visiting and we can end up with memory leaks due to
+ // registers keeping objects alive.
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
@@ -2354,54 +2375,55 @@ void BytecodeGenerator::VisitCall(Call* expr) {
case Call::NAMED_PROPERTY_CALL:
case Call::KEYED_PROPERTY_CALL: {
Property* property = callee_expr->AsProperty();
- VisitForAccumulatorValue(property->obj());
- builder()->StoreAccumulatorInRegister(receiver);
- VisitPropertyLoadForAccumulator(receiver, property);
- builder()->StoreAccumulatorInRegister(callee);
+ VisitAndPushIntoRegisterList(property->obj(), &args);
+ VisitPropertyLoadForRegister(args[0], property, callee);
break;
}
case Call::GLOBAL_CALL: {
// Receiver is undefined for global calls.
- builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ BuildPushUndefinedIntoRegisterList(&args);
// Load callee as a global variable.
VariableProxy* proxy = callee_expr->AsVariableProxy();
- VisitVariableLoadForAccumulatorValue(proxy->var(),
- proxy->VariableFeedbackSlot());
+ BuildVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
builder()->StoreAccumulatorInRegister(callee);
break;
}
- case Call::LOOKUP_SLOT_CALL:
- case Call::POSSIBLY_EVAL_CALL: {
- if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
+ case Call::WITH_CALL: {
+ Register receiver = register_allocator()->GrowRegisterList(&args);
+ DCHECK(callee_expr->AsVariableProxy()->var()->IsLookupSlot());
+ {
RegisterAllocationScope inner_register_scope(this);
Register name = register_allocator()->NewRegister();
// Call %LoadLookupSlotForCall to get the callee and receiver.
DCHECK(Register::AreContiguous(callee, receiver));
RegisterList result_pair(callee.index(), 2);
+ USE(receiver);
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
->LoadLiteral(variable->name())
.StoreAccumulatorInRegister(name)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
result_pair);
- break;
}
- // Fall through.
- DCHECK_EQ(call_type, Call::POSSIBLY_EVAL_CALL);
+ break;
}
case Call::OTHER_CALL: {
- builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
+ BuildPushUndefinedIntoRegisterList(&args);
VisitForRegisterValue(callee_expr, callee);
break;
}
case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Register receiver = register_allocator()->GrowRegisterList(&args);
Property* property = callee_expr->AsProperty();
VisitNamedSuperPropertyLoad(property, receiver);
builder()->StoreAccumulatorInRegister(callee);
break;
}
case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Register receiver = register_allocator()->GrowRegisterList(&args);
Property* property = callee_expr->AsProperty();
VisitKeyedSuperPropertyLoad(property, receiver);
builder()->StoreAccumulatorInRegister(callee);
@@ -2414,12 +2436,12 @@ void BytecodeGenerator::VisitCall(Call* expr) {
// Evaluate all arguments to the function call and store in sequential args
// registers.
- VisitArguments(expr->arguments(), args, 1);
+ VisitArguments(expr->arguments(), &args);
+ CHECK_EQ(expr->arguments()->length() + 1, args.register_count());
// Resolve callee for a potential direct eval call. This block will mutate the
// callee value.
- if (call_type == Call::POSSIBLY_EVAL_CALL &&
- expr->arguments()->length() > 0) {
+ if (expr->is_possibly_eval() && expr->arguments()->length() > 0) {
RegisterAllocationScope inner_register_scope(this);
// Set up arguments for ResolvePossiblyDirectEval by copying callee, source
// strings and function closure, and loading language and
@@ -2445,18 +2467,9 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->SetExpressionPosition(expr);
- int feedback_slot_index;
- if (expr->CallFeedbackICSlot().IsInvalid()) {
- DCHECK(call_type == Call::POSSIBLY_EVAL_CALL);
- // Valid type feedback slots can only be greater than kReservedIndexCount.
- // We use 0 to indicate an invalid slot id. Statically assert that 0 cannot
- // be a valid slot id.
- STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- feedback_slot_index = 0;
- } else {
- feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
- }
- builder()->Call(callee, args, feedback_slot_index, expr->tail_call_mode());
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->Call(callee, args, feedback_slot_index, call_type,
+ expr->tail_call_mode());
}
void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2470,9 +2483,8 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
Register constructor = this_function; // Re-use dead this_function register.
builder()->StoreAccumulatorInRegister(constructor);
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length());
- VisitArguments(expr->arguments(), args);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(expr->arguments(), &args);
// The new target is loaded into the accumulator from the
// {new.target} variable.
@@ -2480,20 +2492,20 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
// Call construct.
builder()->SetExpressionPosition(expr);
- // Valid type feedback slots can only be greater than kReservedIndexCount.
- // Assert that 0 cannot be valid a valid slot id.
- STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- // Type feedback is not necessary for super constructor calls. The type
- // information can be inferred in most cases. Slot id 0 indicates type
- // feedback is not required.
- builder()->New(constructor, args, 0);
+ // TODO(turbofan): For now we do gather feedback on super constructor
+ // calls, utilizing the existing machinery to inline the actual call
+ // target and the JSCreate for the implicit receiver allocation. This
+ // is not an ideal solution for super constructor calls, but it gets
+ // the job done for now. In the long run we might want to revisit this
+ // and come up with a better way.
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->New(constructor, args, feedback_slot_index);
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
Register constructor = VisitForRegisterValue(expr->expression());
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length());
- VisitArguments(expr->arguments(), args);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(expr->arguments(), &args);
builder()->SetExpressionPosition(expr);
// The accumulator holds new target which is the same as the
@@ -2505,18 +2517,15 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
// Allocate a register for the receiver and load it with undefined.
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length() + 1);
- Register receiver = args[0];
- builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
- VisitArguments(expr->arguments(), args, 1);
+ BuildPushUndefinedIntoRegisterList(&args);
+ VisitArguments(expr->arguments(), &args);
builder()->CallJSRuntime(expr->context_index(), args);
} else {
// Evaluate all arguments to the runtime call.
- RegisterList args =
- register_allocator()->NewRegisterList(expr->arguments()->length());
- VisitArguments(expr->arguments(), args);
+ RegisterList args = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(expr->arguments(), &args);
Runtime::FunctionId function_id = expr->function()->function_id;
builder()->CallRuntime(function_id, args);
}
@@ -2532,8 +2541,9 @@ void BytecodeGenerator::VisitTypeOf(UnaryOperation* expr) {
// Typeof does not throw a reference error on global variables, hence we
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- VisitVariableLoadForAccumulatorValue(
- proxy->var(), proxy->VariableFeedbackSlot(), INSIDE_TYPEOF);
+ BuildVariableLoadForAccumulatorValue(
+ proxy->var(), proxy->VariableFeedbackSlot(), proxy->hole_check_mode(),
+ INSIDE_TYPEOF);
} else {
VisitForAccumulatorValue(expr->expression());
}
@@ -2657,8 +2667,9 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
switch (assign_type) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
- VisitVariableLoadForAccumulatorValue(proxy->var(),
- proxy->VariableFeedbackSlot());
+ BuildVariableLoadForAccumulatorValue(proxy->var(),
+ proxy->VariableFeedbackSlot(),
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2709,7 +2720,9 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
if (is_postfix) {
// Convert old value into a number before saving it.
old_value = register_allocator()->NewRegister();
- builder()->ConvertAccumulatorToNumber(old_value);
+ builder()
+ ->ConvertAccumulatorToNumber(old_value)
+ .LoadAccumulatorWithRegister(old_value);
}
// Perform +1/-1 operation.
@@ -2721,8 +2734,9 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
FeedbackVectorSlot feedback_slot = expr->CountSlot();
switch (assign_type) {
case VARIABLE: {
- Variable* variable = expr->expression()->AsVariableProxy()->var();
- VisitVariableAssignment(variable, expr->op(), feedback_slot);
+ VariableProxy* proxy = expr->expression()->AsVariableProxy();
+ BuildVariableAssignment(proxy->var(), expr->op(), feedback_slot,
+ proxy->hole_check_mode());
break;
}
case NAMED_PROPERTY: {
@@ -2821,7 +2835,7 @@ void BytecodeGenerator::VisitLogicalOrExpression(BinaryOperation* binop) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
- if (left->ToBooleanIsTrue() || right->ToBooleanIsTrue()) {
+ if (left->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
} else if (left->ToBooleanIsFalse() && right->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
@@ -2856,7 +2870,7 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
if (execution_result()->IsTest()) {
TestResultScope* test_result = execution_result()->AsTest();
- if (left->ToBooleanIsFalse() || right->ToBooleanIsFalse()) {
+ if (left->ToBooleanIsFalse()) {
builder()->Jump(test_result->NewElseLabel());
} else if (left->ToBooleanIsTrue() && right->ToBooleanIsTrue()) {
builder()->Jump(test_result->NewThenLabel());
@@ -3019,8 +3033,9 @@ void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
? CreateArgumentsType::kUnmappedArguments
: CreateArgumentsType::kMappedArguments;
builder()->CreateArguments(type);
- VisitVariableAssignment(variable, Token::ASSIGN,
- FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::ASSIGN,
+ FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
@@ -3030,7 +3045,8 @@ void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
// variable.
builder()->CreateArguments(CreateArgumentsType::kRestParameter);
DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- VisitVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
@@ -3038,7 +3054,8 @@ void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
// Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
- VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
}
void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
@@ -3046,7 +3063,8 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// Store the new target we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::new_target());
- VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
+ BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+ HoleCheckMode::kElided);
// TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
// and we need to make sure {BytecodeRegisterOptimizer} flushes its state
@@ -3120,6 +3138,28 @@ void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
builder()->StoreAccumulatorInRegister(destination);
}
+// Visits the expression |expr| and pushes the result into a new register
+// added to the end of |reg_list|.
+void BytecodeGenerator::VisitAndPushIntoRegisterList(Expression* expr,
+ RegisterList* reg_list) {
+ {
+ ValueResultScope register_scope(this);
+ Visit(expr);
+ }
+ // Grow the register list after visiting the expression to avoid reserving
+ // the register across the expression evaluation, which could cause memory
+ // leaks for deep expressions due to dead objects being kept alive by pointers
+ // in registers.
+ Register destination = register_allocator()->GrowRegisterList(reg_list);
+ builder()->StoreAccumulatorInRegister(destination);
+}
+
+void BytecodeGenerator::BuildPushUndefinedIntoRegisterList(
+ RegisterList* reg_list) {
+ Register reg = register_allocator()->GrowRegisterList(reg_list);
+ builder()->LoadUndefined().StoreAccumulatorInRegister(reg);
+}
+
// Visits the expression |expr| for testing its boolean value and jumping to the
// |then| or |other| label depending on value and short-circuit semantics
void BytecodeGenerator::VisitForTest(Expression* expr,
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 03067de08d..bcab9975d0 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -32,7 +32,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
#undef DECLARE_VISIT
// Visiting function for declarations list and statements are overridden.
- void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(ZoneList<Statement*>* statments);
private:
@@ -72,10 +72,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Used by flow control routines to evaluate loop condition.
void VisitCondition(Expression* expr);
- // Visit the arguments expressions in |args| and store them in |args_regs|
- // starting at register |first_argument_register| in the list.
- void VisitArguments(ZoneList<Expression*>* args, RegisterList arg_regs,
- size_t first_argument_register = 0);
+ // Visit the arguments expressions in |args| and store them in |args_regs|,
+ // growing |args_regs| for each argument visited.
+ void VisitArguments(ZoneList<Expression*>* args, RegisterList* arg_regs);
// Visit a keyed super property load. The optional
// |opt_receiver_out| register will have the receiver stored to it
@@ -92,18 +91,19 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Register opt_receiver_out);
void VisitPropertyLoad(Register obj, Property* expr);
- void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
+ void VisitPropertyLoadForRegister(Register obj, Property* expr,
+ Register destination);
- void VisitVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ void BuildVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void VisitVariableLoadForAccumulatorValue(
+ void BuildVariableLoadForAccumulatorValue(
Variable* variable, FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- MUST_USE_RESULT Register
- VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void VisitVariableAssignment(Variable* variable, Token::Value op,
- FeedbackVectorSlot slot);
+ void BuildVariableAssignment(Variable* variable, Token::Value op,
+ FeedbackVectorSlot slot,
+ HoleCheckMode hole_check_mode);
void BuildReturn();
void BuildReThrow();
@@ -111,7 +111,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildThrowIfHole(Handle<String> name);
void BuildThrowIfNotHole(Handle<String> name);
void BuildThrowReferenceError(Handle<String> name);
- void BuildHoleCheckForVariableLoad(Variable* variable);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
// Build jump to targets[value], where
@@ -143,6 +142,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ObjectLiteralProperty* property,
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+ void VisitModuleNamespaceImports();
// Visit the header/body of a loop iteration.
void VisitIterationHeader(IterationStatement* stmt,
@@ -152,12 +152,15 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
// Visit a statement and switch scopes, the context is in the accumulator.
void VisitInScope(Statement* stmt, Scope* scope);
+ void BuildPushUndefinedIntoRegisterList(RegisterList* reg_list);
+
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect.
void VisitForAccumulatorValue(Expression* expr);
void VisitForAccumulatorValueOrTheHole(Expression* expr);
MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
void VisitForRegisterValue(Expression* expr, Register destination);
+ void VisitAndPushIntoRegisterList(Expression* expr, RegisterList* reg_list);
void VisitForEffect(Expression* expr);
void VisitForTest(Expression* expr, BytecodeLabels* then_labels,
BytecodeLabels* else_labels, TestFallthrough fallthrough);
@@ -194,6 +197,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Handle<Name> home_object_symbol() const { return home_object_symbol_; }
Handle<Name> prototype_string() const { return prototype_string_; }
+ Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
Zone* zone_;
BytecodeArrayBuilder* builder_;
@@ -216,6 +220,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
Handle<Name> home_object_symbol_;
Handle<Name> prototype_string_;
+ Handle<FixedArray> empty_fixed_array_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index b35c4866be..55485027d3 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -14,8 +14,8 @@ namespace interpreter {
#define INVALID_OPERAND_TYPE_LIST(V) V(None, OperandTypeInfo::kNone)
#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
- V(RegList, OperandTypeInfo::kScalableSignedByte) \
V(Reg, OperandTypeInfo::kScalableSignedByte) \
+ V(RegList, OperandTypeInfo::kScalableSignedByte) \
V(RegPair, OperandTypeInfo::kScalableSignedByte)
#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
@@ -23,22 +23,25 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
-#define SCALAR_OPERAND_TYPE_LIST(V) \
+#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
- V(Imm, OperandTypeInfo::kScalableSignedByte) \
V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
+#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Imm, OperandTypeInfo::kScalableSignedByte)
+
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
INVALID_OPERAND_TYPE_LIST(V) \
- SCALAR_OPERAND_TYPE_LIST(V)
+ UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
+ SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
// The list of operand types used by bytecodes.
#define OPERAND_TYPE_LIST(V) \
@@ -114,9 +117,12 @@ inline AccumulatorUse operator|(AccumulatorUse lhs, AccumulatorUse rhs) {
return static_cast<AccumulatorUse>(result);
}
-std::ostream& operator<<(std::ostream& os, const AccumulatorUse& use);
-std::ostream& operator<<(std::ostream& os, const OperandScale& operand_scale);
-std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const AccumulatorUse& use);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const OperandScale& operand_scale);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
index c87d31c39f..40552943f7 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,7 +13,7 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
- : next_stage_(next_stage), last_(Bytecode::kIllegal) {
+ : next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
InvalidateLast();
}
@@ -77,8 +77,7 @@ void BytecodePeepholeOptimizer::SetLast(const BytecodeNode* const node) {
// source position information. NOP without source information can
// always be elided.
DCHECK(node->bytecode() != Bytecode::kNop || node->source_info().is_valid());
-
- last_.Clone(node);
+ last_ = *node;
}
bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
@@ -117,24 +116,6 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
namespace {
-void TransformLdaStarToLdrLdar(Bytecode new_bytecode, BytecodeNode* const last,
- BytecodeNode* const current) {
- DCHECK_EQ(current->bytecode(), Bytecode::kStar);
-
- //
- // An example transformation here would be:
- //
- // LdaGlobal i0, i1 ____\ LdrGlobal i0, i1, R
- // Star R ====/ Ldar R
- //
- // which loads a global value into both a register and the
- // accumulator. However, in the second form the Ldar can often be
- // peephole optimized away unlike the Star in the first form.
- //
- last->Transform(new_bytecode, current->operand(0));
- current->set_bytecode(Bytecode::kLdar, current->operand(0));
-}
-
void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
BytecodeNode* const last,
BytecodeNode* const current) {
@@ -142,7 +123,7 @@ void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
- current->source_info_ptr()->Clone(last->source_info());
+ current->set_source_info(last->source_info());
}
}
@@ -153,7 +134,7 @@ void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
current->set_bytecode(new_bytecode, 0, current->operand(0),
current->operand(1));
if (last->source_info().is_valid()) {
- current->source_info_ptr()->Clone(last->source_info());
+ current->set_source_info(last->source_info());
}
}
@@ -223,7 +204,7 @@ void BytecodePeepholeOptimizer::ElideLastAction(
// |node| can not have a valid source position if the source
// position of last() is valid (per rules in
// CanElideLastBasedOnSourcePosition()).
- node->source_info_ptr()->Clone(last()->source_info());
+ node->set_source_info(last()->source_info());
}
SetLast(node);
} else {
@@ -240,17 +221,6 @@ void BytecodePeepholeOptimizer::ChangeBytecodeAction(
DefaultAction(node);
}
-void BytecodePeepholeOptimizer::TransformLdaStarToLdrLdarAction(
- BytecodeNode* const node, const PeepholeActionAndData* action_data) {
- DCHECK(LastIsValid());
- DCHECK(!Bytecodes::IsJump(node->bytecode()));
-
- if (!node->source_info().is_statement()) {
- TransformLdaStarToLdrLdar(action_data->bytecode, last(), node);
- }
- DefaultAction(node);
-}
-
void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
BytecodeNode* const node, const PeepholeActionAndData* action_data) {
DCHECK(LastIsValid());
@@ -314,7 +284,7 @@ void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
if (!CanElideLastBasedOnSourcePosition(node)) {
next_stage()->Write(last());
} else if (!node->source_info().is_valid()) {
- node->source_info_ptr()->Clone(last()->source_info());
+ node->set_source_info(last()->source_info());
}
InvalidateLast();
}
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
index cedd742f87..7e7e02a370 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_PEEPHOLE_OPTIMIZER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-peephole-table.h"
#include "src/interpreter/bytecode-pipeline.h"
@@ -17,8 +19,9 @@ class BytecodePeepholeActionAndData;
// An optimization stage for performing peephole optimizations on
// generated bytecode. The optimizer may buffer one bytecode
// internally.
-class BytecodePeepholeOptimizer final : public BytecodePipelineStage,
- public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodePeepholeOptimizer final
+ : public NON_EXPORTED_BASE(BytecodePipelineStage),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
explicit BytecodePeepholeOptimizer(BytecodePipelineStage* next_stage);
diff --git a/deps/v8/src/interpreter/bytecode-peephole-table.h b/deps/v8/src/interpreter/bytecode-peephole-table.h
index e716aef496..1790f5a109 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-table.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-table.h
@@ -19,7 +19,6 @@ namespace interpreter {
V(ElideCurrentIfOperand0MatchesAction) \
V(ElideLastAction) \
V(ChangeBytecodeAction) \
- V(TransformLdaStarToLdrLdarAction) \
V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.cc b/deps/v8/src/interpreter/bytecode-pipeline.cc
index 6e6a6b6fab..06accd75dc 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.cc
+++ b/deps/v8/src/interpreter/bytecode-pipeline.cc
@@ -11,19 +11,6 @@ namespace v8 {
namespace internal {
namespace interpreter {
-BytecodeNode::BytecodeNode(const BytecodeNode& other) {
- memcpy(this, &other, sizeof(other));
-}
-
-BytecodeNode& BytecodeNode::operator=(const BytecodeNode& other) {
- memcpy(this, &other, sizeof(other));
- return *this;
-}
-
-void BytecodeNode::Clone(const BytecodeNode* const other) {
- memcpy(this, other, sizeof(*other));
-}
-
void BytecodeNode::Print(std::ostream& os) const {
#ifdef DEBUG
std::ios saved_state(nullptr);
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-pipeline.h
index 0b1a1f1bf3..d508defea0 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-pipeline.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
@@ -93,14 +95,6 @@ class BytecodeSourceInfo final {
source_position_ = source_position;
}
- // Clones a source position. The current instance is expected to be
- // invalid.
- void Clone(const BytecodeSourceInfo& other) {
- DCHECK(!is_valid());
- position_type_ = other.position_type_;
- source_position_ = other.source_position_;
- }
-
int source_position() const {
DCHECK(is_valid());
return source_position_;
@@ -138,81 +132,79 @@ class BytecodeSourceInfo final {
// A container for a generated bytecode, it's operands, and source information.
// These must be allocated by a BytecodeNodeAllocator instance.
-class BytecodeNode final : ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
public:
- INLINE(BytecodeNode(const Bytecode bytecode,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(0),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(1),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- uint32_t operand1,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(2),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(3),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
- AttachSourceInfo(source_info);
}
- INLINE(BytecodeNode(const Bytecode bytecode, uint32_t operand0,
- uint32_t operand1, uint32_t operand2, uint32_t operand3,
- BytecodeSourceInfo* source_info = nullptr))
+ INLINE(BytecodeNode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo()))
: bytecode_(bytecode),
operand_count_(4),
- operand_scale_(OperandScale::kSingle) {
+ operand_scale_(OperandScale::kSingle),
+ source_info_(source_info) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count());
SetOperand(0, operand0);
SetOperand(1, operand1);
SetOperand(2, operand2);
SetOperand(3, operand3);
- AttachSourceInfo(source_info);
}
- BytecodeNode(const BytecodeNode& other);
- BytecodeNode& operator=(const BytecodeNode& other);
-
// Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
Bytecodes::NumberOfOperands(bytecode));
bytecode_ = bytecode;
}
+
void set_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
bytecode_ = bytecode;
operand_count_ = 0;
operand_scale_ = OperandScale::kSingle;
}
+
void set_bytecode(Bytecode bytecode, uint32_t operand0) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
bytecode_ = bytecode;
@@ -220,6 +212,7 @@ class BytecodeNode final : ZoneObject {
operand_scale_ = OperandScale::kSingle;
SetOperand(0, operand0);
}
+
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
bytecode_ = bytecode;
@@ -228,6 +221,7 @@ class BytecodeNode final : ZoneObject {
SetOperand(0, operand0);
SetOperand(1, operand1);
}
+
void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
uint32_t operand2) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
@@ -239,9 +233,6 @@ class BytecodeNode final : ZoneObject {
SetOperand(2, operand2);
}
- // Clone |other|.
- void Clone(const BytecodeNode* const other);
-
// Print to stream |os|.
void Print(std::ostream& os) const;
@@ -266,18 +257,6 @@ class BytecodeNode final : ZoneObject {
SetOperand(operand_count() - 1, extra_operand);
}
- // Updates the operand at |operand_index| to |operand|.
- void UpdateOperand(int operand_index, uint32_t operand) {
- DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(bytecode()));
- operands_[operand_index] = operand;
- if ((Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index) &&
- Bytecodes::ScaleForSignedOperand(operand) != operand_scale_) ||
- (Bytecodes::OperandIsScalableUnsignedByte(bytecode(), operand_index) &&
- Bytecodes::ScaleForUnsignedOperand(operand) != operand_scale_)) {
- UpdateScale();
- }
- }
-
Bytecode bytecode() const { return bytecode_; }
uint32_t operand(int i) const {
@@ -290,27 +269,14 @@ class BytecodeNode final : ZoneObject {
OperandScale operand_scale() const { return operand_scale_; }
const BytecodeSourceInfo& source_info() const { return source_info_; }
- BytecodeSourceInfo* source_info_ptr() { return &source_info_; }
+ void set_source_info(BytecodeSourceInfo source_info) {
+ source_info_ = source_info;
+ }
bool operator==(const BytecodeNode& other) const;
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
- INLINE(void AttachSourceInfo(BytecodeSourceInfo* source_info)) {
- if (source_info && source_info->is_valid()) {
- // Statement positions need to be emitted immediately. Expression
- // positions can be pushed back until a bytecode is found that can
- // throw (if expression position filtering is turned on). We only
- // invalidate the existing source position information if it is used.
- if (source_info->is_statement() ||
- !FLAG_ignition_filter_expression_positions ||
- !Bytecodes::IsWithoutExternalSideEffects(bytecode())) {
- source_info_.Clone(*source_info);
- source_info->set_invalid();
- }
- }
- }
-
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
@@ -327,13 +293,6 @@ class BytecodeNode final : ZoneObject {
UpdateScaleForOperand(operand_index, operand);
}
- void UpdateScale() {
- operand_scale_ = OperandScale::kSingle;
- for (int i = 0; i < operand_count(); i++) {
- UpdateScaleForOperand(i, operands_[i]);
- }
- }
-
Bytecode bytecode_;
uint32_t operands_[Bytecodes::kMaxOperands];
int operand_count_;
@@ -341,8 +300,10 @@ class BytecodeNode final : ZoneObject {
BytecodeSourceInfo source_info_;
};
-std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info);
-std::ostream& operator<<(std::ostream& os, const BytecodeNode& node);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BytecodeSourceInfo& info);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BytecodeNode& node);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index e9de4661d3..72e0133f43 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -52,6 +52,27 @@ class BytecodeRegisterAllocator final {
return reg_list;
}
+ // Returns a growable register list.
+ RegisterList NewGrowableRegisterList() {
+ RegisterList reg_list(next_register_index_, 0);
+ return reg_list;
+ }
+
+ // Appends a new register to |reg_list| increasing it's count by one and
+ // returning the register added.
+ //
+ // Note: no other new registers must be currently allocated since the register
+ // list was originally allocated.
+ Register GrowRegisterList(RegisterList* reg_list) {
+ Register reg(NewRegister());
+ reg_list->IncrementRegisterCount();
+ // If the following CHECK fails then a register was allocated (and not
+ // freed) between the creation of the RegisterList and this call to add a
+ // Register.
+ CHECK_EQ(reg.index(), reg_list->last_register().index());
+ return reg;
+ }
+
// Release all registers above |register_index|.
void ReleaseRegisters(int register_index) {
if (observer_) {
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index acbe0ba5a1..563956e5c6 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -8,7 +8,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId;
+const uint32_t BytecodeRegisterOptimizer::kInvalidEquivalenceId = kMaxUInt32;
// A class for tracking the state of a register. This class tracks
// which equivalence set a register is a member of and also whether a
@@ -230,81 +230,7 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
DCHECK(accumulator_info_->register_value() == accumulator_);
}
-// override
-Handle<BytecodeArray> BytecodeRegisterOptimizer::ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) {
- FlushState();
- return next_stage_->ToBytecodeArray(isolate, max_register_index_ + 1,
- parameter_count, handler_table);
-}
-
-// override
-void BytecodeRegisterOptimizer::Write(BytecodeNode* node) {
- // Jumps are handled by WriteJump.
- DCHECK(!Bytecodes::IsJump(node->bytecode()));
- //
- // Transfers with observable registers as the destination will be
- // immediately materialized so the source position information will
- // be ordered correctly.
- //
- // Transfers without observable destination registers will initially
- // be emitted as Nop's with the source position. They may, or may
- // not, be materialized by the optimizer. However, the source
- // position is not lost and being attached to a Nop is fine as the
- // destination register is not observable in the debugger.
- //
- switch (node->bytecode()) {
- case Bytecode::kLdar: {
- DoLdar(node);
- return;
- }
- case Bytecode::kStar: {
- DoStar(node);
- return;
- }
- case Bytecode::kMov: {
- DoMov(node);
- return;
- }
- default:
- break;
- }
-
- if (node->bytecode() == Bytecode::kDebugger ||
- node->bytecode() == Bytecode::kSuspendGenerator) {
- // All state must be flushed before emitting
- // - a call to the debugger (as it can manipulate locals and parameters),
- // - a generator suspend (as this involves saving all registers).
- FlushState();
- }
-
- PrepareOperands(node);
- next_stage_->Write(node);
-}
-
-// override
-void BytecodeRegisterOptimizer::WriteJump(BytecodeNode* node,
- BytecodeLabel* label) {
- FlushState();
- next_stage_->WriteJump(node, label);
-}
-
-// override
-void BytecodeRegisterOptimizer::BindLabel(BytecodeLabel* label) {
- FlushState();
- next_stage_->BindLabel(label);
-}
-
-// override
-void BytecodeRegisterOptimizer::BindLabel(const BytecodeLabel& target,
- BytecodeLabel* label) {
- // There is no need to flush here, it will have been flushed when |target|
- // was bound.
- next_stage_->BindLabel(target, label);
-}
-
-void BytecodeRegisterOptimizer::FlushState() {
+void BytecodeRegisterOptimizer::Flush() {
if (!flush_required_) {
return;
}
@@ -332,7 +258,7 @@ void BytecodeRegisterOptimizer::FlushState() {
void BytecodeRegisterOptimizer::OutputRegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
- BytecodeSourceInfo* source_info) {
+ BytecodeSourceInfo source_info) {
Register input = input_info->register_value();
Register output = output_info->register_value();
DCHECK_NE(input.index(), output.index());
@@ -404,7 +330,7 @@ void BytecodeRegisterOptimizer::AddToEquivalenceSet(
void BytecodeRegisterOptimizer::RegisterTransfer(
RegisterInfo* input_info, RegisterInfo* output_info,
- BytecodeSourceInfo* source_info) {
+ BytecodeSourceInfo source_info) {
// Materialize an alternate in the equivalence set that
// |output_info| is leaving.
if (output_info->materialized()) {
@@ -423,7 +349,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
output_info->set_materialized(false);
RegisterInfo* materialized_info = input_info->GetMaterializedEquivalent();
OutputRegisterTransfer(materialized_info, output_info, source_info);
- } else if (source_info->is_valid()) {
+ } else if (source_info.is_valid()) {
// Emit a placeholder nop to maintain source position info.
EmitNopForSourceInfo(source_info);
}
@@ -437,60 +363,32 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
}
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
- BytecodeSourceInfo* source_info) const {
- DCHECK(source_info->is_valid());
+ BytecodeSourceInfo source_info) const {
+ DCHECK(source_info.is_valid());
BytecodeNode nop(Bytecode::kNop, source_info);
next_stage_->Write(&nop);
}
-void BytecodeRegisterOptimizer::DoLdar(BytecodeNode* node) {
- Register input = GetRegisterInputOperand(
- 0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* input_info = GetRegisterInfo(input);
- RegisterTransfer(input_info, accumulator_info_, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::DoMov(BytecodeNode* node) {
- Register input = GetRegisterInputOperand(
- 0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* input_info = GetRegisterInfo(input);
- Register output = GetRegisterOutputOperand(
- 1, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* output_info = GetRegisterInfo(output);
- RegisterTransfer(input_info, output_info, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::DoStar(BytecodeNode* node) {
- Register output = GetRegisterOutputOperand(
- 0, node->bytecode(), node->operands(), node->operand_count());
- RegisterInfo* output_info = GetRegisterInfo(output);
- RegisterTransfer(accumulator_info_, output_info, node->source_info_ptr());
-}
-
-void BytecodeRegisterOptimizer::PrepareRegisterOutputOperand(
- RegisterInfo* reg_info) {
+void BytecodeRegisterOptimizer::PrepareOutputRegister(Register reg) {
+ RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
CreateMaterializedEquivalent(reg_info);
}
+ reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
max_register_index_ =
std::max(max_register_index_, reg_info->register_value().index());
- reg_info->MoveToNewEquivalenceSet(NextEquivalenceId(), true);
}
-void BytecodeRegisterOptimizer::PrepareRegisterRangeOutputOperand(
- Register start, int count) {
- for (int i = 0; i < count; ++i) {
- Register reg(start.index() + i);
- RegisterInfo* reg_info = GetRegisterInfo(reg);
- PrepareRegisterOutputOperand(reg_info);
+void BytecodeRegisterOptimizer::PrepareOutputRegisterList(
+ RegisterList reg_list) {
+ int start_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); ++i) {
+ Register current(start_index + i);
+ PrepareOutputRegister(current);
}
}
-Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
- Register reg) {
- // For a temporary register, RegInfo state may need be created. For
- // locals and parameters, the RegInfo state is created in the
- // BytecodeRegisterOptimizer constructor.
+Register BytecodeRegisterOptimizer::GetInputRegister(Register reg) {
RegisterInfo* reg_info = GetRegisterInfo(reg);
if (reg_info->materialized()) {
return reg;
@@ -501,124 +399,49 @@ Register BytecodeRegisterOptimizer::GetEquivalentRegisterForInputOperand(
}
}
-void BytecodeRegisterOptimizer::PrepareRegisterInputOperand(
- BytecodeNode* const node, Register reg, int operand_index) {
- Register equivalent = GetEquivalentRegisterForInputOperand(reg);
- node->UpdateOperand(operand_index,
- static_cast<uint32_t>(equivalent.ToOperand()));
-}
-
-void BytecodeRegisterOptimizer::PrepareRegisterRangeInputOperand(Register start,
- int count) {
- for (int i = 0; i < count; ++i) {
- Register current(start.index() + i);
- RegisterInfo* input_info = GetRegisterInfo(current);
- Materialize(input_info);
+RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
+ RegisterList reg_list) {
+ if (reg_list.register_count() == 1) {
+ // If there is only a single register, treat it as a normal input register.
+ Register reg(GetInputRegister(reg_list.first_register()));
+ return RegisterList(reg.index(), 1);
+ } else {
+ int start_index = reg_list.first_register().index();
+ for (int i = 0; i < reg_list.register_count(); ++i) {
+ Register current(start_index + i);
+ RegisterInfo* input_info = GetRegisterInfo(current);
+ Materialize(input_info);
+ }
+ return reg_list;
}
}
-void BytecodeRegisterOptimizer::PrepareRegisterOperands(
- BytecodeNode* const node) {
- //
- // For each input operand, get a materialized equivalent if it is
- // just a single register, otherwise materialize register range.
- // Update operand_scale if necessary.
- //
- // For each output register about to be clobbered, materialize an
- // equivalent if it exists. Put each register in it's own equivalence set.
- //
- const uint32_t* operands = node->operands();
- int operand_count = node->operand_count();
- const OperandType* operand_types =
- Bytecodes::GetOperandTypes(node->bytecode());
- for (int i = 0; i < operand_count; ++i) {
- int count;
- if (operand_types[i] == OperandType::kRegList) {
- DCHECK_LT(i, operand_count - 1);
- DCHECK(operand_types[i + 1] == OperandType::kRegCount);
- count = static_cast<int>(operands[i + 1]);
- } else {
- count = Bytecodes::GetNumberOfRegistersRepresentedBy(operand_types[i]);
- }
-
- if (count == 0) {
- continue;
- }
-
- Register reg = Register::FromOperand(static_cast<int32_t>(operands[i]));
- if (Bytecodes::IsRegisterInputOperandType(operand_types[i])) {
- if (count == 1) {
- PrepareRegisterInputOperand(node, reg, i);
- } else if (count > 1) {
- PrepareRegisterRangeInputOperand(reg, count);
- }
- } else if (Bytecodes::IsRegisterOutputOperandType(operand_types[i])) {
- PrepareRegisterRangeOutputOperand(reg, count);
- }
+void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
+ if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
+ bytecode == Bytecode::kSuspendGenerator) {
+ // All state must be flushed before emitting
+ // - a jump bytecode (as the register equivalents at the jump target aren't
+ // known).
+ // - a call to the debugger (as it can manipulate locals and parameters),
+ // - a generator suspend (as this involves saving all registers).
+ Flush();
}
-}
-void BytecodeRegisterOptimizer::PrepareAccumulator(BytecodeNode* const node) {
// Materialize the accumulator if it is read by the bytecode. The
// accumulator is special and no other register can be materialized
// in it's place.
- if (Bytecodes::ReadsAccumulator(node->bytecode()) &&
+ if (Bytecodes::ReadsAccumulator(bytecode) &&
!accumulator_info_->materialized()) {
Materialize(accumulator_info_);
}
// Materialize an equivalent to the accumulator if it will be
// clobbered when the bytecode is dispatched.
- if (Bytecodes::WritesAccumulator(node->bytecode())) {
- PrepareRegisterOutputOperand(accumulator_info_);
+ if (Bytecodes::WritesAccumulator(bytecode)) {
+ PrepareOutputRegister(accumulator_);
}
}
-void BytecodeRegisterOptimizer::PrepareOperands(BytecodeNode* const node) {
- PrepareAccumulator(node);
- PrepareRegisterOperands(node);
-}
-
-// static
-Register BytecodeRegisterOptimizer::GetRegisterInputOperand(
- int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
- DCHECK_LT(index, operand_count);
- DCHECK(Bytecodes::IsRegisterInputOperandType(
- Bytecodes::GetOperandType(bytecode, index)));
- return OperandToRegister(operands[index]);
-}
-
-// static
-Register BytecodeRegisterOptimizer::GetRegisterOutputOperand(
- int index, Bytecode bytecode, const uint32_t* operands, int operand_count) {
- DCHECK_LT(index, operand_count);
- DCHECK(Bytecodes::IsRegisterOutputOperandType(
- Bytecodes::GetOperandType(bytecode, index)));
- return OperandToRegister(operands[index]);
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::GetRegisterInfo(Register reg) {
- size_t index = GetRegisterInfoTableIndex(reg);
- DCHECK_LT(index, register_info_table_.size());
- return register_info_table_[index];
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::GetOrCreateRegisterInfo(Register reg) {
- size_t index = GetRegisterInfoTableIndex(reg);
- return index < register_info_table_.size() ? register_info_table_[index]
- : NewRegisterInfo(reg);
-}
-
-BytecodeRegisterOptimizer::RegisterInfo*
-BytecodeRegisterOptimizer::NewRegisterInfo(Register reg) {
- size_t index = GetRegisterInfoTableIndex(reg);
- DCHECK_GE(index, register_info_table_.size());
- GrowRegisterMap(reg);
- return register_info_table_[index];
-}
-
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index eda22e5f4d..e2a02cf594 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -5,6 +5,8 @@
#ifndef V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
#define V8_INTERPRETER_BYTECODE_REGISTER_OPTIMIZER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-pipeline.h"
namespace v8 {
@@ -15,10 +17,9 @@ namespace interpreter {
// registers. The bytecode generator uses temporary registers
// liberally for correctness and convenience and this stage removes
// transfers that are not required and preserves correctness.
-class BytecodeRegisterOptimizer final
- : public BytecodePipelineStage,
- public BytecodeRegisterAllocator::Observer,
- public ZoneObject {
+class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
+ : public NON_EXPORTED_BASE(BytecodeRegisterAllocator::Observer),
+ public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeRegisterOptimizer(Zone* zone,
BytecodeRegisterAllocator* register_allocator,
@@ -26,17 +27,44 @@ class BytecodeRegisterOptimizer final
BytecodePipelineStage* next_stage);
virtual ~BytecodeRegisterOptimizer() {}
- // BytecodePipelineStage interface.
- void Write(BytecodeNode* node) override;
- void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
- void BindLabel(BytecodeLabel* label) override;
- void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
- Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) override;
+ // Perform explicit register transfer operations.
+ void DoLdar(Register input, BytecodeSourceInfo source_info) {
+ RegisterInfo* input_info = GetRegisterInfo(input);
+ RegisterTransfer(input_info, accumulator_info_, source_info);
+ }
+ void DoStar(Register output, BytecodeSourceInfo source_info) {
+ RegisterInfo* output_info = GetRegisterInfo(output);
+ RegisterTransfer(accumulator_info_, output_info, source_info);
+ }
+ void DoMov(Register input, Register output, BytecodeSourceInfo source_info) {
+ RegisterInfo* input_info = GetRegisterInfo(input);
+ RegisterInfo* output_info = GetRegisterInfo(output);
+ RegisterTransfer(input_info, output_info, source_info);
+ }
+
+ // Materialize all live registers and flush equivalence sets.
+ void Flush();
+
+ // Prepares for |bytecode|.
+ void PrepareForBytecode(Bytecode bytecode);
+
+ // Prepares |reg| for being used as an output operand.
+ void PrepareOutputRegister(Register reg);
+
+ // Prepares registers in |reg_list| for being used as an output operand.
+ void PrepareOutputRegisterList(RegisterList reg_list);
+
+ // Returns an equivalent register to |reg| to be used as an input operand.
+ Register GetInputRegister(Register reg);
+
+ // Returns an equivalent register list to |reg_list| to be used as an input
+ // operand.
+ RegisterList GetInputRegisterList(RegisterList reg_list);
+
+ int maxiumum_register_index() const { return max_register_index_; }
private:
- static const uint32_t kInvalidEquivalenceId = kMaxUInt32;
+ static const uint32_t kInvalidEquivalenceId;
class RegisterInfo;
@@ -45,48 +73,20 @@ class BytecodeRegisterOptimizer final
void RegisterListAllocateEvent(RegisterList reg_list) override;
void RegisterListFreeEvent(RegisterList reg) override;
- // Helpers for BytecodePipelineStage interface.
- void FlushState();
-
// Update internal state for register transfer from |input| to
// |output| using |source_info| as source position information if
// any bytecodes are emitted due to transfer.
void RegisterTransfer(RegisterInfo* input, RegisterInfo* output,
- BytecodeSourceInfo* source_info);
+ BytecodeSourceInfo source_info);
// Emit a register transfer bytecode from |input| to |output|.
- void OutputRegisterTransfer(RegisterInfo* input, RegisterInfo* output,
- BytecodeSourceInfo* source_info = nullptr);
+ void OutputRegisterTransfer(
+ RegisterInfo* input, RegisterInfo* output,
+ BytecodeSourceInfo source_info = BytecodeSourceInfo());
// Emits a Nop to preserve source position information in the
// bytecode pipeline.
- void EmitNopForSourceInfo(BytecodeSourceInfo* source_info) const;
-
- // Handlers for bytecode nodes for register to register transfers.
- void DoLdar(BytecodeNode* node);
- void DoMov(BytecodeNode* node);
- void DoStar(BytecodeNode* node);
-
- // Operand processing methods for bytecodes other than those
- // performing register to register transfers.
- void PrepareOperands(BytecodeNode* const node);
- void PrepareAccumulator(BytecodeNode* const node);
- void PrepareRegisterOperands(BytecodeNode* const node);
-
- void PrepareRegisterOutputOperand(RegisterInfo* reg_info);
- void PrepareRegisterRangeOutputOperand(Register start, int count);
- void PrepareRegisterInputOperand(BytecodeNode* const node, Register reg,
- int operand_index);
- void PrepareRegisterRangeInputOperand(Register start, int count);
-
- Register GetEquivalentRegisterForInputOperand(Register reg);
-
- static Register GetRegisterInputOperand(int index, Bytecode bytecode,
- const uint32_t* operands,
- int operand_count);
- static Register GetRegisterOutputOperand(int index, Bytecode bytecode,
- const uint32_t* operands,
- int operand_count);
+ void EmitNopForSourceInfo(BytecodeSourceInfo source_info) const;
void CreateMaterializedEquivalent(RegisterInfo* info);
RegisterInfo* GetMaterializedEquivalent(RegisterInfo* info);
@@ -96,9 +96,23 @@ class BytecodeRegisterOptimizer final
RegisterInfo* non_set_member);
// Methods for finding and creating metadata for each register.
- RegisterInfo* GetOrCreateRegisterInfo(Register reg);
- RegisterInfo* GetRegisterInfo(Register reg);
- RegisterInfo* NewRegisterInfo(Register reg);
+ RegisterInfo* GetRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ DCHECK_LT(index, register_info_table_.size());
+ return register_info_table_[index];
+ }
+ RegisterInfo* GetOrCreateRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ return index < register_info_table_.size() ? register_info_table_[index]
+ : NewRegisterInfo(reg);
+ }
+ RegisterInfo* NewRegisterInfo(Register reg) {
+ size_t index = GetRegisterInfoTableIndex(reg);
+ DCHECK_GE(index, register_info_table_.size());
+ GrowRegisterMap(reg);
+ return register_info_table_[index];
+ }
+
void GrowRegisterMap(Register reg);
bool RegisterIsTemporary(Register reg) const {
@@ -123,7 +137,8 @@ class BytecodeRegisterOptimizer final
uint32_t NextEquivalenceId() {
equivalence_id_++;
- CHECK_NE(equivalence_id_, kInvalidEquivalenceId);
+ // TODO(rmcilroy): use the same type for these and remove static_cast.
+ CHECK_NE(static_cast<size_t>(equivalence_id_), kInvalidEquivalenceId);
return equivalence_id_;
}
diff --git a/deps/v8/src/interpreter/bytecode-register.h b/deps/v8/src/interpreter/bytecode-register.h
index d698d4049d..554bc23a5b 100644
--- a/deps/v8/src/interpreter/bytecode-register.h
+++ b/deps/v8/src/interpreter/bytecode-register.h
@@ -8,6 +8,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/frames.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -15,7 +16,7 @@ namespace interpreter {
// An interpreter Register which is located in the function's Register file
// in its stack-frame. Register hold parameters, this, and expression values.
-class Register final {
+class V8_EXPORT_PRIVATE Register final {
public:
explicit Register(int index = kInvalidIndex) : index_(index) {}
@@ -104,6 +105,9 @@ class RegisterList {
RegisterList(int first_reg_index, int register_count)
: first_reg_index_(first_reg_index), register_count_(register_count) {}
+ // Increases the size of the register list by one.
+ void IncrementRegisterCount() { register_count_++; }
+
// Returns a new RegisterList which is a truncated version of this list, with
// |count| registers.
const RegisterList Truncate(int new_count) {
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index c58f4685a2..15c4e98a02 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -211,6 +211,12 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kLdaNull:
case Bytecode::kLdaTheHole:
case Bytecode::kLdaConstant:
+ case Bytecode::kLdaUndefined:
+ case Bytecode::kLdaGlobal:
+ case Bytecode::kLdaNamedProperty:
+ case Bytecode::kLdaKeyedProperty:
+ case Bytecode::kLdaContextSlot:
+ case Bytecode::kLdaCurrentContextSlot:
case Bytecode::kAdd:
case Bytecode::kSub:
case Bytecode::kMul:
@@ -220,6 +226,7 @@ bool Bytecodes::IsStarLookahead(Bytecode bytecode, OperandScale operand_scale) {
case Bytecode::kDec:
case Bytecode::kTypeOf:
case Bytecode::kCall:
+ case Bytecode::kCallProperty:
case Bytecode::kNew:
return true;
default:
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 6232966bbc..23d77f0c33 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -37,12 +37,8 @@ namespace interpreter {
V(LdaFalse, AccumulatorUse::kWrite) \
V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
\
- /* Loading registers */ \
- V(LdrUndefined, AccumulatorUse::kNone, OperandType::kRegOut) \
- \
/* Globals */ \
V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdrGlobal, AccumulatorUse::kNone, OperandType::kIdx, OperandType::kRegOut) \
V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
OperandType::kIdx) \
@@ -54,10 +50,10 @@ namespace interpreter {
V(PopContext, AccumulatorUse::kNone, OperandType::kReg) \
V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
- V(LdrContextSlot, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kIdx, OperandType::kUImm, OperandType::kRegOut) \
+ V(LdaCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
+ V(StaCurrentContextSlot, AccumulatorUse::kRead, OperandType::kIdx) \
\
/* Load-Store lookup slots */ \
V(LdaLookupSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
@@ -83,12 +79,14 @@ namespace interpreter {
/* Property loads (LoadIC) operations */ \
V(LdaNamedProperty, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
- V(LdrNamedProperty, AccumulatorUse::kNone, OperandType::kReg, \
- OperandType::kIdx, OperandType::kIdx, OperandType::kRegOut) \
V(LdaKeyedProperty, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
- V(LdrKeyedProperty, AccumulatorUse::kRead, OperandType::kReg, \
- OperandType::kIdx, OperandType::kRegOut) \
+ \
+ /* Operations on module variables */ \
+ V(LdaModuleVariable, AccumulatorUse::kWrite, OperandType::kImm, \
+ OperandType::kUImm) \
+ V(StaModuleVariable, AccumulatorUse::kRead, OperandType::kImm, \
+ OperandType::kUImm) \
\
/* Propery stores (StoreIC) operations */ \
V(StaNamedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg, \
@@ -145,6 +143,8 @@ namespace interpreter {
/* Call operations */ \
V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
+ V(CallProperty, AccumulatorUse::kWrite, OperandType::kReg, \
+ OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(TailCall, AccumulatorUse::kWrite, OperandType::kReg, \
OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx) \
V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId, \
@@ -314,7 +314,7 @@ enum class Bytecode : uint8_t {
#define CONSTEXPR constexpr
#endif
-class Bytecodes final {
+class V8_EXPORT_PRIVATE Bytecodes final {
public:
// The maximum number of operands a bytecode may have.
static const int kMaxOperands = 4;
@@ -422,15 +422,16 @@ class Bytecodes final {
bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
bytecode == Bytecode::kLdaUndefined ||
bytecode == Bytecode::kLdaTheHole ||
- bytecode == Bytecode::kLdaConstant;
+ bytecode == Bytecode::kLdaConstant ||
+ bytecode == Bytecode::kLdaContextSlot ||
+ bytecode == Bytecode::kLdaCurrentContextSlot;
}
// Return true if |bytecode| is a register load without effects,
- // e.g. Mov, Star, LdrUndefined.
+ // e.g. Mov, Star.
static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
- bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar ||
- bytecode == Bytecode::kLdrUndefined;
+ bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
}
// Returns true if the bytecode is a conditional jump taking
@@ -525,8 +526,8 @@ class Bytecodes final {
// Returns true if the bytecode is a call or a constructor call.
static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
- return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
- bytecode == Bytecode::kNew;
+ return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
+ bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
}
// Returns true if the bytecode is a call to the runtime.
@@ -733,7 +734,8 @@ class Bytecodes final {
// See crbug.com/603131.
#undef CONSTEXPR
-std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const Bytecode& bytecode);
} // namespace interpreter
} // namespace internal
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 78d36f5044..8e95913e57 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
#define V8_INTERPRETER_CONSTANT_ARRAY_BUILDER_H_
+#include "src/globals.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
#include "src/zone/zone-containers.h"
@@ -20,7 +21,7 @@ namespace interpreter {
// interpreter. Each instance of this class is intended to be used to
// generate exactly one FixedArray of constants via the ToFixedArray
// method.
-class ConstantArrayBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
public:
// Capacity of the 8-bit operand slice.
static const size_t k8BitCapacity = 1u << kBitsPerByte;
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index 5767ffa8a5..c8ce5539e9 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -97,17 +97,17 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
Label context_search(this, 2, context_search_loop_variables);
// Fast path if the depth is 0.
- BranchIfWord32Equal(depth, Int32Constant(0), &context_found, &context_search);
+ Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
// Loop until the depth is 0.
Bind(&context_search);
{
cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context.Bind(
- LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
- BranchIfWord32Equal(cur_depth.value(), Int32Constant(0), &context_found,
- &context_search);
+ Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found,
+ &context_search);
}
Bind(&context_found);
@@ -135,14 +135,14 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
// contexts actually need to be checked.
Node* extension_slot =
- LoadContextSlot(cur_context.value(), Context::EXTENSION_INDEX);
+ LoadContextElement(cur_context.value(), Context::EXTENSION_INDEX);
// Jump to the target if the extension slot is not a hole.
GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target);
cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context.Bind(
- LoadContextSlot(cur_context.value(), Context::PREVIOUS_INDEX));
+ LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)),
&context_search);
@@ -485,26 +485,6 @@ Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
}
}
-Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
- return Load(MachineType::AnyTagged(), context,
- IntPtrConstant(Context::SlotOffset(slot_index)));
-}
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
- Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Load(MachineType::AnyTagged(), context, offset);
-}
-
-Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
- Node* value) {
- Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Store(MachineRepresentation::kTagged, context, offset, value);
-}
-
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
Node* function = LoadRegister(Register::function_closure());
Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
@@ -566,28 +546,22 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
Variable return_value(this, MachineRepresentation::kTagged);
- Label handle_monomorphic(this), extra_checks(this), end(this), call(this),
- call_function(this), call_without_feedback(this);
-
- // Slot id of 0 is used to indicate no typefeedback is available. Call using
- // call builtin.
- STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
- GotoIf(is_feedback_unavailable, &call_without_feedback);
+ Label call_function(this), extra_checks(this, Label::kDeferred), call(this),
+ end(this);
// The checks. First, does function match the recorded monomorphic target?
Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
- Node* feedback_value = LoadWeakCellValue(feedback_element);
+ Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
Node* is_monomorphic = WordEqual(function, feedback_value);
- BranchIf(is_monomorphic, &handle_monomorphic, &extra_checks);
+ GotoUnless(is_monomorphic, &extra_checks);
- Bind(&handle_monomorphic);
- {
- // The compare above could have been a SMI/SMI comparison. Guard against
- // this convincing us that we have a monomorphic JSFunction.
- Node* is_smi = WordIsSmi(function);
- GotoIf(is_smi, &extra_checks);
+ // The compare above could have been a SMI/SMI comparison. Guard against
+ // this convincing us that we have a monomorphic JSFunction.
+ Node* is_smi = TaggedIsSmi(function);
+ Branch(is_smi, &extra_checks, &call_function);
+ Bind(&call_function);
+ {
// Increment the call count.
IncrementCallCount(type_feedback_vector, slot_id);
@@ -603,56 +577,56 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Bind(&extra_checks);
{
- Label check_initialized(this, Label::kDeferred), mark_megamorphic(this),
- check_allocation_site(this),
- create_allocation_site(this, Label::kDeferred);
- // Check if it is a megamorphic target
+ Label check_initialized(this), mark_megamorphic(this),
+ create_allocation_site(this);
+
+ Comment("check if megamorphic");
+ // Check if it is a megamorphic target.
Node* is_megamorphic = WordEqual(
feedback_element,
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- BranchIf(is_megamorphic, &call, &check_allocation_site);
+ GotoIf(is_megamorphic, &call);
- Bind(&check_allocation_site);
- {
- Node* is_allocation_site =
- WordEqual(LoadMap(feedback_element),
- LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
+ Comment("check if it is an allocation site");
+ Node* is_allocation_site = WordEqual(
+ LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
+ GotoUnless(is_allocation_site, &check_initialized);
- // If it is not the Array() function, mark megamorphic.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
- Node* is_array_function = WordEqual(context_slot, function);
- GotoUnless(is_array_function, &mark_megamorphic);
+ // If it is not the Array() function, mark megamorphic.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, function);
+ GotoUnless(is_array_function, &mark_megamorphic);
- // It is a monomorphic Array function. Increment the call count.
- IncrementCallCount(type_feedback_vector, slot_id);
-
- // Call ArrayConstructorStub.
- Callable callable_call =
- CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
- Node* code_target_call = HeapConstant(callable_call.code());
- Node* ret_value =
- CallStub(callable_call.descriptor(), code_target_call, context,
- arg_count, function, feedback_element, first_arg);
- return_value.Bind(ret_value);
- Goto(&end);
- }
+ // It is a monomorphic Array function. Increment the call count.
+ IncrementCallCount(type_feedback_vector, slot_id);
+
+ // Call ArrayConstructorStub.
+ Callable callable_call =
+ CodeFactory::InterpreterPushArgsAndConstructArray(isolate());
+ Node* code_target_call = HeapConstant(callable_call.code());
+ Node* ret_value =
+ CallStub(callable_call.descriptor(), code_target_call, context,
+ arg_count, function, feedback_element, first_arg);
+ return_value.Bind(ret_value);
+ Goto(&end);
Bind(&check_initialized);
{
- Label possibly_monomorphic(this);
- // Check if it is uninitialized.
+ Comment("check if uninitialized");
+ // Check if it is an uninitialized target first.
Node* is_uninitialized = WordEqual(
feedback_element,
HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
GotoUnless(is_uninitialized, &mark_megamorphic);
- Node* is_smi = WordIsSmi(function);
+ Comment("handle_unitinitialized");
+ // If it is not a JSFunction mark it as megamorphic.
+ Node* is_smi = TaggedIsSmi(function);
GotoIf(is_smi, &mark_megamorphic);
- // Check if function is an object of JSFunction type
+ // Check if function is an object of JSFunction type.
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
@@ -665,7 +639,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Node* is_array_function = WordEqual(context_slot, function);
GotoIf(is_array_function, &create_allocation_site);
- // Check if the function belongs to the same native context
+ // Check if the function belongs to the same native context.
Node* native_context = LoadNativeContext(
LoadObjectField(function, JSFunction::kContextOffset));
Node* is_same_native_context =
@@ -704,22 +678,9 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
}
}
- Bind(&call_function);
- {
- // Increment the call count.
- IncrementCallCount(type_feedback_vector, slot_id);
-
- Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kJSFunction);
- Node* code_target_call = HeapConstant(callable_call.code());
- Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
- context, arg_count, first_arg, function);
- return_value.Bind(ret_value);
- Goto(&end);
- }
-
Bind(&call);
{
+ Comment("Increment call count and call using Call builtin");
// Increment the call count.
IncrementCallCount(type_feedback_vector, slot_id);
@@ -733,18 +694,6 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
Goto(&end);
}
- Bind(&call_without_feedback);
- {
- // Call using call builtin.
- Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
- isolate(), tail_call_mode, CallableType::kAny);
- Node* code_target_call = HeapConstant(callable_call.code());
- Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
- context, arg_count, first_arg, function);
- return_value.Bind(ret_value);
- Goto(&end);
- }
-
Bind(&end);
return return_value.value();
}
@@ -763,10 +712,10 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Node* new_target, Node* first_arg,
Node* arg_count, Node* slot_id,
Node* type_feedback_vector) {
- Label call_construct(this), js_function(this), end(this);
Variable return_value(this, MachineRepresentation::kTagged);
Variable allocation_feedback(this, MachineRepresentation::kTagged);
- allocation_feedback.Bind(UndefinedConstant());
+ Label call_construct_function(this, &allocation_feedback),
+ extra_checks(this, Label::kDeferred), call_construct(this), end(this);
// Slot id of 0 is used to indicate no type feedback is available.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
@@ -774,139 +723,125 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
GotoIf(is_feedback_unavailable, &call_construct);
// Check that the constructor is not a smi.
- Node* is_smi = WordIsSmi(constructor);
+ Node* is_smi = TaggedIsSmi(constructor);
GotoIf(is_smi, &call_construct);
// Check that constructor is a JSFunction.
Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function =
WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
- BranchIf(is_js_function, &js_function, &call_construct);
+ GotoUnless(is_js_function, &call_construct);
- Bind(&js_function);
+ // Check if it is a monomorphic constructor.
+ Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+ Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
+ Node* is_monomorphic = WordEqual(constructor, feedback_value);
+ allocation_feedback.Bind(UndefinedConstant());
+ Branch(is_monomorphic, &call_construct_function, &extra_checks);
+
+ Bind(&call_construct_function);
+ {
+ Comment("call using callConstructFunction");
+ IncrementCallCount(type_feedback_vector, slot_id);
+ Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
+ isolate(), CallableType::kJSFunction);
+ return_value.Bind(CallStub(callable_function.descriptor(),
+ HeapConstant(callable_function.code()), context,
+ arg_count, new_target, constructor,
+ allocation_feedback.value(), first_arg));
+ Goto(&end);
+ }
+
+ Bind(&extra_checks);
{
- // Cache the called function in a feedback vector slot. Cache states
- // are uninitialized, monomorphic (indicated by a JSFunction), and
- // megamorphic.
- // TODO(mythria/v8:5210): Check if it is better to mark extra_checks as a
- // deferred block so that call_construct_function will be scheduled.
- Label extra_checks(this), call_construct_function(this);
-
- Node* feedback_element =
- LoadFixedArrayElement(type_feedback_vector, slot_id);
- Node* feedback_value = LoadWeakCellValue(feedback_element);
- Node* is_monomorphic = WordEqual(constructor, feedback_value);
- BranchIf(is_monomorphic, &call_construct_function, &extra_checks);
-
- Bind(&extra_checks);
+ Label check_allocation_site(this), check_initialized(this),
+ initialize(this), mark_megamorphic(this);
+
+ // Check if it is a megamorphic target.
+ Comment("check if megamorphic");
+ Node* is_megamorphic = WordEqual(
+ feedback_element,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ GotoIf(is_megamorphic, &call_construct_function);
+
+ Comment("check if weak cell");
+ Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
+ LoadRoot(Heap::kWeakCellMapRootIndex));
+ GotoUnless(is_weak_cell, &check_allocation_site);
+
+ // If the weak cell is cleared, we have a new chance to become
+ // monomorphic.
+ Comment("check if weak cell is cleared");
+ Node* is_smi = TaggedIsSmi(feedback_value);
+ Branch(is_smi, &initialize, &mark_megamorphic);
+
+ Bind(&check_allocation_site);
{
- Label mark_megamorphic(this), initialize(this),
- check_allocation_site(this), check_initialized(this),
- set_alloc_feedback_and_call(this);
- {
- // Check if it is a megamorphic target
- Comment("check if megamorphic");
- Node* is_megamorphic = WordEqual(
- feedback_element,
- HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- GotoIf(is_megamorphic, &call_construct_function);
-
- Comment("check if weak cell");
- Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
- LoadRoot(Heap::kWeakCellMapRootIndex));
- GotoUnless(is_weak_cell, &check_allocation_site);
- // If the weak cell is cleared, we have a new chance to become
- // monomorphic.
- Comment("check if weak cell is cleared");
- Node* is_smi = WordIsSmi(feedback_value);
- BranchIf(is_smi, &initialize, &mark_megamorphic);
- }
+ Comment("check if it is an allocation site");
+ Node* is_allocation_site =
+ WordEqual(LoadObjectField(feedback_element, 0),
+ LoadRoot(Heap::kAllocationSiteMapRootIndex));
+ GotoUnless(is_allocation_site, &check_initialized);
- Bind(&check_allocation_site);
- {
- Comment("check if it is an allocation site");
- Node* is_allocation_site =
- WordEqual(LoadObjectField(feedback_element, 0),
- LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
-
- // Make sure the function is the Array() function
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
- Node* is_array_function = WordEqual(context_slot, constructor);
- BranchIf(is_array_function, &set_alloc_feedback_and_call,
- &mark_megamorphic);
- }
+ // Make sure the function is the Array() function.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, constructor);
+ GotoUnless(is_array_function, &mark_megamorphic);
- Bind(&set_alloc_feedback_and_call);
- {
- allocation_feedback.Bind(feedback_element);
- Goto(&call_construct_function);
- }
+ allocation_feedback.Bind(feedback_element);
+ Goto(&call_construct_function);
+ }
- Bind(&check_initialized);
- {
- // Check if it is uninitialized.
- Comment("check if uninitialized");
- Node* is_uninitialized = WordEqual(
- feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
- BranchIf(is_uninitialized, &initialize, &mark_megamorphic);
- }
+ Bind(&check_initialized);
+ {
+ // Check if it is uninitialized.
+ Comment("check if uninitialized");
+ Node* is_uninitialized = WordEqual(
+ feedback_element, LoadRoot(Heap::kuninitialized_symbolRootIndex));
+ Branch(is_uninitialized, &initialize, &mark_megamorphic);
+ }
- Bind(&initialize);
- {
- Label create_weak_cell(this), create_allocation_site(this);
- Comment("initialize the feedback element");
- // Check that it is the Array() function.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
- Node* is_array_function = WordEqual(context_slot, constructor);
- BranchIf(is_array_function, &create_allocation_site, &create_weak_cell);
-
- Bind(&create_allocation_site);
- {
- Node* site = CreateAllocationSiteInFeedbackVector(
- type_feedback_vector, SmiTag(slot_id));
- allocation_feedback.Bind(site);
- Goto(&call_construct_function);
- }
+ Bind(&initialize);
+ {
+ Label create_allocation_site(this), create_weak_cell(this);
+ Comment("initialize the feedback element");
+ // Create an allocation site if the function is an array function,
+ // otherwise create a weak cell.
+ Node* context_slot =
+ LoadFixedArrayElement(LoadNativeContext(context),
+ Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* is_array_function = WordEqual(context_slot, constructor);
+ Branch(is_array_function, &create_allocation_site, &create_weak_cell);
- Bind(&create_weak_cell);
- {
- CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
- constructor);
- Goto(&call_construct_function);
- }
+ Bind(&create_allocation_site);
+ {
+ Node* site = CreateAllocationSiteInFeedbackVector(type_feedback_vector,
+ SmiTag(slot_id));
+ allocation_feedback.Bind(site);
+ Goto(&call_construct_function);
}
- Bind(&mark_megamorphic);
+ Bind(&create_weak_cell);
{
- // MegamorphicSentinel is an immortal immovable object so
- // write-barrier is not needed.
- Comment("transition to megamorphic");
- DCHECK(
- Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
- StoreFixedArrayElement(
- type_feedback_vector, slot_id,
- HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
- SKIP_WRITE_BARRIER);
+ CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+ constructor);
Goto(&call_construct_function);
}
}
- Bind(&call_construct_function);
+ Bind(&mark_megamorphic);
{
- Comment("call using callConstructFunction");
- IncrementCallCount(type_feedback_vector, slot_id);
- Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
- isolate(), CallableType::kJSFunction);
- return_value.Bind(CallStub(callable_function.descriptor(),
- HeapConstant(callable_function.code()),
- context, arg_count, new_target, constructor,
- allocation_feedback.value(), first_arg));
- Goto(&end);
+ // MegamorphicSentinel is an immortal immovable object so
+ // write-barrier is not needed.
+ Comment("transition to megamorphic");
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+ StoreFixedArrayElement(
+ type_feedback_vector, slot_id,
+ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+ SKIP_WRITE_BARRIER);
+ Goto(&call_construct_function);
}
}
@@ -1007,7 +942,7 @@ Node* InterpreterAssembler::Jump(Node* delta) {
void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this);
- BranchIf(condition, &match, &no_match);
+ Branch(condition, &match, &no_match);
Bind(&match);
Jump(delta);
Bind(&no_match);
@@ -1035,12 +970,12 @@ Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
Label do_inline_star(this), done(this);
- Variable var_bytecode(this, MachineRepresentation::kWord8);
+ Variable var_bytecode(this, MachineType::PointerRepresentation());
var_bytecode.Bind(target_bytecode);
Node* star_bytecode = IntPtrConstant(static_cast<int>(Bytecode::kStar));
Node* is_star = WordEqual(target_bytecode, star_bytecode);
- BranchIf(is_star, &do_inline_star, &done);
+ Branch(is_star, &do_inline_star, &done);
Bind(&do_inline_star);
{
@@ -1161,7 +1096,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Check if the {value} is a Smi or a HeapObject.
Label if_valueissmi(this), if_valueisnotsmi(this);
- Branch(WordIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
Bind(&if_valueissmi);
{
@@ -1178,7 +1113,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this),
if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
+ Node* value_map = LoadMap(value);
+ Branch(WordEqual(value_map, HeapNumberMapConstant()),
&if_valueisheapnumber, &if_valueisnotheapnumber);
Bind(&if_valueisheapnumber);
@@ -1193,11 +1129,36 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Bind(&if_valueisnotheapnumber);
{
- // Convert the {value} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(isolate());
- var_value.Bind(CallStub(callable, context, value));
- var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
- Goto(&loop);
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(this,
+ Word32Equal(var_type_feedback->value(),
+ Int32Constant(BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(this),
+ if_valueisnotoddball(this, Label::kDeferred);
+ Node* is_oddball = Word32Equal(LoadMapInstanceType(value_map),
+ Int32Constant(ODDBALL_TYPE));
+ Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
+
+ Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to a Number and perform checks again.
+ var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback->Bind(
+ Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ Goto(&loop);
+ }
+
+ Bind(&if_valueisnotoddball);
+ {
+ // Convert the {value} to a Number first.
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_value.Bind(CallStub(callable, context, value));
+ var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+ Goto(&loop);
+ }
}
}
}
@@ -1241,7 +1202,7 @@ void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
BailoutReason bailout_reason) {
Label ok(this), abort(this, Label::kDeferred);
- BranchIfWordEqual(lhs, rhs, &ok, &abort);
+ Branch(WordEqual(lhs, rhs), &ok, &abort);
Bind(&abort);
Abort(bailout_reason);
@@ -1271,7 +1232,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Node* counter_reached_max = WordEqual(
old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
- BranchIf(counter_reached_max, &counter_saturated, &counter_ok);
+ Branch(counter_reached_max, &counter_saturated, &counter_ok);
Bind(&counter_ok);
{
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index 9dda20af48..aefd2bc053 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -9,6 +9,7 @@
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/frames.h"
+#include "src/globals.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -17,7 +18,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class InterpreterAssembler : public CodeStubAssembler {
+class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
public:
InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
OperandScale operand_scale);
@@ -92,15 +93,6 @@ class InterpreterAssembler : public CodeStubAssembler {
// Load and untag constant at |index| in the constant pool.
compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
- // Load |slot_index| from |context|.
- compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
- compiler::Node* LoadContextSlot(compiler::Node* context,
- compiler::Node* slot_index);
- // Stores |value| into |slot_index| of |context|.
- compiler::Node* StoreContextSlot(compiler::Node* context,
- compiler::Node* slot_index,
- compiler::Node* value);
-
// Load the TypeFeedbackVector for the current function.
compiler::Node* LoadTypeFeedbackVector();
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index 600b9c086f..b46ca878cc 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -125,7 +125,7 @@ Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
return_false(assembler_), end(assembler_);
Node* arg = __ LoadRegister(input);
- __ GotoIf(__ WordIsSmi(arg), &return_false);
+ __ GotoIf(__ TaggedIsSmi(arg), &return_false);
Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
__ Branch(condition, &return_true, &return_false);
@@ -154,7 +154,7 @@ Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
end(assembler_);
Node* arg = __ LoadRegister(input);
- __ GotoIf(__ WordIsSmi(arg), &return_false);
+ __ GotoIf(__ TaggedIsSmi(arg), &return_false);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* condition = CompareInstanceType(arg, FIRST_JS_RECEIVER_TYPE,
@@ -202,7 +202,7 @@ Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
Node* arg = __ LoadRegister(input);
- __ Branch(__ WordIsSmi(arg), &if_smi, &if_not_smi);
+ __ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
__ Bind(&if_smi);
{
return_value.Bind(__ BooleanConstant(true));
@@ -249,12 +249,6 @@ Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
CodeFactory::NumberToString(isolate()));
}
-Node* IntrinsicsHelper::RegExpConstructResult(Node* input, Node* arg_count,
- Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::RegExpConstructResult(isolate()));
-}
-
Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsStubCall(input, context,
@@ -321,7 +315,7 @@ Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
return_value.Bind(object);
// If the object is a smi return the object.
- __ GotoIf(__ WordIsSmi(object), &done);
+ __ GotoIf(__ TaggedIsSmi(object), &done);
// If the object is not a value type, return the object.
Node* condition =
@@ -346,7 +340,7 @@ Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
Node* object = __ LoadRegister(args_reg);
// If the object is not a JSReceiver, we return null.
- __ GotoIf(__ WordIsSmi(object), &null);
+ __ GotoIf(__ TaggedIsSmi(object), &null);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
kInstanceTypeGreaterThanOrEqual);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 11fe4a0a8e..70ff291df3 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -35,7 +35,6 @@ namespace interpreter {
V(IsTypedArray, is_typed_array, 1) \
V(NewObject, new_object, 2) \
V(NumberToString, number_to_string, 1) \
- V(RegExpConstructResult, reg_exp_construct_result, 3) \
V(RegExpExec, reg_exp_exec, 4) \
V(SubString, sub_string, 3) \
V(ToString, to_string, 1) \
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 410030247f..81aecafecf 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -53,8 +53,8 @@ Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
}
void Interpreter::Initialize() {
- if (IsDispatchTableInitialized()) return;
- Zone zone(isolate_->allocator());
+ if (!ShouldInitializeDispatchTable()) return;
+ Zone zone(isolate_->allocator(), ZONE_NAME);
HandleScope scope(isolate_);
if (FLAG_trace_ignition_dispatches) {
@@ -103,6 +103,9 @@ void Interpreter::Initialize() {
dispatch_table_[index] = dispatch_table_[illegal_index];
}
}
+
+ // Initialization should have been successful.
+ DCHECK(IsDispatchTableInitialized());
}
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
@@ -197,6 +200,8 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
return FAILED;
}
+ CodeGenerator::MakeCodePrologue(info(), "interpreter");
+
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
@@ -213,13 +218,17 @@ CompilationJob* Interpreter::NewCompilationJob(CompilationInfo* info) {
}
bool Interpreter::IsDispatchTableInitialized() {
+ return dispatch_table_[0] != nullptr;
+}
+
+bool Interpreter::ShouldInitializeDispatchTable() {
if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
FLAG_trace_ignition_dispatches) {
// Regenerate table to add bytecode tracing operations, print the assembly
// code generated by TurboFan or instrument handlers with dispatch counters.
- return false;
+ return true;
}
- return dispatch_table_[0] != nullptr;
+ return !IsDispatchTableInitialized();
}
void Interpreter::TraceCodegen(Handle<Code> code) {
@@ -343,17 +352,6 @@ void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// LdrUndefined <reg>
-//
-// Loads undefined into the accumulator and |reg|.
-void Interpreter::DoLdrUndefined(InterpreterAssembler* assembler) {
- Node* undefined_value =
- __ HeapConstant(isolate_->factory()->undefined_value());
- Node* destination = __ BytecodeOperandReg(0);
- __ StoreRegister(undefined_value, destination);
- __ Dispatch();
-}
-
// LdaNull
//
// Load Null into the accumulator.
@@ -451,23 +449,6 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// LdrGlobal <slot> <reg>
-//
-// Load the global with name in constant pool entry <name_index> into
-// register <reg> using FeedBackVector slot <slot> outside of a typeof.
-void Interpreter::DoLdrGlobal(InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
-
- Node* context = __ GetContext();
-
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
- Node* destination = __ BytecodeOperandReg(1);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
// LdaGlobalInsideTypeof <slot>
//
// Load the global with name in constant pool entry <name_index> into the
@@ -488,9 +469,9 @@ void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* global = __ LoadContextSlot(native_context, Context::EXTENSION_INDEX);
+ Node* native_context = __ LoadNativeContext(context);
+ Node* global =
+ __ LoadContextElement(native_context, Context::EXTENSION_INDEX);
// Store the global via the StoreIC.
Node* code_target = __ HeapConstant(ic.code());
@@ -525,34 +506,29 @@ void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
DoStaGlobal(ic, assembler);
}
-compiler::Node* Interpreter::BuildLoadContextSlot(
- InterpreterAssembler* assembler) {
- Node* reg_index = __ BytecodeOperandReg(0);
- Node* context = __ LoadRegister(reg_index);
- Node* slot_index = __ BytecodeOperandIdx(1);
- Node* depth = __ BytecodeOperandUImm(2);
- Node* slot_context = __ GetContextAtDepth(context, depth);
- return __ LoadContextSlot(slot_context, slot_index);
-}
-
// LdaContextSlot <context> <slot_index> <depth>
//
// Load the object in |slot_index| of the context at |depth| in the context
// chain starting at |context| into the accumulator.
void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
- Node* result = BuildLoadContextSlot(assembler);
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* context = __ LoadRegister(reg_index);
+ Node* slot_index = __ BytecodeOperandIdx(1);
+ Node* depth = __ BytecodeOperandUImm(2);
+ Node* slot_context = __ GetContextAtDepth(context, depth);
+ Node* result = __ LoadContextElement(slot_context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrContextSlot <context> <slot_index> <depth> <reg>
+// LdaCurrentContextSlot <slot_index>
//
-// Load the object in |slot_index| of the context at |depth| in the context
-// chain of |context| into register |reg|.
-void Interpreter::DoLdrContextSlot(InterpreterAssembler* assembler) {
- Node* result = BuildLoadContextSlot(assembler);
- Node* destination = __ BytecodeOperandReg(3);
- __ StoreRegister(result, destination);
+// Load the object in |slot_index| of the current context into the accumulator.
+void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* slot_context = __ GetContext();
+ Node* result = __ LoadContextElement(slot_context, slot_index);
+ __ SetAccumulator(result);
__ Dispatch();
}
@@ -567,7 +543,19 @@ void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
Node* slot_index = __ BytecodeOperandIdx(1);
Node* depth = __ BytecodeOperandUImm(2);
Node* slot_context = __ GetContextAtDepth(context, depth);
- __ StoreContextSlot(slot_context, slot_index, value);
+ __ StoreContextElement(slot_context, slot_index, value);
+ __ Dispatch();
+}
+
+// StaCurrentContextSlot <slot_index>
+//
+// Stores the object in the accumulator into |slot_index| of the current
+// context.
+void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* slot_context = __ GetContext();
+ __ StoreContextElement(slot_context, slot_index, value);
__ Dispatch();
}
@@ -612,7 +600,7 @@ void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
// Fast path does a normal load context.
{
Node* slot_context = __ GetContextAtDepth(context, depth);
- Node* result = __ LoadContextSlot(slot_context, slot_index);
+ Node* result = __ LoadContextElement(slot_context, slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -724,9 +712,13 @@ void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
DoStaLookupSlot(LanguageMode::STRICT, assembler);
}
-Node* Interpreter::BuildLoadNamedProperty(Callable ic,
- InterpreterAssembler* assembler) {
+// LdaNamedProperty <object> <name_index> <slot>
+//
+// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
typedef LoadWithVectorDescriptor Descriptor;
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(register_index);
@@ -736,38 +728,21 @@ Node* Interpreter::BuildLoadNamedProperty(Callable ic,
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(
+ Node* result = __ CallStub(
ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// LdaNamedProperty <object> <name_index> <slot>
-//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index>.
-void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadNamedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrNamedProperty <object> <name_index> <slot> <reg>
+// KeyedLoadIC <object> <slot>
//
-// Calls the LoadIC at FeedBackVector slot <slot> for <object> and the name at
-// constant pool entry <name_index> and puts the result into register <reg>.
-void Interpreter::DoLdrNamedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadNamedProperty(ic, assembler);
- Node* destination = __ BytecodeOperandReg(3);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
-Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
- InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
typedef LoadWithVectorDescriptor Descriptor;
+ Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -776,35 +751,14 @@ Node* Interpreter::BuildLoadKeyedProperty(Callable ic,
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- return __ CallStub(
+ Node* result = __ CallStub(
ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
Arg(Descriptor::kVector, type_feedback_vector));
-}
-
-// KeyedLoadIC <object> <slot>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator.
-void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadKeyedProperty(ic, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdrKeyedProperty <object> <slot> <reg>
-//
-// Calls the KeyedLoadIC at FeedBackVector slot <slot> for <object> and the key
-// in the accumulator and puts the result in register <reg>.
-void Interpreter::DoLdrKeyedProperty(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
- Node* result = BuildLoadKeyedProperty(ic, assembler);
- Node* destination = __ BytecodeOperandReg(2);
- __ StoreRegister(result, destination);
- __ Dispatch();
-}
-
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
@@ -881,6 +835,88 @@ void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
DoKeyedStoreIC(ic, assembler);
}
+// LdaModuleVariable <cell_index> <depth>
+//
+// Load the contents of a module variable into the accumulator. The variable is
+// identified by <cell_index>. <depth> is the depth of the current context
+// relative to the module context.
+void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
+ Node* cell_index = __ BytecodeOperandImm(0);
+ Node* depth = __ BytecodeOperandUImm(1);
+
+ Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+ Node* module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+ Label if_export(assembler), if_import(assembler), end(assembler);
+ __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+ &if_import);
+
+ __ Bind(&if_export);
+ {
+ Node* regular_exports =
+ __ LoadObjectField(module, Module::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+ Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+ __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+ __ Goto(&end);
+ }
+
+ __ Bind(&if_import);
+ {
+ Node* regular_imports =
+ __ LoadObjectField(module, Module::kRegularImportsOffset);
+ // The actual array index is (-cell_index - 1).
+ Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
+ Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
+ __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
+// StaModuleVariable <cell_index> <depth>
+//
+// Store accumulator to the module variable identified by <cell_index>.
+// <depth> is the depth of the current context relative to the module context.
+void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
+ Node* value = __ GetAccumulator();
+ Node* cell_index = __ BytecodeOperandImm(0);
+ Node* depth = __ BytecodeOperandUImm(1);
+
+ Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
+ Node* module =
+ __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
+
+ Label if_export(assembler), if_import(assembler), end(assembler);
+ __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
+ &if_import);
+
+ __ Bind(&if_export);
+ {
+ Node* regular_exports =
+ __ LoadObjectField(module, Module::kRegularExportsOffset);
+ // The actual array index is (cell_index - 1).
+ Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
+ Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
+ __ StoreObjectField(cell, Cell::kValueOffset, value);
+ __ Goto(&end);
+ }
+
+ __ Bind(&if_import);
+ {
+ // Not supported (probably never).
+ __ Abort(kUnsupportedModuleOperation);
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
// PushContext <context>
//
// Saves the current context in <context>, and pushes the accumulator as the
@@ -904,14 +940,24 @@ void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
__ Dispatch();
}
-// TODO(mythria): Remove this function once all BinaryOps record type feedback.
-template <class Generator>
-void Interpreter::DoBinaryOp(InterpreterAssembler* assembler) {
+// TODO(mythria): Remove this function once all CompareOps record type feedback.
+void Interpreter::DoCompareOp(Token::Value compare_op,
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
Node* context = __ GetContext();
- Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ Node* result;
+ switch (compare_op) {
+ case Token::IN:
+ result = assembler->HasProperty(rhs, lhs, context);
+ break;
+ case Token::INSTANCEOF:
+ result = assembler->InstanceOf(lhs, rhs, context);
+ break;
+ default:
+ UNREACHABLE();
+ }
__ SetAccumulator(result);
__ Dispatch();
}
@@ -930,8 +976,8 @@ void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
__ Dispatch();
}
-template <class Generator>
-void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
+void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
@@ -950,7 +996,7 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
gather_rhs_type(assembler), do_compare(assembler);
- __ Branch(__ WordIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
__ Bind(&lhs_is_smi);
var_type_feedback.Bind(
@@ -976,7 +1022,7 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
__ Bind(&gather_rhs_type);
{
Label rhs_is_smi(assembler);
- __ GotoIf(__ WordIsSmi(rhs), &rhs_is_smi);
+ __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
Node* rhs_map = __ LoadMap(rhs);
Node* rhs_type =
@@ -999,7 +1045,39 @@ void Interpreter::DoCompareOpWithFeedback(InterpreterAssembler* assembler) {
__ Goto(&skip_feedback_update);
__ Bind(&skip_feedback_update);
- Node* result = Generator::Generate(assembler, lhs, rhs, context);
+ Node* result;
+ switch (compare_op) {
+ case Token::EQ:
+ result = assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs, rhs,
+ context);
+ break;
+ case Token::NE:
+ result =
+ assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context);
+ break;
+ case Token::EQ_STRICT:
+ result = assembler->StrictEqual(CodeStubAssembler::kDontNegateResult, lhs,
+ rhs, context);
+ break;
+ case Token::LT:
+ result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
+ lhs, rhs, context);
+ break;
+ case Token::GT:
+ result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
+ lhs, rhs, context);
+ break;
+ case Token::LTE:
+ result = assembler->RelationalComparison(
+ CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
+ break;
+ case Token::GTE:
+ result = assembler->RelationalComparison(
+ CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
+ break;
+ default:
+ UNREACHABLE();
+ }
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1089,13 +1167,13 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
}
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
if (FLAG_debug_code) {
Label ok(assembler);
- __ GotoIf(__ WordIsSmi(result), &ok);
+ __ GotoIf(__ TaggedIsSmi(result), &ok);
Node* result_map = __ LoadMap(result);
__ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
kExpectedHeapNumber);
@@ -1180,21 +1258,22 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path.
- __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi addition first.
- Node* pair = __ SmiAddWithOverflow(left, right);
+ Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
+ __ BitcastTaggedToWord(right));
Node* overflow = __ Projection(1, pair);
// Check if the Smi additon overflowed.
Label if_notoverflow(assembler);
- __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
- var_result.Bind(__ Projection(0, pair));
+ var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
}
}
@@ -1233,21 +1312,22 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
// {right} is known to be a Smi.
// Check if the {left} is a Smi take the fast path.
- __ BranchIf(__ WordIsSmi(left), &fastpath, &slowpath);
+ __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
__ Bind(&fastpath);
{
// Try fast Smi subtraction first.
- Node* pair = __ SmiSubWithOverflow(left, right);
+ Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
+ __ BitcastTaggedToWord(right));
Node* overflow = __ Projection(1, pair);
// Check if the Smi subtraction overflowed.
Label if_notoverflow(assembler);
- __ BranchIf(overflow, &slowpath, &if_notoverflow);
+ __ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
__ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
- var_result.Bind(__ Projection(0, pair));
+ var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
}
}
@@ -1287,7 +1367,7 @@ void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Or(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1315,7 +1395,7 @@ void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32And(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1345,7 +1425,7 @@ void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Shl(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1375,7 +1455,7 @@ void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
Node* value = __ Word32Sar(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
Node* result_type =
- __ Select(__ WordIsSmi(result),
+ __ Select(__ TaggedIsSmi(result),
__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
__ Int32Constant(BinaryOperationFeedback::kNumber));
__ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
@@ -1393,15 +1473,6 @@ Node* Interpreter::BuildUnaryOp(Callable callable,
}
template <class Generator>
-void Interpreter::DoUnaryOp(InterpreterAssembler* assembler) {
- Node* value = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = Generator::Generate(assembler, value, context);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-template <class Generator>
void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* context = __ GetContext();
@@ -1495,7 +1566,7 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
Label if_true(assembler), if_false(assembler), end(assembler);
Node* true_value = __ BooleanConstant(true);
Node* false_value = __ BooleanConstant(false);
- __ BranchIfWordEqual(value, true_value, &if_true, &if_false);
+ __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
__ Bind(&if_true);
{
result.Bind(false_value);
@@ -1520,7 +1591,11 @@ void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
// Load the accumulator with the string representating type of the
// object in the accumulator.
void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
- DoUnaryOp<TypeofStub>(assembler);
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = assembler->Typeof(value, context);
+ __ SetAccumulator(result);
+ __ Dispatch();
}
void Interpreter::DoDelete(Runtime::FunctionId function_id,
@@ -1578,6 +1653,17 @@ void Interpreter::DoCall(InterpreterAssembler* assembler) {
DoJSCall(assembler, TailCallMode::kDisallow);
}
+// CallProperty <callable> <receiver> <arg_count> <feedback_slot_id>
+//
+// Call a JSfunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers. Collect type feedback into
+// |feedback_slot_id|. The callable is known to be a property of the receiver.
+void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
+ // TODO(leszeks): Look into making the interpreter use the fact that the
+ // receiver is non-null.
+ DoJSCall(assembler, TailCallMode::kDisallow);
+}
+
// TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
//
// Tail call a JSfunction or Callable in |callable| with the |receiver| and
@@ -1660,9 +1746,8 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
// Get the function to call from the native context.
Node* context = __ GetContext();
- Node* native_context =
- __ LoadContextSlot(context, Context::NATIVE_CONTEXT_INDEX);
- Node* function = __ LoadContextSlot(native_context, context_index);
+ Node* native_context = __ LoadNativeContext(context);
+ Node* function = __ LoadContextElement(native_context, context_index);
// Call the function.
Node* result = __ CallJS(function, context, first_arg, args_count,
@@ -1698,35 +1783,35 @@ void Interpreter::DoNew(InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register equals the accumulator.
void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<EqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::EQ, assembler);
}
// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<NotEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::NE, assembler);
}
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<StrictEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
}
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<LessThanStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::LT, assembler);
}
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<GreaterThanStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::GT, assembler);
}
// TestLessThanOrEqual <src>
@@ -1734,7 +1819,7 @@ void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is less than or equal to the
// accumulator.
void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<LessThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::LTE, assembler);
}
// TestGreaterThanOrEqual <src>
@@ -1742,7 +1827,7 @@ void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
- DoCompareOpWithFeedback<GreaterThanOrEqualStub>(assembler);
+ DoCompareOpWithFeedback(Token::Value::GTE, assembler);
}
// TestIn <src>
@@ -1750,7 +1835,7 @@ void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
- DoBinaryOp<HasPropertyStub>(assembler);
+ DoCompareOp(Token::IN, assembler);
}
// TestInstanceOf <src>
@@ -1758,7 +1843,7 @@ void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
// Test if the object referenced by the <src> register is an an instance of type
// referenced by the accumulator.
void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
- DoBinaryOp<InstanceOfStub>(assembler);
+ DoCompareOp(Token::INSTANCEOF, assembler);
}
// Jump <imm>
@@ -2025,7 +2110,7 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
Node* use_fast_shallow_clone = __ Word32And(
bytecode_flags,
__ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
- __ BranchIf(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+ __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone);
{
@@ -2068,9 +2153,9 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Label if_fast_clone(assembler),
if_not_fast_clone(assembler, Label::kDeferred);
Node* fast_clone_properties_count =
- __ BitFieldDecode<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
+ __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
bytecode_flags);
- __ BranchIf(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+ __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone);
{
@@ -2217,7 +2302,7 @@ void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* duplicate_parameters_bit = __ Int32Constant(
1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
- __ BranchIf(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
+ __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
__ Bind(&if_not_duplicate_parameters);
{
@@ -2273,7 +2358,7 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
Node* interrupt = __ StackCheckTriggeredInterrupt();
- __ BranchIf(interrupt, &stack_check_interrupt, &ok);
+ __ Branch(interrupt, &stack_check_interrupt, &ok);
__ Bind(&ok);
__ Dispatch();
@@ -2363,7 +2448,7 @@ void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
Node* object_reg = __ BytecodeOperandReg(0);
Node* receiver = __ LoadRegister(object_reg);
Node* context = __ GetContext();
- Node* const zero_smi = __ SmiConstant(Smi::FromInt(0));
+ Node* const zero_smi = __ SmiConstant(Smi::kZero);
Label nothing_to_iterate(assembler, Label::kDeferred),
use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
@@ -2446,7 +2531,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
- __ BranchIfWordEqual(receiver_map, cache_type, &if_fast, &if_slow);
+ __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
@@ -2483,7 +2568,7 @@ void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
// Check if {index} is at {cache_length} already.
Label if_true(assembler), if_false(assembler), end(assembler);
- __ BranchIfWordEqual(index, cache_length, &if_true, &if_false);
+ __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
__ Bind(&if_true);
{
__ SetAccumulator(__ BooleanConstant(false));
@@ -2554,7 +2639,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
STATIC_ASSERT(StepFrame > StepNext);
STATIC_ASSERT(LastStepAction == StepFrame);
Node* step_next = __ Int32Constant(StepNext);
- __ BranchIfInt32LessThanOrEqual(step_next, step_action, &if_stepping, &ok);
+ __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
__ Bind(&ok);
Node* array =
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index b646bf8313..b10ae2e451 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -78,16 +78,12 @@ class Interpreter {
// Generates code to perform the binary operation via |Generator|.
template <class Generator>
- void DoBinaryOp(InterpreterAssembler* assembler);
-
- // Generates code to perform the binary operation via |Generator|.
- template <class Generator>
void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
// Generates code to perform the comparison via |Generator| while gathering
// type feedback.
- template <class Generator>
- void DoCompareOpWithFeedback(InterpreterAssembler* assembler);
+ void DoCompareOpWithFeedback(Token::Value compare_op,
+ InterpreterAssembler* assembler);
// Generates code to perform the bitwise binary operation corresponding to
// |bitwise_op| while gathering type feedback.
@@ -99,10 +95,6 @@ class Interpreter {
template <class Generator>
void DoBinaryOpWithImmediate(InterpreterAssembler* assembler);
- // Generates code to perform the unary operation via |Generator|.
- template <class Generator>
- void DoUnaryOp(InterpreterAssembler* assembler);
-
// Generates code to perform the unary operation via |Generator| while
// gatering type feedback.
template <class Generator>
@@ -147,22 +139,11 @@ class Interpreter {
void DoStaLookupSlot(LanguageMode language_mode,
InterpreterAssembler* assembler);
- // Generates code to load a context slot.
- compiler::Node* BuildLoadContextSlot(InterpreterAssembler* assembler);
-
// Generates code to load a global.
compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
compiler::Node* feedback_slot,
InterpreterAssembler* assembler);
- // Generates code to load a named property.
- compiler::Node* BuildLoadNamedProperty(Callable ic,
- InterpreterAssembler* assembler);
-
- // Generates code to load a keyed property.
- compiler::Node* BuildLoadKeyedProperty(Callable ic,
- InterpreterAssembler* assembler);
-
// Generates code to prepare the result for ForInPrepare. Cache data
// are placed into the consecutive series of registers starting at
// |output_register|.
@@ -183,6 +164,7 @@ class Interpreter {
OperandScale operand_scale);
bool IsDispatchTableInitialized();
+ bool ShouldInitializeDispatchTable();
static const int kNumberOfWideVariants = 3;
static const int kDispatchTableSize = kNumberOfWideVariants * (kMaxUInt8 + 1);
diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc
index 270fe83ef9..62d3a77e02 100644
--- a/deps/v8/src/interpreter/mkpeephole.cc
+++ b/deps/v8/src/interpreter/mkpeephole.cc
@@ -79,33 +79,6 @@ const char* PeepholeActionTableWriter::kNamespaceElements[] = {"v8", "internal",
// static
PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
Bytecode last, Bytecode current) {
- // Optimize various accumulator loads followed by store accumulator
- // to an equivalent register load and loading the accumulator with
- // the register. The latter accumulator load can often be elided as
- // it is side-effect free and often followed by another accumulator
- // load so can be elided.
- if (current == Bytecode::kStar) {
- switch (last) {
- case Bytecode::kLdaNamedProperty:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrNamedProperty};
- case Bytecode::kLdaKeyedProperty:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrKeyedProperty};
- case Bytecode::kLdaGlobal:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrGlobal};
- case Bytecode::kLdaContextSlot:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrContextSlot};
- case Bytecode::kLdaUndefined:
- return {PeepholeAction::kTransformLdaStarToLdrLdarAction,
- Bytecode::kLdrUndefined};
- default:
- break;
- }
- }
-
// ToName bytecodes can be replaced by Star with the same output register if
// the value in the accumulator is already a name.
if (current == Bytecode::kToName && Bytecodes::PutsNameInAccumulator(last)) {
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index 34c98bba64..a148968b27 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -130,17 +130,27 @@ bool Isolate::IsArraySpeciesLookupChainIntact() {
Cell* species_cell = heap()->species_protector();
return species_cell->value()->IsSmi() &&
- Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
+ Smi::cast(species_cell->value())->value() == kProtectorValid;
}
bool Isolate::IsHasInstanceLookupChainIntact() {
PropertyCell* has_instance_cell = heap()->has_instance_protector();
- return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+ return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
}
bool Isolate::IsStringLengthOverflowIntact() {
PropertyCell* has_instance_cell = heap()->string_length_protector();
- return has_instance_cell->value() == Smi::FromInt(kArrayProtectorValid);
+ return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
+bool Isolate::IsFastArrayIterationIntact() {
+ Cell* fast_iteration = heap()->fast_array_iteration_protector();
+ return fast_iteration->value() == Smi::FromInt(kProtectorValid);
+}
+
+bool Isolate::IsArrayIteratorLookupChainIntact() {
+ Cell* array_iterator_cell = heap()->array_iterator_protector();
+ return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
}
} // namespace internal
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 63c927b04c..0eab398238 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -20,12 +20,15 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
+#include "src/elements.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
+#include "src/ic/access-compiler-data.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"
#include "src/interpreter/interpreter.h"
@@ -39,6 +42,7 @@
#include "src/runtime-profiler.h"
#include "src/simulator.h"
#include "src/snapshot/deserializer.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/v8.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -507,7 +511,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
case StackFrame::WASM: {
WasmFrame* wasm_frame = WasmFrame::cast(frame);
- Handle<Object> wasm_object(wasm_frame->wasm_obj(), this);
+ Handle<Object> instance(wasm_frame->wasm_instance(), this);
const int wasm_function_index = wasm_frame->function_index();
Code* code = wasm_frame->unchecked_code();
Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
@@ -516,12 +520,15 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
// TODO(wasm): The wasm object returned by the WasmFrame should always
// be a wasm object.
- DCHECK(wasm::IsWasmObject(*wasm_object) ||
- wasm_object->IsUndefined(this));
+ DCHECK(wasm::IsWasmInstance(*instance) || instance->IsUndefined(this));
- elements = FrameArray::AppendWasmFrame(
- elements, wasm_object, wasm_function_index, abstract_code, offset,
- FrameArray::kIsWasmFrame);
+ int flags = wasm::WasmIsAsmJs(*instance, this)
+ ? FrameArray::kIsAsmJsWasmFrame
+ : FrameArray::kIsWasmFrame;
+
+ elements =
+ FrameArray::AppendWasmFrame(elements, instance, wasm_function_index,
+ abstract_code, offset, flags);
} break;
default:
@@ -627,7 +634,7 @@ class CaptureStackTraceHelper {
if (!line_key_.is_null()) {
Script::PositionInfo info;
bool valid_pos =
- script->GetPositionInfo(position, &info, Script::WITH_OFFSET);
+ Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
if (!column_key_.is_null() && valid_pos) {
JSObject::AddProperty(stack_frame, column_key_,
@@ -695,17 +702,18 @@ class CaptureStackTraceHelper {
if (!function_key_.is_null()) {
Handle<String> name = wasm::GetWasmFunctionName(
- isolate_, handle(frame->wasm_obj(), isolate_),
+ isolate_, handle(frame->wasm_instance(), isolate_),
frame->function_index());
JSObject::AddProperty(stack_frame, function_key_, name, NONE);
}
- // Encode the function index as line number.
+ // Encode the function index as line number (1-based).
if (!line_key_.is_null()) {
JSObject::AddProperty(
stack_frame, line_key_,
- isolate_->factory()->NewNumberFromInt(frame->function_index()), NONE);
+ isolate_->factory()->NewNumberFromInt(frame->function_index() + 1),
+ NONE);
}
- // Encode the byte offset as column.
+ // Encode the byte offset as column (1-based).
if (!column_key_.is_null()) {
Code* code = frame->LookupCode();
int offset = static_cast<int>(frame->pc() - code->instruction_start());
@@ -1063,6 +1071,39 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
HandleScope scope(this);
Handle<Object> exception_handle(exception, this);
+ if (FLAG_print_all_exceptions) {
+ printf("=========================================================\n");
+ printf("Exception thrown:\n");
+ if (location) {
+ Handle<Script> script = location->script();
+ Handle<Object> name = Script::GetNameOrSourceURL(script);
+ printf("at ");
+ if (name->IsString() && String::cast(*name)->length() > 0)
+ String::cast(*name)->PrintOn(stdout);
+ else
+ printf("<anonymous>");
+// Script::GetLineNumber and Script::GetColumnNumber can allocate on the heap to
+// initialize the line_ends array, so be careful when calling them.
+#ifdef DEBUG
+ if (AllowHeapAllocation::IsAllowed()) {
+#else
+ if (false) {
+#endif
+ printf(", %d:%d - %d:%d\n",
+ Script::GetLineNumber(script, location->start_pos()) + 1,
+ Script::GetColumnNumber(script, location->start_pos()),
+ Script::GetLineNumber(script, location->end_pos()) + 1,
+ Script::GetColumnNumber(script, location->end_pos()));
+ } else {
+ printf(", line %d\n", script->GetLineNumber(location->start_pos()) + 1);
+ }
+ }
+ exception->Print();
+ printf("Stack Trace:\n");
+ PrintStack(stdout);
+ printf("=========================================================\n");
+ }
+
// Determine whether a message needs to be created for the given exception
// depending on the following criteria:
// 1) External v8::TryCatch missing: Always create a message because any
@@ -1228,9 +1269,19 @@ Object* Isolate::UnwindAndFindHandler() {
// For interpreted frame we perform a range lookup in the handler table.
if (frame->is_interpreted() && catchable_by_js) {
InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
+ int register_slots = js_frame->GetBytecodeArray()->register_count();
int context_reg = 0; // Will contain register index holding context.
offset = js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
if (offset >= 0) {
+ // Compute the stack pointer from the frame pointer. This ensures that
+ // argument slots on the stack are dropped as returning would.
+ // Note: This is only needed for interpreted frames that have been
+ // materialized by the deoptimizer. If there is a handler frame
+ // in between then {frame->sp()} would already be correct.
+ Address return_sp = frame->fp() -
+ InterpreterFrameConstants::kFixedFrameSizeFromFp -
+ register_slots * kPointerSize;
+
// Patch the bytecode offset in the interpreted frame to reflect the
// position of the exception handler. The special builtin below will
// take care of continuing to dispatch at that position. Also restore
@@ -1241,7 +1292,7 @@ Object* Isolate::UnwindAndFindHandler() {
// Gather information from the frame.
code = *builtins()->InterpreterEnterBytecodeDispatch();
- handler_sp = frame->sp();
+ handler_sp = return_sp;
handler_fp = frame->fp();
break;
}
@@ -1933,48 +1984,102 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
class VerboseAccountingAllocator : public AccountingAllocator {
public:
- VerboseAccountingAllocator(Heap* heap, size_t sample_bytes)
- : heap_(heap), last_memory_usage_(0), sample_bytes_(sample_bytes) {}
-
- v8::internal::Segment* AllocateSegment(size_t size) override {
- v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
+ VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
+ size_t pool_sample_bytes)
+ : heap_(heap),
+ last_memory_usage_(0),
+ last_pool_size_(0),
+ nesting_deepth_(0),
+ allocation_sample_bytes_(allocation_sample_bytes),
+ pool_sample_bytes_(pool_sample_bytes) {}
+
+ v8::internal::Segment* GetSegment(size_t size) override {
+ v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
if (memory) {
- size_t current = GetCurrentMemoryUsage();
- if (last_memory_usage_.Value() + sample_bytes_ < current) {
- PrintJSON(current);
- last_memory_usage_.SetValue(current);
+ size_t malloced_current = GetCurrentMemoryUsage();
+ size_t pooled_current = GetCurrentPoolSize();
+
+ if (last_memory_usage_.Value() + allocation_sample_bytes_ <
+ malloced_current ||
+ last_pool_size_.Value() + pool_sample_bytes_ < pooled_current) {
+ PrintMemoryJSON(malloced_current, pooled_current);
+ last_memory_usage_.SetValue(malloced_current);
+ last_pool_size_.SetValue(pooled_current);
}
}
return memory;
}
- void FreeSegment(v8::internal::Segment* memory) override {
- AccountingAllocator::FreeSegment(memory);
- size_t current = GetCurrentMemoryUsage();
- if (current + sample_bytes_ < last_memory_usage_.Value()) {
- PrintJSON(current);
- last_memory_usage_.SetValue(current);
+ void ReturnSegment(v8::internal::Segment* memory) override {
+ AccountingAllocator::ReturnSegment(memory);
+ size_t malloced_current = GetCurrentMemoryUsage();
+ size_t pooled_current = GetCurrentPoolSize();
+
+ if (malloced_current + allocation_sample_bytes_ <
+ last_memory_usage_.Value() ||
+ pooled_current + pool_sample_bytes_ < last_pool_size_.Value()) {
+ PrintMemoryJSON(malloced_current, pooled_current);
+ last_memory_usage_.SetValue(malloced_current);
+ last_pool_size_.SetValue(pooled_current);
}
}
+ void ZoneCreation(const Zone* zone) override {
+ double time = heap_->isolate()->time_millis_since_init();
+ PrintF(
+ "{"
+ "\"type\": \"zonecreation\", "
+ "\"isolate\": \"%p\", "
+ "\"time\": %f, "
+ "\"ptr\": \"%p\", "
+ "\"name\": \"%s\","
+ "\"nesting\": %zu"
+ "}\n",
+ reinterpret_cast<void*>(heap_->isolate()), time,
+ reinterpret_cast<const void*>(zone), zone->name(),
+ nesting_deepth_.Value());
+ nesting_deepth_.Increment(1);
+ }
+
+ void ZoneDestruction(const Zone* zone) override {
+ nesting_deepth_.Decrement(1);
+ double time = heap_->isolate()->time_millis_since_init();
+ PrintF(
+ "{"
+ "\"type\": \"zonedestruction\", "
+ "\"isolate\": \"%p\", "
+ "\"time\": %f, "
+ "\"ptr\": \"%p\", "
+ "\"name\": \"%s\", "
+ "\"size\": %zu,"
+ "\"nesting\": %zu"
+ "}\n",
+ reinterpret_cast<void*>(heap_->isolate()), time,
+ reinterpret_cast<const void*>(zone), zone->name(),
+ zone->allocation_size(), nesting_deepth_.Value());
+ }
+
private:
- void PrintJSON(size_t sample) {
+ void PrintMemoryJSON(size_t malloced, size_t pooled) {
// Note: Neither isolate, nor heap is locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
double time = heap_->isolate()->time_millis_since_init();
PrintF(
"{"
- "\"type\": \"malloced\", "
+ "\"type\": \"zone\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
- "\"value\": %zu"
+ "\"allocated\": %zu,"
+ "\"pooled\": %zu"
"}\n",
- reinterpret_cast<void*>(heap_->isolate()), time, sample);
+ reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
}
Heap* heap_;
base::AtomicNumber<size_t> last_memory_usage_;
- size_t sample_bytes_;
+ base::AtomicNumber<size_t> last_pool_size_;
+ base::AtomicNumber<size_t> nesting_deepth_;
+ size_t allocation_sample_bytes_, pool_sample_bytes_;
};
Isolate::Isolate(bool enable_serializer)
@@ -1997,15 +2102,13 @@ Isolate::Isolate(bool enable_serializer)
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
stack_trace_for_uncaught_exceptions_options_(StackTrace::kOverview),
- keyed_lookup_cache_(NULL),
context_slot_cache_(NULL),
descriptor_lookup_cache_(NULL),
handle_scope_implementer_(NULL),
unicode_cache_(NULL),
- allocator_(FLAG_trace_gc_object_stats
- ? new VerboseAccountingAllocator(&heap_, 256 * KB)
- : new AccountingAllocator()),
- runtime_zone_(new Zone(allocator_)),
+ allocator_(FLAG_trace_gc_object_stats ? new VerboseAccountingAllocator(
+ &heap_, 256 * KB, 128 * KB)
+ : new AccountingAllocator()),
inner_pointer_to_code_cache_(NULL),
global_handles_(NULL),
eternal_handles_(NULL),
@@ -2031,7 +2134,6 @@ Isolate::Isolate(bool enable_serializer)
optimizing_compile_dispatcher_(NULL),
stress_deopt_count_(0),
next_optimization_id_(0),
- js_calls_from_api_counter_(0),
#if TRACE_MAPS
next_unique_sfi_id_(0),
#endif
@@ -2170,13 +2272,16 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = NULL;
+ cancelable_task_manager()->CancelAndWait();
+
heap_.TearDown();
logger_->TearDown();
delete interpreter_;
interpreter_ = NULL;
- cancelable_task_manager()->CancelAndWait();
+ delete compiler_dispatcher_tracer_;
+ compiler_dispatcher_tracer_ = nullptr;
delete cpu_profiler_;
cpu_profiler_ = NULL;
@@ -2200,9 +2305,6 @@ void Isolate::SetIsolateThreadLocals(Isolate* isolate,
Isolate::~Isolate() {
TRACE_ISOLATE(destructor);
- // Has to be called while counters_ are still alive
- runtime_zone_->DeleteKeptSegment();
-
// The entry stack must be empty when we get here.
DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
@@ -2218,6 +2320,9 @@ Isolate::~Isolate() {
delete[] call_descriptor_data_;
call_descriptor_data_ = NULL;
+ delete access_compiler_data_;
+ access_compiler_data_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -2225,8 +2330,6 @@ Isolate::~Isolate() {
descriptor_lookup_cache_ = NULL;
delete context_slot_cache_;
context_slot_cache_ = NULL;
- delete keyed_lookup_cache_;
- keyed_lookup_cache_ = NULL;
delete load_stub_cache_;
load_stub_cache_ = NULL;
@@ -2279,9 +2382,6 @@ Isolate::~Isolate() {
delete cancelable_task_manager_;
cancelable_task_manager_ = nullptr;
- delete runtime_zone_;
- runtime_zone_ = nullptr;
-
delete allocator_;
allocator_ = nullptr;
@@ -2371,7 +2471,6 @@ bool Isolate::Init(Deserializer* des) {
#undef ASSIGN_ELEMENT
compilation_cache_ = new CompilationCache(this);
- keyed_lookup_cache_ = new KeyedLookupCache();
context_slot_cache_ = new ContextSlotCache();
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();
@@ -2388,9 +2487,11 @@ bool Isolate::Init(Deserializer* des) {
date_cache_ = new DateCache();
call_descriptor_data_ =
new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
+ access_compiler_data_ = new AccessCompilerData();
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
+ compiler_dispatcher_tracer_ = new CompilerDispatcherTracer(this);
// Enable logging before setting up the heap
logger_->SetUp(this);
@@ -2471,9 +2572,7 @@ bool Isolate::Init(Deserializer* des) {
}
load_stub_cache_->Initialize();
store_stub_cache_->Initialize();
- if (FLAG_ignition || serializer_enabled()) {
- interpreter_->Initialize();
- }
+ interpreter_->Initialize();
heap_.NotifyDeserializationComplete();
}
@@ -2651,8 +2750,8 @@ void Isolate::DumpAndResetCompilationStats() {
turbo_statistics_ = nullptr;
delete hstatistics_;
hstatistics_ = nullptr;
- if (FLAG_runtime_call_stats &&
- !TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
+ if (V8_UNLIKELY(FLAG_runtime_stats ==
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
OFStream os(stdout);
counters()->runtime_call_stats()->Print(os);
counters()->runtime_call_stats()->Reset();
@@ -2733,7 +2832,7 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
PropertyCell* no_elements_cell = heap()->array_protector();
bool cell_reports_intact =
no_elements_cell->value()->IsSmi() &&
- Smi::cast(no_elements_cell->value())->value() == kArrayProtectorValid;
+ Smi::cast(no_elements_cell->value())->value() == kProtectorValid;
#ifdef DEBUG
Map* root_array_map =
@@ -2792,7 +2891,7 @@ bool Isolate::IsIsConcatSpreadableLookupChainIntact() {
Cell* is_concat_spreadable_cell = heap()->is_concat_spreadable_protector();
bool is_is_concat_spreadable_set =
Smi::cast(is_concat_spreadable_cell->value())->value() ==
- kArrayProtectorInvalid;
+ kProtectorInvalid;
#ifdef DEBUG
Map* root_array_map = get_initial_js_array_map(GetInitialFastElementsKind());
if (root_array_map == NULL) {
@@ -2827,7 +2926,7 @@ void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
if (!IsArrayOrObjectPrototype(*object)) return;
PropertyCell::SetValueWithInvalidation(
factory()->array_protector(),
- handle(Smi::FromInt(kArrayProtectorInvalid), this));
+ handle(Smi::FromInt(kProtectorInvalid), this));
}
void Isolate::InvalidateHasInstanceProtector() {
@@ -2835,7 +2934,7 @@ void Isolate::InvalidateHasInstanceProtector() {
DCHECK(IsHasInstanceLookupChainIntact());
PropertyCell::SetValueWithInvalidation(
factory()->has_instance_protector(),
- handle(Smi::FromInt(kArrayProtectorInvalid), this));
+ handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsHasInstanceLookupChainIntact());
}
@@ -2843,15 +2942,14 @@ void Isolate::InvalidateIsConcatSpreadableProtector() {
DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
DCHECK(IsIsConcatSpreadableLookupChainIntact());
factory()->is_concat_spreadable_protector()->set_value(
- Smi::FromInt(kArrayProtectorInvalid));
+ Smi::FromInt(kProtectorInvalid));
DCHECK(!IsIsConcatSpreadableLookupChainIntact());
}
void Isolate::InvalidateArraySpeciesProtector() {
DCHECK(factory()->species_protector()->value()->IsSmi());
DCHECK(IsArraySpeciesLookupChainIntact());
- factory()->species_protector()->set_value(
- Smi::FromInt(kArrayProtectorInvalid));
+ factory()->species_protector()->set_value(Smi::FromInt(kProtectorInvalid));
DCHECK(!IsArraySpeciesLookupChainIntact());
}
@@ -2860,10 +2958,18 @@ void Isolate::InvalidateStringLengthOverflowProtector() {
DCHECK(IsStringLengthOverflowIntact());
PropertyCell::SetValueWithInvalidation(
factory()->string_length_protector(),
- handle(Smi::FromInt(kArrayProtectorInvalid), this));
+ handle(Smi::FromInt(kProtectorInvalid), this));
DCHECK(!IsStringLengthOverflowIntact());
}
+void Isolate::InvalidateArrayIteratorProtector() {
+ DCHECK(factory()->array_iterator_protector()->value()->IsSmi());
+ DCHECK(IsArrayIteratorLookupChainIntact());
+ factory()->array_iterator_protector()->set_value(
+ Smi::FromInt(kProtectorInvalid));
+ DCHECK(!IsArrayIteratorLookupChainIntact());
+}
+
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -2888,6 +2994,14 @@ base::RandomNumberGenerator* Isolate::random_number_generator() {
return random_number_generator_;
}
+int Isolate::GenerateIdentityHash(uint32_t mask) {
+ int hash;
+ int attempts = 0;
+ do {
+ hash = random_number_generator()->NextInt() & mask;
+ } while (hash == 0 && attempts++ < 30);
+ return hash != 0 ? hash : 1;
+}
Object* Isolate::FindCodeObject(Address a) {
return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
@@ -3005,20 +3119,88 @@ void Isolate::ReportPromiseReject(Handle<JSObject> promise,
v8::Utils::StackTraceToLocal(stack_trace)));
}
-void Isolate::PromiseResolveThenableJob(Handle<PromiseContainer> container,
- MaybeHandle<Object>* result,
- MaybeHandle<Object>* maybe_exception) {
- if (debug()->is_active()) {
- Handle<Object> before_debug_event(container->before_debug_event(), this);
- if (before_debug_event->IsJSObject()) {
- debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(before_debug_event));
+namespace {
+class PromiseDebugEventScope {
+ public:
+ PromiseDebugEventScope(Isolate* isolate, Object* id, Object* name)
+ : isolate_(isolate),
+ id_(id, isolate_),
+ name_(name, isolate_),
+ is_debug_active_(isolate_->debug()->is_active() && id_->IsNumber() &&
+ name_->IsString()) {
+ if (is_debug_active_) {
+ isolate_->debug()->OnAsyncTaskEvent(
+ isolate_->factory()->will_handle_string(), id_,
+ Handle<String>::cast(name_));
+ }
+ }
+
+ ~PromiseDebugEventScope() {
+ if (is_debug_active_) {
+ isolate_->debug()->OnAsyncTaskEvent(
+ isolate_->factory()->did_handle_string(), id_,
+ Handle<String>::cast(name_));
}
}
- Handle<JSReceiver> thenable(container->thenable(), this);
- Handle<JSFunction> resolve(container->resolve(), this);
- Handle<JSFunction> reject(container->reject(), this);
- Handle<JSReceiver> then(container->then(), this);
+ private:
+ Isolate* isolate_;
+ Handle<Object> id_;
+ Handle<Object> name_;
+ bool is_debug_active_;
+};
+} // namespace
+
+void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
+ MaybeHandle<Object>* result,
+ MaybeHandle<Object>* maybe_exception) {
+ PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
+
+ Handle<Object> value(info->value(), this);
+ Handle<Object> tasks(info->tasks(), this);
+ Handle<JSFunction> promise_handle_fn = promise_handle();
+ Handle<Object> undefined = factory()->undefined_value();
+
+ // If tasks is an array we have multiple onFulfilled/onRejected callbacks
+ // associated with the promise. The deferred object for each callback
+ // is attached to this array as well.
+ // Otherwise, there is a single callback and the deferred object is attached
+ // directly to PromiseReactionJobInfo.
+ if (tasks->IsJSArray()) {
+ Handle<JSArray> array = Handle<JSArray>::cast(tasks);
+ DCHECK(array->length()->IsSmi());
+ int length = Smi::cast(array->length())->value();
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ DCHECK(length % 2 == 0);
+ for (int i = 0; i < length; i += 2) {
+ DCHECK(accessor->HasElement(array, i));
+ DCHECK(accessor->HasElement(array, i + 1));
+ Handle<Object> argv[] = {value, accessor->Get(array, i),
+ accessor->Get(array, i + 1)};
+ *result = Execution::TryCall(this, promise_handle_fn, undefined,
+ arraysize(argv), argv, maybe_exception);
+ // If execution is terminating, just bail out.
+ if (result->is_null() && maybe_exception->is_null()) {
+ return;
+ }
+ }
+ } else {
+ Handle<Object> deferred(info->deferred(), this);
+ Handle<Object> argv[] = {value, tasks, deferred};
+ *result = Execution::TryCall(this, promise_handle_fn, undefined,
+ arraysize(argv), argv, maybe_exception);
+ }
+}
+
+void Isolate::PromiseResolveThenableJob(
+ Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
+ MaybeHandle<Object>* maybe_exception) {
+ PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
+
+ Handle<JSReceiver> thenable(info->thenable(), this);
+ Handle<JSFunction> resolve(info->resolve(), this);
+ Handle<JSFunction> reject(info->reject(), this);
+ Handle<JSReceiver> then(info->then(), this);
Handle<Object> argv[] = {resolve, reject};
*result = Execution::TryCall(this, then, thenable, arraysize(argv), argv,
maybe_exception);
@@ -3031,18 +3213,12 @@ void Isolate::PromiseResolveThenableJob(Handle<PromiseContainer> container,
Execution::TryCall(this, reject, factory()->undefined_value(),
arraysize(reason_arg), reason_arg, maybe_exception);
}
-
- if (debug()->is_active()) {
- Handle<Object> after_debug_event(container->after_debug_event(), this);
- if (after_debug_event->IsJSObject()) {
- debug()->OnAsyncTaskEvent(Handle<JSObject>::cast(after_debug_event));
- }
- }
}
void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo() ||
- microtask->IsPromiseContainer());
+ microtask->IsPromiseResolveThenableJobInfo() ||
+ microtask->IsPromiseReactionJobInfo());
Handle<FixedArray> queue(heap()->microtask_queue(), this);
int num_tasks = pending_microtask_count();
DCHECK(num_tasks <= queue->length());
@@ -3094,11 +3270,16 @@ void Isolate::RunMicrotasksInternal() {
callback(data);
} else {
SaveContext save(this);
- Context* context = microtask->IsJSFunction()
- ? Handle<JSFunction>::cast(microtask)->context()
- : Handle<PromiseContainer>::cast(microtask)
- ->resolve()
- ->context();
+ Context* context;
+ if (microtask->IsJSFunction()) {
+ context = Handle<JSFunction>::cast(microtask)->context();
+ } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+ context =
+ Handle<PromiseResolveThenableJobInfo>::cast(microtask)->context();
+ } else {
+ context = Handle<PromiseReactionJobInfo>::cast(microtask)->context();
+ }
+
set_context(context->native_context());
handle_scope_implementer_->EnterMicrotaskContext(
Handle<Context>(context, this));
@@ -3112,9 +3293,13 @@ void Isolate::RunMicrotasksInternal() {
result = Execution::TryCall(this, microtask_function,
factory()->undefined_value(), 0, NULL,
&maybe_exception);
+ } else if (microtask->IsPromiseResolveThenableJobInfo()) {
+ PromiseResolveThenableJob(
+ Handle<PromiseResolveThenableJobInfo>::cast(microtask), &result,
+ &maybe_exception);
} else {
- PromiseResolveThenableJob(Handle<PromiseContainer>::cast(microtask),
- &result, &maybe_exception);
+ PromiseReactionJob(Handle<PromiseReactionJobInfo>::cast(microtask),
+ &result, &maybe_exception);
}
handle_scope_implementer_->LeaveMicrotaskContext();
@@ -3213,7 +3398,7 @@ void Isolate::AddDetachedContext(Handle<Context> context) {
Handle<FixedArray> detached_contexts = factory()->detached_contexts();
int length = detached_contexts->length();
detached_contexts = factory()->CopyFixedArrayAndGrow(detached_contexts, 2);
- detached_contexts->set(length, Smi::FromInt(0));
+ detached_contexts->set(length, Smi::kZero);
detached_contexts->set(length + 1, *cell);
heap()->set_detached_contexts(*detached_contexts);
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 8d0d3b478f..87bc45bb51 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -33,6 +33,8 @@ class RandomNumberGenerator;
namespace internal {
+class AccessCompilerData;
+class AddressToIndexHashMap;
class BasicBlockProfiler;
class Bootstrapper;
class CancelableTaskManager;
@@ -44,6 +46,7 @@ class CodeRange;
class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
+class CompilerDispatcherTracer;
class CompilationStatistics;
class ContextSlotCache;
class Counters;
@@ -57,12 +60,12 @@ class ExternalCallbackScope;
class ExternalReferenceTable;
class Factory;
class HandleScopeImplementer;
+class HeapObjectToIndexHashMap;
class HeapProfiler;
class HStatistics;
class HTracer;
class InlineRuntimeFunctionsTable;
class InnerPointerToCodeCache;
-class KeyedLookupCache;
class Logger;
class MaterializedObjectStore;
class OptimizingCompileDispatcher;
@@ -116,16 +119,6 @@ class Interpreter;
#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
-#define RETURN_RESULT(isolate, call, T) \
- do { \
- Handle<T> __result__; \
- if (!(call).ToHandle(&__result__)) { \
- DCHECK((isolate)->has_pending_exception()); \
- return MaybeHandle<T>(); \
- } \
- return __result__; \
- } while (false)
-
#define RETURN_RESULT_OR_FAILURE(isolate, call) \
do { \
Handle<Object> __result__; \
@@ -409,9 +402,12 @@ typedef List<HeapObject*> DebugObjectCache;
V(Object*, string_stream_current_security_token, nullptr) \
V(ExternalReferenceTable*, external_reference_table, nullptr) \
V(intptr_t*, api_external_references, nullptr) \
- V(base::HashMap*, external_reference_map, nullptr) \
- V(base::HashMap*, root_index_map, nullptr) \
+ V(AddressToIndexHashMap*, external_reference_map, nullptr) \
+ V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
+ V(v8::DeserializeInternalFieldsCallback, \
+ deserialize_internal_fields_callback, nullptr) \
V(int, pending_microtask_count, 0) \
+ V(int, debug_microtask_count, 0) \
V(HStatistics*, hstatistics, nullptr) \
V(CompilationStatistics*, turbo_statistics, nullptr) \
V(HTracer*, htracer, nullptr) \
@@ -723,7 +719,7 @@ class Isolate {
void ReportFailedAccessCheck(Handle<JSObject> receiver);
// Exception throwing support. The caller should use the result
- // of Throw() as its return vaue.
+ // of Throw() as its return value.
Object* Throw(Object* exception, MessageLocation* location = NULL);
Object* ThrowIllegalOperation();
@@ -868,10 +864,6 @@ class Isolate {
return materialized_object_store_;
}
- KeyedLookupCache* keyed_lookup_cache() {
- return keyed_lookup_cache_;
- }
-
ContextSlotCache* context_slot_cache() {
return context_slot_cache_;
}
@@ -886,7 +878,6 @@ class Isolate {
DCHECK(handle_scope_implementer_);
return handle_scope_implementer_;
}
- Zone* runtime_zone() { return runtime_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
@@ -927,6 +918,8 @@ class Isolate {
RegExpStack* regexp_stack() { return regexp_stack_; }
+ List<int>* regexp_indices() { return &regexp_indices_; }
+
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
interp_canonicalize_mapping() {
return &regexp_macro_assembler_canonicalize_;
@@ -994,8 +987,8 @@ class Isolate {
Map* get_initial_js_array_map(ElementsKind kind);
- static const int kArrayProtectorValid = 1;
- static const int kArrayProtectorInvalid = 0;
+ static const int kProtectorValid = 1;
+ static const int kProtectorInvalid = 0;
bool IsFastArrayConstructorPrototypeChainIntact();
inline bool IsArraySpeciesLookupChainIntact();
@@ -1003,6 +996,10 @@ class Isolate {
bool IsIsConcatSpreadableLookupChainIntact();
bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
inline bool IsStringLengthOverflowIntact();
+ inline bool IsArrayIteratorLookupChainIntact();
+
+ // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
+ inline bool IsFastArrayIterationIntact();
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
@@ -1022,12 +1019,15 @@ class Isolate {
void InvalidateHasInstanceProtector();
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
+ void InvalidateArrayIteratorProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
CallInterfaceDescriptorData* call_descriptor_data(int index);
+ AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1063,7 +1063,11 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
- base::RandomNumberGenerator* random_number_generator();
+ V8_EXPORT_PRIVATE base::RandomNumberGenerator* random_number_generator();
+
+ // Generates a random number that is non-zero when masked
+ // with the provided mask.
+ int GenerateIdentityHash(uint32_t mask);
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
@@ -1076,12 +1080,6 @@ class Isolate {
return id;
}
- void IncrementJsCallsFromApiCounter() { ++js_calls_from_api_counter_; }
-
- unsigned int js_calls_from_api_counter() {
- return js_calls_from_api_counter_;
- }
-
// Get (and lazily initialize) the registry for per-isolate symbols.
Handle<JSObject> GetSymbolRegistry();
@@ -1101,12 +1099,16 @@ class Isolate {
void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
- void PromiseResolveThenableJob(Handle<PromiseContainer> container,
+ void PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
+ MaybeHandle<Object>* result,
+ MaybeHandle<Object>* maybe_exception);
+ void PromiseResolveThenableJob(Handle<PromiseResolveThenableJobInfo> info,
MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception);
void EnqueueMicrotask(Handle<Object> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
+ int GetNextDebugMicrotaskId() { return debug_microtask_count_++; }
void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
@@ -1151,6 +1153,10 @@ class Isolate {
AccountingAllocator* allocator() { return allocator_; }
+ CompilerDispatcherTracer* compiler_dispatcher_tracer() const {
+ return compiler_dispatcher_tracer_;
+ }
+
bool IsInAnyContext(Object* object, uint32_t index);
void SetRAILMode(RAILMode rail_mode);
@@ -1319,14 +1325,12 @@ class Isolate {
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
- KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
AccountingAllocator* allocator_;
- Zone* runtime_zone_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
GlobalHandles* global_handles_;
EternalHandles* eternal_handles_;
@@ -1339,8 +1343,10 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
+ List<int> regexp_indices_;
DateCache* date_cache_;
CallInterfaceDescriptorData* call_descriptor_data_;
+ AccessCompilerData* access_compiler_data_;
base::RandomNumberGenerator* random_number_generator_;
base::AtomicValue<RAILMode> rail_mode_;
@@ -1377,6 +1383,8 @@ class Isolate {
interpreter::Interpreter* interpreter_;
+ CompilerDispatcherTracer* compiler_dispatcher_tracer_;
+
typedef std::pair<InterruptCallback, void*> InterruptEntry;
std::queue<InterruptEntry> api_interrupts_queue_;
@@ -1409,9 +1417,6 @@ class Isolate {
int next_optimization_id_;
- // Counts javascript calls from the API. Wraps around on overflow.
- unsigned int js_calls_from_api_counter_;
-
#if TRACE_MAPS
int next_unique_sfi_id_;
#endif
diff --git a/deps/v8/src/js/array-iterator.js b/deps/v8/src/js/array-iterator.js
deleted file mode 100644
index 227f733a05..0000000000
--- a/deps/v8/src/js/array-iterator.js
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -----------------------------------------------------------------------
-// Imports
-
-var arrayIterationKindSymbol =
- utils.ImportNow("array_iteration_kind_symbol");
-var arrayIteratorNextIndexSymbol =
- utils.ImportNow("array_iterator_next_symbol");
-var arrayIteratorObjectSymbol =
- utils.ImportNow("array_iterator_object_symbol");
-var GlobalArray = global.Array;
-var IteratorPrototype = utils.ImportNow("IteratorPrototype");
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var GlobalTypedArray = %object_get_prototype_of(global.Uint8Array);
-
-// -----------------------------------------------------------------------
-
-function ArrayIterator() {}
-
-
-// TODO(wingo): Update section numbers when ES6 has stabilized. The
-// section numbers below are already out of date as of the May 2014
-// draft.
-
-
-// 15.4.5.1 CreateArrayIterator Abstract Operation
-function CreateArrayIterator(array, kind) {
- var object = TO_OBJECT(array);
- var iterator = new ArrayIterator;
- SET_PRIVATE(iterator, arrayIteratorObjectSymbol, object);
- SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, 0);
- SET_PRIVATE(iterator, arrayIterationKindSymbol, kind);
- return iterator;
-}
-
-
-// 22.1.5.2.2 %ArrayIteratorPrototype%[@@iterator]
-function ArrayIteratorIterator() {
- return this;
-}
-
-
-// ES6 section 22.1.5.2.1 %ArrayIteratorPrototype%.next( )
-function ArrayIteratorNext() {
- var iterator = this;
- var value = UNDEFINED;
- var done = true;
-
- if (!IS_RECEIVER(iterator) ||
- !HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Array Iterator.prototype.next', this);
- }
-
- var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
- if (!IS_UNDEFINED(array)) {
- var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
- var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
- var length = TO_UINT32(array.length);
-
- // "sparse" is never used.
-
- if (index >= length) {
- SET_PRIVATE(iterator, arrayIteratorObjectSymbol, UNDEFINED);
- } else {
- SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
-
- if (itemKind == ITERATOR_KIND_VALUES) {
- value = array[index];
- } else if (itemKind == ITERATOR_KIND_ENTRIES) {
- value = [index, array[index]];
- } else {
- value = index;
- }
- done = false;
- }
- }
-
- return %_CreateIterResultObject(value, done);
-}
-
-
-function ArrayEntries() {
- return CreateArrayIterator(this, ITERATOR_KIND_ENTRIES);
-}
-
-
-function ArrayValues() {
- return CreateArrayIterator(this, ITERATOR_KIND_VALUES);
-}
-
-
-function ArrayKeys() {
- return CreateArrayIterator(this, ITERATOR_KIND_KEYS);
-}
-
-// TODO(littledan): Check for detached TypedArray in these three methods
-function TypedArrayEntries() {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
- return %_Call(ArrayEntries, this);
-}
-
-
-function TypedArrayValues() {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
- return %_Call(ArrayValues, this);
-}
-
-
-function TypedArrayKeys() {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
- return %_Call(ArrayKeys, this);
-}
-
-
-%FunctionSetPrototype(ArrayIterator, {__proto__: IteratorPrototype});
-%FunctionSetInstanceClassName(ArrayIterator, 'Array Iterator');
-
-utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
- 'next', ArrayIteratorNext
-]);
-utils.SetFunctionName(ArrayIteratorIterator, iteratorSymbol);
-%AddNamedProperty(ArrayIterator.prototype, toStringTagSymbol,
- "Array Iterator", READ_ONLY | DONT_ENUM);
-
-utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
- // No 'values' since it breaks webcompat: http://crbug.com/409858
- 'entries', ArrayEntries,
- 'keys', ArrayKeys
-]);
-
-// TODO(adam): Remove these calls once 'values' is in the above
-// InstallFunctions block, as they'll be redundant.
-utils.SetFunctionName(ArrayValues, 'values');
-%FunctionRemovePrototype(ArrayValues);
-%SetNativeFlag(ArrayValues);
-
-%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
- DONT_ENUM);
-
-utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
- 'entries', TypedArrayEntries,
- 'keys', TypedArrayKeys,
- 'values', TypedArrayValues
-]);
-%AddNamedProperty(GlobalTypedArray.prototype,
- iteratorSymbol, TypedArrayValues, DONT_ENUM);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.ArrayValues = ArrayValues;
-});
-
-%InstallToContext(["array_values_iterator", ArrayValues]);
-
-})
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index d10e7f18b5..e23810f4de 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -1539,6 +1539,8 @@ var getFunction = function(name, jsBuiltin, len) {
return f;
};
+var ArrayValues = getFunction("values", null, 0);
+
// Set up non-enumerable functions of the Array.prototype object and
// set their names.
// Manipulate the length of some of the functions to meet
@@ -1568,9 +1570,14 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"find", getFunction("find", ArrayFind, 1),
"findIndex", getFunction("findIndex", ArrayFindIndex, 1),
"fill", getFunction("fill", ArrayFill, 1),
- "includes", getFunction("includes", null, 1)
+ "includes", getFunction("includes", null, 1),
+ "keys", getFunction("keys", null, 0),
+ "entries", getFunction("entries", null, 0),
+ iteratorSymbol, ArrayValues
]);
+%FunctionSetName(ArrayValues, "values");
+
utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
%FinishArrayPrototypeSetup(GlobalArray.prototype);
@@ -1614,6 +1621,7 @@ utils.Export(function(to) {
to.ArrayJoin = ArrayJoin;
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
+ to.ArrayValues = ArrayValues;
to.InnerArrayCopyWithin = InnerArrayCopyWithin;
to.InnerArrayEvery = InnerArrayEvery;
to.InnerArrayFill = InnerArrayFill;
@@ -1638,6 +1646,7 @@ utils.Export(function(to) {
"array_splice", ArraySplice,
"array_slice", ArraySlice,
"array_unshift", ArrayUnshift,
+ "array_values_iterator", ArrayValues,
]);
});
diff --git a/deps/v8/src/js/async-await.js b/deps/v8/src/js/async-await.js
index b733f3d9fa..a1cac0d5cd 100644
--- a/deps/v8/src/js/async-await.js
+++ b/deps/v8/src/js/async-await.js
@@ -30,7 +30,6 @@ utils.Import(function(from) {
NewPromiseCapability = from.NewPromiseCapability;
PerformPromiseThen = from.PerformPromiseThen;
PromiseCreate = from.PromiseCreate;
- PromiseNextMicrotaskID = from.PromiseNextMicrotaskID;
RejectPromise = from.RejectPromise;
ResolvePromise = from.ResolvePromise;
});
@@ -143,13 +142,9 @@ function AsyncFunctionPromiseCreate() {
%DebugPushPromise(promise);
// Assign ID and create a recurring task to save stack for future
// resumptions from await.
- var id = PromiseNextMicrotaskID();
+ var id = %DebugNextMicrotaskId();
SET_PRIVATE(promise, promiseAsyncStackIDSymbol, id);
- %DebugAsyncTaskEvent({
- type: "enqueueRecurring",
- id: id,
- name: "async function",
- });
+ %DebugAsyncTaskEvent("enqueueRecurring", id, "async function");
}
return promise;
}
@@ -158,11 +153,12 @@ function AsyncFunctionPromiseRelease(promise) {
if (DEBUG_IS_ACTIVE) {
// Cancel
var id = GET_PRIVATE(promise, promiseAsyncStackIDSymbol);
- %DebugAsyncTaskEvent({
- type: "cancel",
- id: id,
- name: "async function",
- });
+
+ // Don't send invalid events when catch prediction is turned on in
+ // the middle of some async operation.
+ if (!IS_UNDEFINED(id)) {
+ %DebugAsyncTaskEvent("cancel", id, "async function");
+ }
// Pop the Promise under construction in an async function on
// from catch prediction stack.
%DebugPopPromise();
diff --git a/deps/v8/src/js/collection.js b/deps/v8/src/js/collection.js
index 6fe880d913..a4ae904771 100644
--- a/deps/v8/src/js/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -14,14 +14,13 @@ var GlobalMap = global.Map;
var GlobalObject = global.Object;
var GlobalSet = global.Set;
var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
-var MathRandom;
+var MathRandom = global.Math.random;
var MapIterator;
var SetIterator;
var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
- MathRandom = from.MathRandom;
MapIterator = from.MapIterator;
SetIterator = from.SetIterator;
});
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index a397849395..b051b090bc 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -26,23 +26,17 @@ var GlobalString = global.String;
var InstallFunctions = utils.InstallFunctions;
var InstallGetter = utils.InstallGetter;
var InternalArray = utils.InternalArray;
-var InternalRegExpMatch;
-var InternalRegExpReplace
var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
var OverrideFunction = utils.OverrideFunction;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
var SetFunctionName = utils.SetFunctionName;
-var StringIndexOf;
var StringSubstr = GlobalString.prototype.substr;
var StringSubstring = GlobalString.prototype.substring;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
ArrayPush = from.ArrayPush;
- InternalRegExpMatch = from.InternalRegExpMatch;
- InternalRegExpReplace = from.InternalRegExpReplace;
- StringIndexOf = from.StringIndexOf;
});
// Utilities for definitions
@@ -78,9 +72,10 @@ function AddBoundMethod(obj, methodName, implementation, length, type) {
if (IS_UNDEFINED(this[internalName])) {
var boundMethod;
if (IS_UNDEFINED(length) || length === 2) {
- boundMethod = ANONYMOUS_FUNCTION((x, y) => implementation(this, x, y));
+ boundMethod =
+ ANONYMOUS_FUNCTION((fst, snd) => implementation(this, fst, snd));
} else if (length === 1) {
- boundMethod = ANONYMOUS_FUNCTION(x => implementation(this, x));
+ boundMethod = ANONYMOUS_FUNCTION(fst => implementation(this, fst));
} else {
boundMethod = ANONYMOUS_FUNCTION((...args) => {
// DateTimeFormat.format needs to be 0 arg method, but can still
@@ -250,7 +245,7 @@ function GetTimezoneNameLocationPartRE() {
* Parameter locales is treated as a priority list.
*/
function supportedLocalesOf(service, locales, options) {
- if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
+ if (IS_NULL(%regexp_internal_match(GetServiceRE(), service))) {
throw %make_error(kWrongServiceType, service);
}
@@ -298,7 +293,7 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
var matchedLocales = new InternalArray();
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove -u- extension.
- var locale = InternalRegExpReplace(
+ var locale = %RegExpInternalReplace(
GetUnicodeExtensionRE(), requestedLocales[i], '');
do {
if (!IS_UNDEFINED(availableLocales[locale])) {
@@ -408,7 +403,7 @@ function resolveLocale(service, requestedLocales, options) {
* lookup algorithm.
*/
function lookupMatcher(service, requestedLocales) {
- if (IS_NULL(InternalRegExpMatch(GetServiceRE(), service))) {
+ if (IS_NULL(%regexp_internal_match(GetServiceRE(), service))) {
throw %make_error(kWrongServiceType, service);
}
@@ -419,12 +414,12 @@ function lookupMatcher(service, requestedLocales) {
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
- var locale = InternalRegExpReplace(
+ var locale = %RegExpInternalReplace(
GetAnyExtensionRE(), requestedLocales[i], '');
do {
if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
// Return the resolved locale and extension.
- var extensionMatch = InternalRegExpMatch(
+ var extensionMatch = %regexp_internal_match(
GetUnicodeExtensionRE(), requestedLocales[i]);
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
return {'locale': locale, 'extension': extension, 'position': i};
@@ -622,7 +617,7 @@ function getOptimalLanguageTag(original, resolved) {
// Preserve extensions of resolved locale, but swap base tags with original.
var resolvedBase = new GlobalRegExp('^' + locales[1].base, 'g');
- return InternalRegExpReplace(resolvedBase, resolved, locales[0].base);
+ return %RegExpInternalReplace(resolvedBase, resolved, locales[0].base);
}
@@ -637,7 +632,7 @@ function getAvailableLocalesOf(service) {
for (var i in available) {
if (HAS_OWN_PROPERTY(available, i)) {
- var parts = InternalRegExpMatch(
+ var parts = %regexp_internal_match(
/^([a-z]{2,3})-([A-Z][a-z]{3})-([A-Z]{2})$/, i);
if (!IS_NULL(parts)) {
// Build xx-ZZ. We don't care about the actual value,
@@ -709,7 +704,7 @@ function toTitleCaseWord(word) {
* 'of', 'au' and 'es' are special-cased and lowercased.
*/
function toTitleCaseTimezoneLocation(location) {
- var match = InternalRegExpMatch(GetTimezoneNameLocationPartRE(), location)
+ var match = %regexp_internal_match(GetTimezoneNameLocationPartRE(), location)
if (IS_NULL(match)) throw %make_range_error(kExpectedLocation, location);
var result = toTitleCaseWord(match[1]);
@@ -744,7 +739,7 @@ function canonicalizeLanguageTag(localeID) {
// Optimize for the most common case; a language code alone in
// the canonical form/lowercase (e.g. "en", "fil").
if (IS_STRING(localeID) &&
- !IS_NULL(InternalRegExpMatch(/^[a-z]{2,3}$/, localeID))) {
+ !IS_NULL(%regexp_internal_match(/^[a-z]{2,3}$/, localeID))) {
return localeID;
}
@@ -822,12 +817,12 @@ function initializeLocaleList(locales) {
*/
function isStructuallyValidLanguageTag(locale) {
// Check if it's well-formed, including grandfadered tags.
- if (IS_NULL(InternalRegExpMatch(GetLanguageTagRE(), locale))) {
+ if (IS_NULL(%regexp_internal_match(GetLanguageTagRE(), locale))) {
return false;
}
// Just return if it's a x- form. It's all private.
- if (%_Call(StringIndexOf, locale, 'x-') === 0) {
+ if (%StringIndexOf(locale, 'x-', 0) === 0) {
return true;
}
@@ -844,7 +839,7 @@ function isStructuallyValidLanguageTag(locale) {
var parts = %StringSplit(locale, '-', kMaxUint32);
for (var i = 1; i < parts.length; i++) {
var value = parts[i];
- if (!IS_NULL(InternalRegExpMatch(GetLanguageVariantRE(), value)) &&
+ if (!IS_NULL(%regexp_internal_match(GetLanguageVariantRE(), value)) &&
extensions.length === 0) {
if (%ArrayIndexOf(variants, value, 0) === -1) {
%_Call(ArrayPush, variants, value);
@@ -853,7 +848,7 @@ function isStructuallyValidLanguageTag(locale) {
}
}
- if (!IS_NULL(InternalRegExpMatch(GetLanguageSingletonRE(), value))) {
+ if (!IS_NULL(%regexp_internal_match(GetLanguageSingletonRE(), value))) {
if (%ArrayIndexOf(extensions, value, 0) === -1) {
%_Call(ArrayPush, extensions, value);
} else {
@@ -1122,7 +1117,7 @@ AddBoundMethod(Intl.Collator, 'compare', compare, 2, 'collator');
*/
function isWellFormedCurrencyCode(currency) {
return typeof currency == "string" && currency.length == 3 &&
- IS_NULL(InternalRegExpMatch(/[^A-Za-z]/, currency));
+ IS_NULL(%regexp_internal_match(/[^A-Za-z]/, currency));
}
@@ -1440,57 +1435,57 @@ function appendToLDMLString(option, pairs) {
*/
function fromLDMLString(ldmlString) {
// First remove '' quoted text, so we lose 'Uhr' strings.
- ldmlString = InternalRegExpReplace(GetQuotedStringRE(), ldmlString, '');
+ ldmlString = %RegExpInternalReplace(GetQuotedStringRE(), ldmlString, '');
var options = {};
- var match = InternalRegExpMatch(/E{3,5}/, ldmlString);
+ var match = %regexp_internal_match(/E{3,5}/, ldmlString);
options = appendToDateTimeObject(
options, 'weekday', match, {EEEEE: 'narrow', EEE: 'short', EEEE: 'long'});
- match = InternalRegExpMatch(/G{3,5}/, ldmlString);
+ match = %regexp_internal_match(/G{3,5}/, ldmlString);
options = appendToDateTimeObject(
options, 'era', match, {GGGGG: 'narrow', GGG: 'short', GGGG: 'long'});
- match = InternalRegExpMatch(/y{1,2}/, ldmlString);
+ match = %regexp_internal_match(/y{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'year', match, {y: 'numeric', yy: '2-digit'});
- match = InternalRegExpMatch(/M{1,5}/, ldmlString);
+ match = %regexp_internal_match(/M{1,5}/, ldmlString);
options = appendToDateTimeObject(options, 'month', match, {MM: '2-digit',
M: 'numeric', MMMMM: 'narrow', MMM: 'short', MMMM: 'long'});
// Sometimes we get L instead of M for month - standalone name.
- match = InternalRegExpMatch(/L{1,5}/, ldmlString);
+ match = %regexp_internal_match(/L{1,5}/, ldmlString);
options = appendToDateTimeObject(options, 'month', match, {LL: '2-digit',
L: 'numeric', LLLLL: 'narrow', LLL: 'short', LLLL: 'long'});
- match = InternalRegExpMatch(/d{1,2}/, ldmlString);
+ match = %regexp_internal_match(/d{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'day', match, {d: 'numeric', dd: '2-digit'});
- match = InternalRegExpMatch(/h{1,2}/, ldmlString);
+ match = %regexp_internal_match(/h{1,2}/, ldmlString);
if (match !== null) {
options['hour12'] = true;
}
options = appendToDateTimeObject(
options, 'hour', match, {h: 'numeric', hh: '2-digit'});
- match = InternalRegExpMatch(/H{1,2}/, ldmlString);
+ match = %regexp_internal_match(/H{1,2}/, ldmlString);
if (match !== null) {
options['hour12'] = false;
}
options = appendToDateTimeObject(
options, 'hour', match, {H: 'numeric', HH: '2-digit'});
- match = InternalRegExpMatch(/m{1,2}/, ldmlString);
+ match = %regexp_internal_match(/m{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'minute', match, {m: 'numeric', mm: '2-digit'});
- match = InternalRegExpMatch(/s{1,2}/, ldmlString);
+ match = %regexp_internal_match(/s{1,2}/, ldmlString);
options = appendToDateTimeObject(
options, 'second', match, {s: 'numeric', ss: '2-digit'});
- match = InternalRegExpMatch(/z|zzzz/, ldmlString);
+ match = %regexp_internal_match(/z|zzzz/, ldmlString);
options = appendToDateTimeObject(
options, 'timeZoneName', match, {z: 'short', zzzz: 'long'});
@@ -1819,7 +1814,7 @@ function canonicalizeTimeZoneID(tzID) {
// We expect only _, '-' and / beside ASCII letters.
// All inputs should conform to Area/Location(/Location)* from now on.
- var match = InternalRegExpMatch(GetTimezoneNameCheckRE(), tzID);
+ var match = %regexp_internal_match(GetTimezoneNameCheckRE(), tzID);
if (IS_NULL(match)) throw %make_range_error(kExpectedTimezoneID, tzID);
var result = toTitleCaseTimezoneLocation(match[1]) + '/' +
@@ -2058,7 +2053,7 @@ function LocaleConvertCase(s, locales, isToUpper) {
}
// StringSplit is slower than this.
- var pos = %_Call(StringIndexOf, language, '-');
+ var pos = %StringIndexOf(language, '-', 0);
if (pos != -1) {
language = %_Call(StringSubstring, language, 0, pos);
}
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index cdc3d0ae0c..5ad578a2be 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -39,9 +39,6 @@ define kMaxSafeInteger = 9007199254740991;
# 2^32 - 1
define kMaxUint32 = 4294967295;
-# Native cache ids.
-define STRING_TO_REGEXP_CACHE_ID = 0;
-
# Type query macros.
#
# Note: We have special support for typeof(foo) === 'bar' in the compiler.
@@ -117,36 +114,6 @@ define UNDEFINED = (void 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
-# Layout of internal RegExpLastMatchInfo object.
-define REGEXP_NUMBER_OF_CAPTURES = 0;
-define REGEXP_LAST_SUBJECT = 1;
-define REGEXP_LAST_INPUT = 2;
-define REGEXP_FIRST_CAPTURE = 3;
-define CAPTURE0 = 3; # Aliases REGEXP_FIRST_CAPTURE.
-define CAPTURE1 = 4;
-
-macro NUMBER_OF_CAPTURES(array) = ((array)[REGEXP_NUMBER_OF_CAPTURES]);
-macro LAST_SUBJECT(array) = ((array)[REGEXP_LAST_SUBJECT]);
-macro LAST_INPUT(array) = ((array)[REGEXP_LAST_INPUT]);
-macro CAPTURE(index) = (REGEXP_FIRST_CAPTURE + (index));
-
-# Macros for internal slot access.
-macro REGEXP_GLOBAL(regexp) = (%_RegExpFlags(regexp) & 1);
-macro REGEXP_IGNORE_CASE(regexp) = (%_RegExpFlags(regexp) & 2);
-macro REGEXP_MULTILINE(regexp) = (%_RegExpFlags(regexp) & 4);
-macro REGEXP_STICKY(regexp) = (%_RegExpFlags(regexp) & 8);
-macro REGEXP_UNICODE(regexp) = (%_RegExpFlags(regexp) & 16);
-macro REGEXP_SOURCE(regexp) = (%_RegExpSource(regexp));
-
-# For the regexp capture override array. This has the same
-# format as the arguments to a function called from
-# String.prototype.replace.
-macro OVERRIDE_MATCH(override) = ((override)[0]);
-macro OVERRIDE_POS(override) = ((override)[(override).length - 2]);
-macro OVERRIDE_SUBJECT(override) = ((override)[(override).length - 1]);
-# 1-based so index of 1 returns the first capture
-macro OVERRIDE_CAPTURE(override, index) = ((override)[(index)]);
-
# For messages.js
# Matches Script::Type from objects.h
define TYPE_NATIVE = 0;
diff --git a/deps/v8/src/js/math.js b/deps/v8/src/js/math.js
deleted file mode 100644
index 346da24596..0000000000
--- a/deps/v8/src/js/math.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-// The first two slots are reserved to persist PRNG state.
-define kRandomNumberStart = 2;
-
-var GlobalMath = global.Math;
-var NaN = %GetRootNaN();
-var nextRandomIndex = 0;
-var randomNumbers = UNDEFINED;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-//-------------------------------------------------------------------
-// ECMA 262 - 15.8.2.14
-function MathRandom() {
- // While creating a startup snapshot, %GenerateRandomNumbers returns a
- // normal array containing a single random number, and has to be called for
- // every new random number.
- // Otherwise, it returns a pre-populated typed array of random numbers. The
- // first two elements are reserved for the PRNG state.
- if (nextRandomIndex <= kRandomNumberStart) {
- randomNumbers = %GenerateRandomNumbers(randomNumbers);
- if (%_IsTypedArray(randomNumbers)) {
- nextRandomIndex = %_TypedArrayGetLength(randomNumbers);
- } else {
- nextRandomIndex = randomNumbers.length;
- }
- }
- return randomNumbers[--nextRandomIndex];
-}
-
-// -------------------------------------------------------------------
-
-%AddNamedProperty(GlobalMath, toStringTagSymbol, "Math", READ_ONLY | DONT_ENUM);
-
-// Set up non-enumerable functions of the Math object and
-// set their names.
-utils.InstallFunctions(GlobalMath, DONT_ENUM, [
- "random", MathRandom,
-]);
-
-%SetForceInlineFlag(MathRandom);
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.MathRandom = MathRandom;
-});
-
-})
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 8a07a4cb07..dba77d7d8d 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -181,10 +181,7 @@ function PostNatives(utils) {
// Whitelist of exports from normal natives to experimental natives and debug.
var expose_list = [
- "ArrayToString",
"FormatDateToParts",
- "GetIterator",
- "GetMethod",
"MapEntries",
"MapIterator",
"MapIteratorNext",
@@ -196,26 +193,12 @@ function PostNatives(utils) {
"ToLocaleLowerCaseI18N",
"ToLocaleUpperCaseI18N",
"ToLowerCaseI18N",
- "ToPositiveInteger",
"ToUpperCaseI18N",
// From runtime:
- "is_concat_spreadable_symbol",
- "iterator_symbol",
- "object_freeze",
- "object_is_frozen",
- "object_is_sealed",
"promise_result_symbol",
"promise_state_symbol",
"reflect_apply",
- "reflect_construct",
- "regexp_flags_symbol",
"to_string_tag_symbol",
- "object_to_string",
- "species_symbol",
- "match_symbol",
- "replace_symbol",
- "search_symbol",
- "split_symbol",
];
var filtered_exports = {};
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index 793d60fb0a..0b37c643d1 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -24,8 +24,8 @@ var promiseRejectReactionsSymbol =
utils.ImportNow("promise_reject_reactions_symbol");
var promiseFulfillReactionsSymbol =
utils.ImportNow("promise_fulfill_reactions_symbol");
-var promiseDeferredReactionsSymbol =
- utils.ImportNow("promise_deferred_reactions_symbol");
+var promiseDeferredReactionSymbol =
+ utils.ImportNow("promise_deferred_reaction_symbol");
var promiseHandledHintSymbol =
utils.ImportNow("promise_handled_hint_symbol");
var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
@@ -44,44 +44,13 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
// [[PromiseState]] values:
+// These values should be kept in sync with PromiseStatus in globals.h
const kPending = 0;
const kFulfilled = +1;
-const kRejected = -1;
-
-var lastMicrotaskId = 0;
-
-function PromiseNextMicrotaskID() {
- return ++lastMicrotaskId;
-}
-
-// ES#sec-createresolvingfunctions
-// CreateResolvingFunctions ( promise )
-function CreateResolvingFunctions(promise, debugEvent) {
- var alreadyResolved = false;
-
- // ES#sec-promise-resolve-functions
- // Promise Resolve Functions
- var resolve = value => {
- if (alreadyResolved === true) return;
- alreadyResolved = true;
- ResolvePromise(promise, value);
- };
-
- // ES#sec-promise-reject-functions
- // Promise Reject Functions
- var reject = reason => {
- if (alreadyResolved === true) return;
- alreadyResolved = true;
- RejectPromise(promise, reason, debugEvent);
- };
-
- return {
- __proto__: null,
- resolve: resolve,
- reject: reject
- };
-}
+const kRejected = +2;
+const kResolveCallback = 0;
+const kRejectCallback = 1;
// ES#sec-promise-executor
// Promise ( executor )
@@ -96,13 +65,15 @@ var GlobalPromise = function Promise(executor) {
var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
// Calling the reject function would be a new exception, so debugEvent = true
- var callbacks = CreateResolvingFunctions(promise, true);
+ // TODO(gsathya): Remove container for callbacks when this is moved
+ // to CPP/TF.
+ var callbacks = %create_resolving_functions(promise, true);
var debug_is_active = DEBUG_IS_ACTIVE;
try {
if (debug_is_active) %DebugPushPromise(promise);
- executor(callbacks.resolve, callbacks.reject);
+ executor(callbacks[kResolveCallback], callbacks[kRejectCallback]);
} %catch (e) { // Natives syntax to mark this catch block.
- %_Call(callbacks.reject, UNDEFINED, e);
+ %_Call(callbacks[kRejectCallback], UNDEFINED, e);
} finally {
if (debug_is_active) %DebugPopPromise();
}
@@ -128,16 +99,11 @@ function PromiseSet(promise, status, value) {
SET_PRIVATE(promise, promiseFulfillReactionsSymbol, UNDEFINED);
SET_PRIVATE(promise, promiseRejectReactionsSymbol, UNDEFINED);
- // There are 2 possible states for this symbol --
- // 1) UNDEFINED -- This is the zero state, no deferred object is
- // attached to this symbol. When we want to add a new deferred we
- // directly attach it to this symbol.
- // 2) symbol with attached deferred object -- New deferred objects
- // are not attached to this symbol, but instead they are directly
- // attached to the resolve, reject callback arrays. At this point,
- // the deferred symbol's state is stale, and the deferreds should be
- // read from the reject, resolve callbacks.
- SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
+ // This symbol is used only when one deferred needs to be attached. When more
+ // than one deferred need to be attached the promise, we attach them directly
+ // to the promiseFulfillReactionsSymbol and promiseRejectReactionsSymbol and
+ // reset this back to UNDEFINED.
+ SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
return promise;
}
@@ -153,47 +119,35 @@ function PromiseInit(promise) {
return PromiseSet(promise, kPending, UNDEFINED);
}
-function FulfillPromise(promise, status, value, promiseQueue) {
- if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
- var tasks = GET_PRIVATE(promise, promiseQueue);
- if (!IS_UNDEFINED(tasks)) {
- var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
- PromiseEnqueue(value, tasks, deferreds, status);
- }
- PromiseSet(promise, status, value);
- }
-}
-
function PromiseHandle(value, handler, deferred) {
var debug_is_active = DEBUG_IS_ACTIVE;
try {
if (debug_is_active) %DebugPushPromise(deferred.promise);
var result = handler(value);
- deferred.resolve(result);
+ if (IS_UNDEFINED(deferred.resolve)) {
+ ResolvePromise(deferred.promise, result);
+ } else {
+ %_Call(deferred.resolve, UNDEFINED, result);
+ }
} %catch (exception) { // Natives syntax to mark this catch block.
- try { deferred.reject(exception); } catch (e) { }
+ try {
+ if (IS_UNDEFINED(deferred.reject)) {
+ // Pass false for debugEvent so .then chaining does not trigger
+ // redundant ExceptionEvents.
+ %PromiseReject(deferred.promise, exception, false);
+ PromiseSet(deferred.promise, kRejected, exception);
+ } else {
+ %_Call(deferred.reject, UNDEFINED, exception);
+ }
+ } catch (e) { }
} finally {
if (debug_is_active) %DebugPopPromise();
}
}
-function PromiseEnqueue(value, tasks, deferreds, status) {
+function PromiseDebugGetInfo(deferreds, status) {
var id, name, instrumenting = DEBUG_IS_ACTIVE;
- %EnqueueMicrotask(function() {
- if (instrumenting) {
- %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
- }
- if (IS_ARRAY(tasks)) {
- for (var i = 0; i < tasks.length; i += 2) {
- PromiseHandle(value, tasks[i], tasks[i + 1]);
- }
- } else {
- PromiseHandle(value, tasks, deferreds);
- }
- if (instrumenting) {
- %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
- }
- });
+
if (instrumenting) {
// In an async function, reuse the existing stack related to the outer
// Promise. Otherwise, e.g. in a direct call to then, save a new stack.
@@ -209,11 +163,12 @@ function PromiseEnqueue(value, tasks, deferreds, status) {
promiseAsyncStackIDSymbol);
name = "async function";
} else {
- id = PromiseNextMicrotaskID();
+ id = %DebugNextMicrotaskId();
name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
- %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+ %DebugAsyncTaskEvent("enqueue", id, name);
}
}
+ return [id, name];
}
function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
@@ -222,11 +177,11 @@ function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
if (IS_UNDEFINED(maybeResolveCallbacks)) {
SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
- SET_PRIVATE(promise, promiseDeferredReactionsSymbol, deferred);
+ SET_PRIVATE(promise, promiseDeferredReactionSymbol, deferred);
} else if (!IS_ARRAY(maybeResolveCallbacks)) {
var resolveCallbacks = new InternalArray();
var rejectCallbacks = new InternalArray();
- var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+ var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
resolveCallbacks.push(
maybeResolveCallbacks, existingDeferred, onResolve, deferred);
@@ -237,7 +192,7 @@ function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
- SET_PRIVATE(promise, promiseDeferredReactionsSymbol, UNDEFINED);
+ SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
} else {
maybeResolveCallbacks.push(onResolve, deferred);
GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
@@ -266,16 +221,19 @@ function PromiseCreate() {
// Promise Resolve Functions, steps 6-13
function ResolvePromise(promise, resolution) {
if (resolution === promise) {
- return RejectPromise(promise,
- %make_type_error(kPromiseCyclic, resolution),
- true);
+ var exception = %make_type_error(kPromiseCyclic, resolution);
+ %PromiseReject(promise, exception, true);
+ PromiseSet(promise, kRejected, exception);
+ return;
}
if (IS_RECEIVER(resolution)) {
// 25.4.1.3.2 steps 8-12
try {
var then = resolution.then;
} catch (e) {
- return RejectPromise(promise, e, true);
+ %PromiseReject(promise, e, true);
+ PromiseSet(promise, kRejected, e);
+ return;
}
// Resolution is a native promise and if it's already resolved or
@@ -287,8 +245,9 @@ function ResolvePromise(promise, resolution) {
// This goes inside the if-else to save one symbol lookup in
// the slow path.
var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
- FulfillPromise(promise, kFulfilled, thenableValue,
+ %PromiseFulfill(promise, kFulfilled, thenableValue,
promiseFulfillReactionsSymbol);
+ PromiseSet(promise, kFulfilled, thenableValue);
SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
return;
} else if (thenableState === kRejected) {
@@ -299,70 +258,37 @@ function ResolvePromise(promise, resolution) {
%PromiseRevokeReject(resolution);
}
// Don't cause a debug event as this case is forwarding a rejection
- RejectPromise(promise, thenableValue, false);
+ %PromiseReject(promise, thenableValue, false);
+ PromiseSet(promise, kRejected, thenableValue);
SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
return;
}
}
if (IS_CALLABLE(then)) {
- var callbacks = CreateResolvingFunctions(promise, false);
- var id, before_debug_event, after_debug_event;
- var instrumenting = DEBUG_IS_ACTIVE;
- if (instrumenting) {
- if (IsPromise(resolution)) {
+ if (DEBUG_IS_ACTIVE && IsPromise(resolution)) {
// Mark the dependency of the new promise on the resolution
- SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
- }
- id = PromiseNextMicrotaskID();
- before_debug_event = {
- type: "willHandle",
- id: id,
- name: "PromiseResolveThenableJob"
- };
- after_debug_event = {
- type: "didHandle",
- id: id,
- name: "PromiseResolveThenableJob"
- };
- %DebugAsyncTaskEvent({
- type: "enqueue",
- id: id,
- name: "PromiseResolveThenableJob"
- });
+ SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
}
- %EnqueuePromiseResolveThenableJob(
- resolution, then, callbacks.resolve, callbacks.reject,
- before_debug_event, after_debug_event);
+ %EnqueuePromiseResolveThenableJob(promise, resolution, then);
return;
}
}
- FulfillPromise(promise, kFulfilled, resolution,
- promiseFulfillReactionsSymbol);
+ %PromiseFulfill(promise, kFulfilled, resolution,
+ promiseFulfillReactionsSymbol);
+ PromiseSet(promise, kFulfilled, resolution);
}
-// ES#sec-rejectpromise
-// RejectPromise ( promise, reason )
+// Only used by async-await.js
function RejectPromise(promise, reason, debugEvent) {
- // Check promise status to confirm that this reject has an effect.
- // Call runtime for callbacks to the debugger or for unhandled reject.
- // The debugEvent parameter sets whether a debug ExceptionEvent should
- // be triggered. It should be set to false when forwarding a rejection
- // rather than creating a new one.
- if (GET_PRIVATE(promise, promiseStateSymbol) === kPending) {
- // This check is redundant with checks in the runtime, but it may help
- // avoid unnecessary runtime calls.
- if ((debugEvent && DEBUG_IS_ACTIVE) ||
- !HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
- %PromiseRejectEvent(promise, reason, debugEvent);
- }
- }
- FulfillPromise(promise, kRejected, reason, promiseRejectReactionsSymbol)
+ %PromiseReject(promise, reason, debugEvent);
+ PromiseSet(promise, kRejected, reason);
}
// Export to bindings
function DoRejectPromise(promise, reason) {
- return RejectPromise(promise, reason, true);
+ %PromiseReject(promise, reason, true);
+ PromiseSet(promise, kRejected, reason);
}
// ES#sec-newpromisecapability
@@ -371,11 +297,13 @@ function NewPromiseCapability(C, debugEvent) {
if (C === GlobalPromise) {
// Optimized case, avoid extra closure.
var promise = PromiseCreate();
- var callbacks = CreateResolvingFunctions(promise, debugEvent);
+ // TODO(gsathya): Remove container for callbacks when this is
+ // moved to CPP/TF.
+ var callbacks = %create_resolving_functions(promise, debugEvent);
return {
promise: promise,
- resolve: callbacks.resolve,
- reject: callbacks.reject
+ resolve: callbacks[kResolveCallback],
+ reject: callbacks[kRejectCallback]
};
}
@@ -423,8 +351,8 @@ function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
PromiseAttachCallbacks(promise, resultCapability, onResolve, onReject);
break;
case kFulfilled:
- PromiseEnqueue(GET_PRIVATE(promise, promiseResultSymbol),
- onResolve, resultCapability, kFulfilled);
+ %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
+ onResolve, resultCapability, kFulfilled);
break;
case kRejected:
if (!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
@@ -432,8 +360,8 @@ function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
// Revoke previously triggered reject event.
%PromiseRevokeReject(promise);
}
- PromiseEnqueue(GET_PRIVATE(promise, promiseResultSymbol),
- onReject, resultCapability, kRejected);
+ %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
+ onReject, resultCapability, kRejected);
break;
}
@@ -452,9 +380,23 @@ function PromiseThen(onResolve, onReject) {
}
var constructor = SpeciesConstructor(this, GlobalPromise);
- // Pass false for debugEvent so .then chaining does not trigger
- // redundant ExceptionEvents.
- var resultCapability = NewPromiseCapability(constructor, false);
+ var resultCapability;
+
+ // The resultCapability.promise is only ever fulfilled internally,
+ // so we don't need the closures to protect against accidentally
+ // calling them multiple times.
+ if (constructor === GlobalPromise) {
+ // TODO(gsathya): Combine this into NewPromiseCapability.
+ resultCapability = {
+ promise: PromiseCreate(),
+ resolve: UNDEFINED,
+ reject: UNDEFINED
+ };
+ } else {
+ // Pass false for debugEvent so .then chaining does not trigger
+ // redundant ExceptionEvents.
+ resultCapability = NewPromiseCapability(constructor, false);
+ }
return PerformPromiseThen(this, onResolve, onReject, resultCapability);
}
@@ -477,13 +419,13 @@ function PromiseResolve(x) {
// Avoid creating resolving functions.
if (this === GlobalPromise) {
var promise = PromiseCreate();
- var resolveResult = ResolvePromise(promise, x);
+ ResolvePromise(promise, x);
return promise;
}
// debugEvent is not so meaningful here as it will be resolved
var promiseCapability = NewPromiseCapability(this, true);
- var resolveResult = %_Call(promiseCapability.resolve, UNDEFINED, x);
+ %_Call(promiseCapability.resolve, UNDEFINED, x);
return promiseCapability.promise;
}
@@ -580,7 +522,7 @@ function PromiseRace(iterable) {
}
}
} catch (e) {
- deferred.reject(e)
+ %_Call(deferred.reject, UNDEFINED, e);
}
return deferred.promise;
}
@@ -620,12 +562,12 @@ function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
}
var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
- var deferreds = GET_PRIVATE(promise, promiseDeferredReactionsSymbol);
+ var deferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
if (IS_UNDEFINED(queue)) return false;
if (!IS_ARRAY(queue)) {
- return PromiseHasUserDefinedRejectHandlerCheck(queue, deferreds);
+ return PromiseHasUserDefinedRejectHandlerCheck(queue, deferred);
}
for (var i = 0; i < queue.length; i += 2) {
@@ -645,6 +587,10 @@ function PromiseHasUserDefinedRejectHandler() {
return PromiseHasUserDefinedRejectHandlerRecursive(this);
};
+function MarkPromiseAsHandled(promise) {
+ SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
+}
+
function PromiseSpecies() {
return this;
@@ -676,8 +622,12 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
"promise_create", PromiseCreate,
"promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
"promise_reject", DoRejectPromise,
+ // TODO(gsathya): Remove this once we update the promise builtin.
+ "promise_internal_reject", RejectPromise,
"promise_resolve", ResolvePromise,
- "promise_then", PromiseThen
+ "promise_then", PromiseThen,
+ "promise_handle", PromiseHandle,
+ "promise_debug_get_info", PromiseDebugGetInfo
]);
// This allows extras to create promises quickly without building extra
@@ -686,14 +636,14 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
utils.InstallFunctions(extrasUtils, 0, [
"createPromise", PromiseCreate,
"resolvePromise", ResolvePromise,
- "rejectPromise", DoRejectPromise
+ "rejectPromise", DoRejectPromise,
+ "markPromiseAsHandled", MarkPromiseAsHandled
]);
utils.Export(function(to) {
to.IsPromise = IsPromise;
to.PromiseCreate = PromiseCreate;
to.PromiseThen = PromiseThen;
- to.PromiseNextMicrotaskID = PromiseNextMicrotaskID;
to.GlobalPromise = GlobalPromise;
to.NewPromiseCapability = NewPromiseCapability;
diff --git a/deps/v8/src/js/regexp.js b/deps/v8/src/js/regexp.js
deleted file mode 100644
index 49da45b84c..0000000000
--- a/deps/v8/src/js/regexp.js
+++ /dev/null
@@ -1,1058 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalArray = global.Array;
-var GlobalObject = global.Object;
-var GlobalRegExp = global.RegExp;
-var GlobalRegExpPrototype = GlobalRegExp.prototype;
-var InternalArray = utils.InternalArray;
-var InternalPackedArray = utils.InternalPackedArray;
-var MaxSimple;
-var MinSimple;
-var RegExpExecJS = GlobalRegExp.prototype.exec;
-var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-var SpeciesConstructor;
-
-utils.Import(function(from) {
- MaxSimple = from.MaxSimple;
- MinSimple = from.MinSimple;
- SpeciesConstructor = from.SpeciesConstructor;
-});
-
-// -------------------------------------------------------------------
-
-// Property of the builtins object for recording the result of the last
-// regexp match. The property RegExpLastMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indices. The array also contains
-// the subject string for the last successful match.
-// We use a JSObject rather than a JSArray so we don't have to manually update
-// its length.
-var RegExpLastMatchInfo = {
- REGEXP_NUMBER_OF_CAPTURES: 2,
- REGEXP_LAST_SUBJECT: "",
- REGEXP_LAST_INPUT: UNDEFINED, // Settable with RegExpSetInput.
- CAPTURE0: 0,
- CAPTURE1: 0
-};
-
-// -------------------------------------------------------------------
-
-// ES#sec-isregexp IsRegExp ( argument )
-function IsRegExp(o) {
- if (!IS_RECEIVER(o)) return false;
- var is_regexp = o[matchSymbol];
- if (!IS_UNDEFINED(is_regexp)) return TO_BOOLEAN(is_regexp);
- return IS_REGEXP(o);
-}
-
-
-// ES#sec-regexpinitialize
-// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-function RegExpInitialize(object, pattern, flags) {
- pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
- flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
- %RegExpInitializeAndCompile(object, pattern, flags);
- return object;
-}
-
-
-function PatternFlags(pattern) {
- return (REGEXP_GLOBAL(pattern) ? 'g' : '') +
- (REGEXP_IGNORE_CASE(pattern) ? 'i' : '') +
- (REGEXP_MULTILINE(pattern) ? 'm' : '') +
- (REGEXP_UNICODE(pattern) ? 'u' : '') +
- (REGEXP_STICKY(pattern) ? 'y' : '');
-}
-
-
-// ES#sec-regexp.prototype.compile RegExp.prototype.compile (pattern, flags)
-function RegExpCompileJS(pattern, flags) {
- if (!IS_REGEXP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.compile", this);
- }
-
- if (IS_REGEXP(pattern)) {
- if (!IS_UNDEFINED(flags)) throw %make_type_error(kRegExpFlags);
-
- flags = PatternFlags(pattern);
- pattern = REGEXP_SOURCE(pattern);
- }
-
- RegExpInitialize(this, pattern, flags);
-
- // Return undefined for compatibility with JSC.
- // See http://crbug.com/585775 for web compat details.
-}
-
-
-function DoRegExpExec(regexp, string, index) {
- return %_RegExpExec(regexp, string, index, RegExpLastMatchInfo);
-}
-
-
-// This is kind of performance sensitive, so we want to avoid unnecessary
-// type checks on inputs. But we also don't want to inline it several times
-// manually, so we use a macro :-)
-macro RETURN_NEW_RESULT_FROM_MATCH_INFO(MATCHINFO, STRING)
- var numResults = NUMBER_OF_CAPTURES(MATCHINFO) >> 1;
- var start = MATCHINFO[CAPTURE0];
- var end = MATCHINFO[CAPTURE1];
- // Calculate the substring of the first match before creating the result array
- // to avoid an unnecessary write barrier storing the first result.
- var first = %_SubString(STRING, start, end);
- var result = %_RegExpConstructResult(numResults, start, STRING);
- result[0] = first;
- if (numResults == 1) return result;
- var j = REGEXP_FIRST_CAPTURE + 2;
- for (var i = 1; i < numResults; i++) {
- start = MATCHINFO[j++];
- if (start != -1) {
- end = MATCHINFO[j];
- result[i] = %_SubString(STRING, start, end);
- }
- j++;
- }
- return result;
-endmacro
-
-
-
-// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
-// Also takes an optional exec method in case our caller
-// has already fetched exec.
-function RegExpSubclassExec(regexp, string, exec) {
- if (IS_UNDEFINED(exec)) {
- exec = regexp.exec;
- }
- if (IS_CALLABLE(exec)) {
- var result = %_Call(exec, regexp, string);
- if (!IS_RECEIVER(result) && !IS_NULL(result)) {
- throw %make_type_error(kInvalidRegExpExecResult);
- }
- return result;
- }
- return %_Call(RegExpExecJS, regexp, string);
-}
-%SetForceInlineFlag(RegExpSubclassExec);
-
-
-// ES#sec-regexp.prototype.test RegExp.prototype.test ( S )
-function RegExpSubclassTest(string) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'RegExp.prototype.test', this);
- }
- string = TO_STRING(string);
- var match = RegExpSubclassExec(this, string);
- return !IS_NULL(match);
-}
-%FunctionRemovePrototype(RegExpSubclassTest);
-
-
-function RegExpToString() {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(
- kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
- }
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeToString);
- }
- return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
-}
-
-
-function AtSurrogatePair(subject, index) {
- if (index + 1 >= subject.length) return false;
- var first = %_StringCharCodeAt(subject, index);
- if (first < 0xD800 || first > 0xDBFF) return false;
- var second = %_StringCharCodeAt(subject, index + 1);
- return second >= 0xDC00 && second <= 0xDFFF;
-}
-
-
-// Fast path implementation of RegExp.prototype[Symbol.split] which
-// doesn't properly call the underlying exec, @@species methods
-function RegExpSplit(string, limit) {
- if (!IS_REGEXP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@split", this);
- }
- var separator = this;
- var subject = TO_STRING(string);
-
- limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
- var length = subject.length;
-
- if (limit === 0) return [];
-
- if (length === 0) {
- if (DoRegExpExec(separator, subject, 0, 0) !== null) return [];
- return [subject];
- }
-
- var currentIndex = 0;
- var startIndex = 0;
- var startMatch = 0;
- var result = new InternalArray();
-
- outer_loop:
- while (true) {
- if (startIndex === length) {
- result[result.length] = %_SubString(subject, currentIndex, length);
- break;
- }
-
- var matchInfo = DoRegExpExec(separator, subject, startIndex);
- if (matchInfo === null || length === (startMatch = matchInfo[CAPTURE0])) {
- result[result.length] = %_SubString(subject, currentIndex, length);
- break;
- }
- var endIndex = matchInfo[CAPTURE1];
-
- // We ignore a zero-length match at the currentIndex.
- if (startIndex === endIndex && endIndex === currentIndex) {
- if (REGEXP_UNICODE(this) && AtSurrogatePair(subject, startIndex)) {
- startIndex += 2;
- } else {
- startIndex++;
- }
- continue;
- }
-
- result[result.length] = %_SubString(subject, currentIndex, startMatch);
-
- if (result.length === limit) break;
-
- var matchinfo_len = NUMBER_OF_CAPTURES(matchInfo) + REGEXP_FIRST_CAPTURE;
- for (var i = REGEXP_FIRST_CAPTURE + 2; i < matchinfo_len; ) {
- var start = matchInfo[i++];
- var end = matchInfo[i++];
- if (end != -1) {
- result[result.length] = %_SubString(subject, start, end);
- } else {
- result[result.length] = UNDEFINED;
- }
- if (result.length === limit) break outer_loop;
- }
-
- startIndex = currentIndex = endIndex;
- }
-
- var array_result = [];
- %MoveArrayContents(result, array_result);
- return array_result;
-}
-
-
-// ES#sec-regexp.prototype-@@split
-// RegExp.prototype [ @@split ] ( string, limit )
-function RegExpSubclassSplit(string, limit) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@split", this);
- }
- string = TO_STRING(string);
- var constructor = SpeciesConstructor(this, GlobalRegExp);
- var flags = TO_STRING(this.flags);
-
- // TODO(adamk): this fast path is wrong as we doesn't ensure that 'exec'
- // is actually a data property on RegExp.prototype.
- if (IS_REGEXP(this) && constructor === GlobalRegExp) {
- var exec = this.exec;
- if (exec === RegExpExecJS) {
- return %_Call(RegExpSplit, this, string, limit);
- }
- }
-
- var unicode = %StringIndexOf(flags, 'u', 0) >= 0;
- var sticky = %StringIndexOf(flags, 'y', 0) >= 0;
- var newFlags = sticky ? flags : flags + "y";
- var splitter = new constructor(this, newFlags);
- var array = new GlobalArray();
- var arrayIndex = 0;
- var lim = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
- var size = string.length;
- var prevStringIndex = 0;
- if (lim === 0) return array;
- var result;
- if (size === 0) {
- result = RegExpSubclassExec(splitter, string);
- if (IS_NULL(result)) %AddElement(array, 0, string);
- return array;
- }
- var stringIndex = prevStringIndex;
- while (stringIndex < size) {
- splitter.lastIndex = stringIndex;
- result = RegExpSubclassExec(splitter, string);
- if (IS_NULL(result)) {
- stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
- } else {
- var end = MinSimple(TO_LENGTH(splitter.lastIndex), size);
- if (end === prevStringIndex) {
- stringIndex += AdvanceStringIndex(string, stringIndex, unicode);
- } else {
- %AddElement(
- array, arrayIndex,
- %_SubString(string, prevStringIndex, stringIndex));
- arrayIndex++;
- if (arrayIndex === lim) return array;
- prevStringIndex = end;
- var numberOfCaptures = MaxSimple(TO_LENGTH(result.length), 0);
- for (var i = 1; i < numberOfCaptures; i++) {
- %AddElement(array, arrayIndex, result[i]);
- arrayIndex++;
- if (arrayIndex === lim) return array;
- }
- stringIndex = prevStringIndex;
- }
- }
- }
- %AddElement(array, arrayIndex,
- %_SubString(string, prevStringIndex, size));
- return array;
-}
-%FunctionRemovePrototype(RegExpSubclassSplit);
-
-
-// ES#sec-regexp.prototype-@@match
-// RegExp.prototype [ @@match ] ( string )
-function RegExpSubclassMatch(string) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@match", this);
- }
- string = TO_STRING(string);
- var global = this.global;
- if (!global) return RegExpSubclassExec(this, string);
- var unicode = this.unicode;
- this.lastIndex = 0;
- var array = new InternalArray();
- var n = 0;
- var result;
- while (true) {
- result = RegExpSubclassExec(this, string);
- if (IS_NULL(result)) {
- if (n === 0) return null;
- break;
- }
- var matchStr = TO_STRING(result[0]);
- array[n] = matchStr;
- if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
- n++;
- }
- var resultArray = [];
- %MoveArrayContents(array, resultArray);
- return resultArray;
-}
-%FunctionRemovePrototype(RegExpSubclassMatch);
-
-
-// Legacy implementation of RegExp.prototype[Symbol.replace] which
-// doesn't properly call the underlying exec method.
-
-// TODO(lrn): This array will survive indefinitely if replace is never
-// called again. However, it will be empty, since the contents are cleared
-// in the finally block.
-var reusableReplaceArray = new InternalArray(4);
-
-// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.
-function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
- var resultArray = reusableReplaceArray;
- if (resultArray) {
- reusableReplaceArray = null;
- } else {
- // Inside a nested replace (replace called from the replacement function
- // of another replace) or we have failed to set the reusable array
- // back due to an exception in a replacement function. Create a new
- // array to use in the future, or until the original is written back.
- resultArray = new InternalArray(16);
- }
- var res = %RegExpExecMultiple(regexp,
- subject,
- RegExpLastMatchInfo,
- resultArray);
- regexp.lastIndex = 0;
- if (IS_NULL(res)) {
- // No matches at all.
- reusableReplaceArray = resultArray;
- return subject;
- }
- var len = res.length;
- if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
- // If the number of captures is two then there are no explicit captures in
- // the regexp, just the implicit capture that captures the whole match. In
- // this case we can simplify quite a bit and end up with something faster.
- // The builder will consist of some integers that indicate slices of the
- // input string and some replacements that were returned from the replace
- // function.
- var match_start = 0;
- for (var i = 0; i < len; i++) {
- var elem = res[i];
- if (%_IsSmi(elem)) {
- // Integers represent slices of the original string.
- if (elem > 0) {
- match_start = (elem >> 11) + (elem & 0x7ff);
- } else {
- match_start = res[++i] - elem;
- }
- } else {
- var func_result = replace(elem, match_start, subject);
- // Overwrite the i'th element in the results with the string we got
- // back from the callback function.
- res[i] = TO_STRING(func_result);
- match_start += elem.length;
- }
- }
- } else {
- for (var i = 0; i < len; i++) {
- var elem = res[i];
- if (!%_IsSmi(elem)) {
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp properties.
- var func_result = %reflect_apply(replace, UNDEFINED, elem);
- // Overwrite the i'th element in the results with the string we got
- // back from the callback function.
- res[i] = TO_STRING(func_result);
- }
- }
- }
- var result = %StringBuilderConcat(res, len, subject);
- resultArray.length = 0;
- reusableReplaceArray = resultArray;
- return result;
-}
-
-
-// Compute the string of a given regular expression capture.
-function CaptureString(string, lastCaptureInfo, index) {
- // Scale the index.
- var scaled = index << 1;
- // Compute start and end.
- var start = lastCaptureInfo[CAPTURE(scaled)];
- // If start isn't valid, return undefined.
- if (start < 0) return;
- var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- return %_SubString(string, start, end);
-}
-
-
-function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) {
- regexp.lastIndex = 0;
- return subject;
- }
- var index = matchInfo[CAPTURE0];
- var result = %_SubString(subject, 0, index);
- var endOfMatch = matchInfo[CAPTURE1];
- // Compute the parameter list consisting of the match, captures, index,
- // and subject for the replace function invocation.
- // The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
- var replacement;
- if (m == 1) {
- // No captures, only the match, which is always valid.
- var s = %_SubString(subject, index, endOfMatch);
- // Don't call directly to avoid exposing the built-in global object.
- replacement = replace(s, index, subject);
- } else {
- var parameters = new InternalArray(m + 2);
- for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, matchInfo, j);
- }
- parameters[j] = index;
- parameters[j + 1] = subject;
-
- replacement = %reflect_apply(replace, UNDEFINED, parameters);
- }
-
- result += replacement; // The add method converts to string if necessary.
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- return result + %_SubString(subject, endOfMatch, subject.length);
-}
-
-// Wraps access to matchInfo's captures into a format understood by
-// GetSubstitution.
-function MatchInfoCaptureWrapper(matches, subject) {
- this.length = NUMBER_OF_CAPTURES(matches) >> 1;
- this.match = matches;
- this.subject = subject;
-}
-
-MatchInfoCaptureWrapper.prototype.at = function(ix) {
- const match = this.match;
- const start = match[CAPTURE(ix << 1)];
- if (start < 0) return UNDEFINED;
- return %_SubString(this.subject, start, match[CAPTURE((ix << 1) + 1)]);
-};
-%SetForceInlineFlag(MatchInfoCaptureWrapper.prototype.at);
-
-function ArrayCaptureWrapper(array) {
- this.length = array.length;
- this.array = array;
-}
-
-ArrayCaptureWrapper.prototype.at = function(ix) {
- return this.array[ix];
-};
-%SetForceInlineFlag(ArrayCaptureWrapper.prototype.at);
-
-function RegExpReplace(string, replace) {
- if (!IS_REGEXP(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@replace", this);
- }
- var subject = TO_STRING(string);
- var search = this;
-
- if (!IS_CALLABLE(replace)) {
- replace = TO_STRING(replace);
-
- if (!REGEXP_GLOBAL(search)) {
- // Non-global regexp search, string replace.
- var match = DoRegExpExec(search, subject, 0);
- if (match == null) {
- search.lastIndex = 0
- return subject;
- }
- if (replace.length == 0) {
- return %_SubString(subject, 0, match[CAPTURE0]) +
- %_SubString(subject, match[CAPTURE1], subject.length)
- }
- const captures = new MatchInfoCaptureWrapper(match, subject);
- const start = match[CAPTURE0];
- const end = match[CAPTURE1];
-
- const prefix = %_SubString(subject, 0, start);
- const matched = %_SubString(subject, start, end);
- const suffix = %_SubString(subject, end, subject.length);
-
- return prefix +
- GetSubstitution(matched, subject, start, captures, replace) +
- suffix;
- }
-
- // Global regexp search, string replace.
- search.lastIndex = 0;
- return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, RegExpLastMatchInfo);
- }
-
- if (REGEXP_GLOBAL(search)) {
- // Global regexp search, function replace.
- return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
- }
- // Non-global regexp search, function replace.
- return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
-}
-
-
-// ES#sec-getsubstitution
-// GetSubstitution(matched, str, position, captures, replacement)
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function GetSubstitution(matched, string, position, captures, replacement) {
- var matchLength = matched.length;
- var stringLength = string.length;
- var capturesLength = captures.length;
- var tailPos = position + matchLength;
- var result = "";
- var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
-
- var next = %StringIndexOf(replacement, '$', 0);
- if (next < 0) {
- result += replacement;
- return result;
- }
-
- if (next > 0) result += %_SubString(replacement, 0, next);
-
- while (true) {
- expansion = '$';
- pos = next + 1;
- if (pos < replacement.length) {
- peek = %_StringCharCodeAt(replacement, pos);
- if (peek == 36) { // $$
- ++pos;
- result += '$';
- } else if (peek == 38) { // $& - match
- ++pos;
- result += matched;
- } else if (peek == 96) { // $` - prefix
- ++pos;
- result += %_SubString(string, 0, position);
- } else if (peek == 39) { // $' - suffix
- ++pos;
- result += %_SubString(string, tailPos, stringLength);
- } else if (peek >= 48 && peek <= 57) {
- // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
- scaledIndex = (peek - 48);
- advance = 1;
- if (pos + 1 < replacement.length) {
- next = %_StringCharCodeAt(replacement, pos + 1);
- if (next >= 48 && next <= 57) {
- newScaledIndex = scaledIndex * 10 + ((next - 48));
- if (newScaledIndex < capturesLength) {
- scaledIndex = newScaledIndex;
- advance = 2;
- }
- }
- }
- if (scaledIndex != 0 && scaledIndex < capturesLength) {
- var capture = captures.at(scaledIndex);
- if (!IS_UNDEFINED(capture)) result += capture;
- pos += advance;
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
- } else {
- result += '$';
- }
-
- // Go the the next $ in the replacement.
- next = %StringIndexOf(replacement, '$', pos);
-
- // Return if there are no more $ characters in the replacement. If we
- // haven't reached the end, we need to append the suffix.
- if (next < 0) {
- if (pos < replacement.length) {
- result += %_SubString(replacement, pos, replacement.length);
- }
- return result;
- }
-
- // Append substring between the previous and the next $ character.
- if (next > pos) {
- result += %_SubString(replacement, pos, next);
- }
- }
- return result;
-}
-
-
-// ES#sec-advancestringindex
-// AdvanceStringIndex ( S, index, unicode )
-function AdvanceStringIndex(string, index, unicode) {
- var increment = 1;
- if (unicode) {
- var first = %_StringCharCodeAt(string, index);
- if (first >= 0xD800 && first <= 0xDBFF && string.length > index + 1) {
- var second = %_StringCharCodeAt(string, index + 1);
- if (second >= 0xDC00 && second <= 0xDFFF) {
- increment = 2;
- }
- }
- }
- return increment;
-}
-
-
-function SetAdvancedStringIndex(regexp, string, unicode) {
- var lastIndex = regexp.lastIndex;
- regexp.lastIndex = lastIndex +
- AdvanceStringIndex(string, lastIndex, unicode);
-}
-
-
-// ES#sec-regexp.prototype-@@replace
-// RegExp.prototype [ @@replace ] ( string, replaceValue )
-function RegExpSubclassReplace(string, replace) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@replace", this);
- }
- string = TO_STRING(string);
- var length = string.length;
- var functionalReplace = IS_CALLABLE(replace);
- if (!functionalReplace) replace = TO_STRING(replace);
- var global = TO_BOOLEAN(this.global);
- if (global) {
- var unicode = TO_BOOLEAN(this.unicode);
- this.lastIndex = 0;
- }
-
- // TODO(adamk): this fast path is wrong as we doesn't ensure that 'exec'
- // is actually a data property on RegExp.prototype.
- var exec;
- if (IS_REGEXP(this)) {
- exec = this.exec;
- if (exec === RegExpExecJS) {
- return %_Call(RegExpReplace, this, string, replace);
- }
- }
-
- var results = new InternalArray();
- var result, replacement;
- while (true) {
- result = RegExpSubclassExec(this, string, exec);
- // Ensure exec will be read again on the next loop through.
- exec = UNDEFINED;
- if (IS_NULL(result)) {
- break;
- } else {
- results.push(result);
- if (!global) break;
- var matchStr = TO_STRING(result[0]);
- if (matchStr === "") SetAdvancedStringIndex(this, string, unicode);
- }
- }
- var accumulatedResult = "";
- var nextSourcePosition = 0;
- for (var i = 0; i < results.length; i++) {
- result = results[i];
- var capturesLength = MaxSimple(TO_LENGTH(result.length), 0);
- var matched = TO_STRING(result[0]);
- var matchedLength = matched.length;
- var position = MaxSimple(MinSimple(TO_INTEGER(result.index), length), 0);
- var captures = new InternalArray();
- for (var n = 0; n < capturesLength; n++) {
- var capture = result[n];
- if (!IS_UNDEFINED(capture)) capture = TO_STRING(capture);
- captures[n] = capture;
- }
- if (functionalReplace) {
- var parameters = new InternalArray(capturesLength + 2);
- for (var j = 0; j < capturesLength; j++) {
- parameters[j] = captures[j];
- }
- parameters[j] = position;
- parameters[j + 1] = string;
- replacement = %reflect_apply(replace, UNDEFINED, parameters, 0,
- parameters.length);
- } else {
- const capturesWrapper = new ArrayCaptureWrapper(captures);
- replacement = GetSubstitution(matched, string, position, capturesWrapper,
- replace);
- }
- if (position >= nextSourcePosition) {
- accumulatedResult +=
- %_SubString(string, nextSourcePosition, position) + replacement;
- nextSourcePosition = position + matchedLength;
- }
- }
- if (nextSourcePosition >= length) return accumulatedResult;
- return accumulatedResult + %_SubString(string, nextSourcePosition, length);
-}
-%FunctionRemovePrototype(RegExpSubclassReplace);
-
-
-// ES#sec-regexp.prototype-@@search
-// RegExp.prototype [ @@search ] ( string )
-function RegExpSubclassSearch(string) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- "RegExp.prototype.@@search", this);
- }
- string = TO_STRING(string);
- var previousLastIndex = this.lastIndex;
- if (previousLastIndex != 0) this.lastIndex = 0;
- var result = RegExpSubclassExec(this, string);
- var currentLastIndex = this.lastIndex;
- if (currentLastIndex != previousLastIndex) this.lastIndex = previousLastIndex;
- if (IS_NULL(result)) return -1;
- return result.index;
-}
-%FunctionRemovePrototype(RegExpSubclassSearch);
-
-
-// Getters for the static properties lastMatch, lastParen, leftContext, and
-// rightContext of the RegExp constructor. The properties are computed based
-// on the captures array of the last successful match and the subject string
-// of the last successful match.
-function RegExpGetLastMatch() {
- var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
- return %_SubString(regExpSubject,
- RegExpLastMatchInfo[CAPTURE0],
- RegExpLastMatchInfo[CAPTURE1]);
-}
-
-
-function RegExpGetLastParen() {
- var length = NUMBER_OF_CAPTURES(RegExpLastMatchInfo);
- if (length <= 2) return ''; // There were no captures.
- // We match the SpiderMonkey behavior: return the substring defined by the
- // last pair (after the first pair) of elements of the capture array even if
- // it is empty.
- var regExpSubject = LAST_SUBJECT(RegExpLastMatchInfo);
- var start = RegExpLastMatchInfo[CAPTURE(length - 2)];
- var end = RegExpLastMatchInfo[CAPTURE(length - 1)];
- if (start != -1 && end != -1) {
- return %_SubString(regExpSubject, start, end);
- }
- return "";
-}
-
-
-function RegExpGetLeftContext() {
- var start_index;
- var subject;
- start_index = RegExpLastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT(RegExpLastMatchInfo);
- return %_SubString(subject, 0, start_index);
-}
-
-
-function RegExpGetRightContext() {
- var start_index;
- var subject;
- start_index = RegExpLastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT(RegExpLastMatchInfo);
- return %_SubString(subject, start_index, subject.length);
-}
-
-
-// The properties $1..$9 are the first nine capturing substrings of the last
-// successful match, or ''. The function RegExpMakeCaptureGetter will be
-// called with indices from 1 to 9.
-function RegExpMakeCaptureGetter(n) {
- return function foo() {
- var index = n * 2;
- if (index >= NUMBER_OF_CAPTURES(RegExpLastMatchInfo)) return '';
- var matchStart = RegExpLastMatchInfo[CAPTURE(index)];
- var matchEnd = RegExpLastMatchInfo[CAPTURE(index + 1)];
- if (matchStart == -1 || matchEnd == -1) return '';
- return %_SubString(LAST_SUBJECT(RegExpLastMatchInfo), matchStart, matchEnd);
- };
-}
-
-
-// ES6 21.2.5.3.
-function RegExpGetFlags() {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(
- kRegExpNonObject, "RegExp.prototype.flags", TO_STRING(this));
- }
- var result = '';
- if (this.global) result += 'g';
- if (this.ignoreCase) result += 'i';
- if (this.multiline) result += 'm';
- if (this.unicode) result += 'u';
- if (this.sticky) result += 'y';
- return result;
-}
-
-
-// ES6 21.2.5.4.
-function RegExpGetGlobal() {
- if (!IS_REGEXP(this)) {
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
- return UNDEFINED;
- }
- throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.global");
- }
- return TO_BOOLEAN(REGEXP_GLOBAL(this));
-}
-%SetForceInlineFlag(RegExpGetGlobal);
-
-
-// ES6 21.2.5.5.
-function RegExpGetIgnoreCase() {
- if (!IS_REGEXP(this)) {
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
- return UNDEFINED;
- }
- throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.ignoreCase");
- }
- return TO_BOOLEAN(REGEXP_IGNORE_CASE(this));
-}
-
-
-// ES6 21.2.5.7.
-function RegExpGetMultiline() {
- if (!IS_REGEXP(this)) {
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeOldFlagGetter);
- return UNDEFINED;
- }
- throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.multiline");
- }
- return TO_BOOLEAN(REGEXP_MULTILINE(this));
-}
-
-
-// ES6 21.2.5.10.
-function RegExpGetSource() {
- if (!IS_REGEXP(this)) {
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeSourceGetter);
- return "(?:)";
- }
- throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.source");
- }
- return REGEXP_SOURCE(this);
-}
-
-
-// ES6 21.2.5.12.
-function RegExpGetSticky() {
- if (!IS_REGEXP(this)) {
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeStickyGetter);
- return UNDEFINED;
- }
- throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.sticky");
- }
- return TO_BOOLEAN(REGEXP_STICKY(this));
-}
-%SetForceInlineFlag(RegExpGetSticky);
-
-
-// ES6 21.2.5.15.
-function RegExpGetUnicode() {
- if (!IS_REGEXP(this)) {
- if (this === GlobalRegExpPrototype) {
- %IncrementUseCounter(kRegExpPrototypeUnicodeGetter);
- return UNDEFINED;
- }
- throw %make_type_error(kRegExpNonRegExp, "RegExp.prototype.unicode");
- }
- return TO_BOOLEAN(REGEXP_UNICODE(this));
-}
-%SetForceInlineFlag(RegExpGetUnicode);
-
-
-function RegExpSpecies() {
- return this;
-}
-
-
-// -------------------------------------------------------------------
-
-utils.InstallGetter(GlobalRegExp, speciesSymbol, RegExpSpecies);
-
-utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
- "test", RegExpSubclassTest,
- "toString", RegExpToString,
- "compile", RegExpCompileJS,
- matchSymbol, RegExpSubclassMatch,
- replaceSymbol, RegExpSubclassReplace,
- searchSymbol, RegExpSubclassSearch,
- splitSymbol, RegExpSubclassSplit,
-]);
-
-utils.InstallGetter(GlobalRegExp.prototype, 'flags', RegExpGetFlags);
-utils.InstallGetter(GlobalRegExp.prototype, 'global', RegExpGetGlobal);
-utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
-utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
-utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
-utils.InstallGetter(GlobalRegExp.prototype, 'sticky', RegExpGetSticky);
-utils.InstallGetter(GlobalRegExp.prototype, 'unicode', RegExpGetUnicode);
-
-// The properties `input` and `$_` are aliases for each other. When this
-// value is set the value it is set to is coerced to a string.
-// Getter and setter for the input.
-var RegExpGetInput = function() {
- var regExpInput = LAST_INPUT(RegExpLastMatchInfo);
- return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
-};
-var RegExpSetInput = function(string) {
- LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
-};
-
-// TODO(jgruber): All of these getters and setters were intended to be installed
-// with various attributes (e.g. DONT_ENUM | DONT_DELETE), but
-// InstallGetterSetter had a bug which ignored the passed attributes and
-// simply installed as DONT_ENUM instead. We might want to change back
-// to the intended attributes at some point.
-// On the other hand, installing attributes as DONT_ENUM matches the draft
-// specification at
-// https://github.com/claudepache/es-regexp-legacy-static-properties
-
-%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
-utils.InstallGetterSetter(GlobalRegExp, 'input', RegExpGetInput, RegExpSetInput,
- DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$_', RegExpGetInput, RegExpSetInput,
- DONT_ENUM);
-
-
-var NoOpSetter = function(ignored) {};
-
-
-// Static properties set by a successful match.
-utils.InstallGetterSetter(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
- NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$&', RegExpGetLastMatch, NoOpSetter,
- DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, 'lastParen', RegExpGetLastParen,
- NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$+', RegExpGetLastParen, NoOpSetter,
- DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, 'leftContext', RegExpGetLeftContext,
- NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, '$`', RegExpGetLeftContext, NoOpSetter,
- DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, 'rightContext', RegExpGetRightContext,
- NoOpSetter, DONT_ENUM);
-utils.InstallGetterSetter(GlobalRegExp, "$'", RegExpGetRightContext, NoOpSetter,
- DONT_ENUM);
-
-for (var i = 1; i < 10; ++i) {
- utils.InstallGetterSetter(GlobalRegExp, '$' + i, RegExpMakeCaptureGetter(i),
- NoOpSetter, DONT_ENUM);
-}
-%ToFastProperties(GlobalRegExp);
-
-%InstallToContext(["regexp_last_match_info", RegExpLastMatchInfo]);
-
-// -------------------------------------------------------------------
-// Internal
-
-var InternalRegExpMatchInfo = {
- REGEXP_NUMBER_OF_CAPTURES: 2,
- REGEXP_LAST_SUBJECT: "",
- REGEXP_LAST_INPUT: UNDEFINED,
- CAPTURE0: 0,
- CAPTURE1: 0
-};
-
-function InternalRegExpMatch(regexp, subject) {
- var matchInfo = %_RegExpExec(regexp, subject, 0, InternalRegExpMatchInfo);
- if (!IS_NULL(matchInfo)) {
- RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, subject);
- }
- return null;
-}
-
-function InternalRegExpReplace(regexp, subject, replacement) {
- return %StringReplaceGlobalRegExpWithString(
- subject, regexp, replacement, InternalRegExpMatchInfo);
-}
-
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.GetSubstitution = GetSubstitution;
- to.InternalRegExpMatch = InternalRegExpMatch;
- to.InternalRegExpReplace = InternalRegExpReplace;
- to.IsRegExp = IsRegExp;
- to.RegExpExec = DoRegExpExec;
- to.RegExpInitialize = RegExpInitialize;
- to.RegExpLastMatchInfo = RegExpLastMatchInfo;
-});
-
-})
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index 7c552a93a9..3a9254c713 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -10,13 +10,10 @@
// Imports
var ArrayJoin;
-var GetSubstitution;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
-var IsRegExp;
var MaxSimple;
var MinSimple;
-var RegExpInitialize;
var matchSymbol = utils.ImportNow("match_symbol");
var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
@@ -24,11 +21,8 @@ var splitSymbol = utils.ImportNow("split_symbol");
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
- GetSubstitution = from.GetSubstitution;
- IsRegExp = from.IsRegExp;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
- RegExpInitialize = from.RegExpInitialize;
});
//-------------------------------------------------------------------
@@ -46,21 +40,6 @@ function StringConcat(other /* and more */) { // length == 1
}
-// ECMA-262 section 15.5.4.7
-function StringIndexOf(pattern, position) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
-
- var subject = TO_STRING(this);
- pattern = TO_STRING(pattern);
- var index = TO_INTEGER(position);
- if (index < 0) index = 0;
- if (index > subject.length) index = subject.length;
- return %StringIndexOf(subject, pattern, index);
-}
-
-%FunctionSetLength(StringIndexOf, 1);
-
-
// ES6 21.1.3.11.
function StringMatchJS(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
@@ -75,11 +54,94 @@ function StringMatchJS(pattern) {
var subject = TO_STRING(this);
// Equivalent to RegExpCreate (ES#sec-regexpcreate)
- var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
- RegExpInitialize(regexp, pattern);
+ var regexp = %RegExpCreate(pattern);
return regexp[matchSymbol](subject);
}
+// ES#sec-getsubstitution
+// GetSubstitution(matched, str, position, captures, replacement)
+// Expand the $-expressions in the string and return a new string with
+// the result.
+function GetSubstitution(matched, string, position, captures, replacement) {
+ var matchLength = matched.length;
+ var stringLength = string.length;
+ var capturesLength = captures.length;
+ var tailPos = position + matchLength;
+ var result = "";
+ var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
+
+ var next = %StringIndexOf(replacement, '$', 0);
+ if (next < 0) {
+ result += replacement;
+ return result;
+ }
+
+ if (next > 0) result += %_SubString(replacement, 0, next);
+
+ while (true) {
+ expansion = '$';
+ pos = next + 1;
+ if (pos < replacement.length) {
+ peek = %_StringCharCodeAt(replacement, pos);
+ if (peek == 36) { // $$
+ ++pos;
+ result += '$';
+ } else if (peek == 38) { // $& - match
+ ++pos;
+ result += matched;
+ } else if (peek == 96) { // $` - prefix
+ ++pos;
+ result += %_SubString(string, 0, position);
+ } else if (peek == 39) { // $' - suffix
+ ++pos;
+ result += %_SubString(string, tailPos, stringLength);
+ } else if (peek >= 48 && peek <= 57) {
+ // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+ scaledIndex = (peek - 48);
+ advance = 1;
+ if (pos + 1 < replacement.length) {
+ next = %_StringCharCodeAt(replacement, pos + 1);
+ if (next >= 48 && next <= 57) {
+ newScaledIndex = scaledIndex * 10 + ((next - 48));
+ if (newScaledIndex < capturesLength) {
+ scaledIndex = newScaledIndex;
+ advance = 2;
+ }
+ }
+ }
+ if (scaledIndex != 0 && scaledIndex < capturesLength) {
+ var capture = captures.at(scaledIndex);
+ if (!IS_UNDEFINED(capture)) result += capture;
+ pos += advance;
+ } else {
+ result += '$';
+ }
+ } else {
+ result += '$';
+ }
+ } else {
+ result += '$';
+ }
+
+ // Go the the next $ in the replacement.
+ next = %StringIndexOf(replacement, '$', pos);
+
+ // Return if there are no more $ characters in the replacement. If we
+ // haven't reached the end, we need to append the suffix.
+ if (next < 0) {
+ if (pos < replacement.length) {
+ result += %_SubString(replacement, pos, replacement.length);
+ }
+ return result;
+ }
+
+ // Append substring between the previous and the next $ character.
+ if (next > pos) {
+ result += %_SubString(replacement, pos, next);
+ }
+ }
+ return result;
+}
// ES6, section 21.1.3.14
function StringReplace(search, replace) {
@@ -158,8 +220,7 @@ function StringSearch(pattern) {
var subject = TO_STRING(this);
// Equivalent to RegExpCreate (ES#sec-regexpcreate)
- var regexp = %_NewObject(GlobalRegExp, GlobalRegExp);
- RegExpInitialize(regexp, pattern);
+ var regexp = %RegExpCreate(pattern);
return %_Call(regexp[searchSymbol], regexp, subject);
}
@@ -395,87 +456,6 @@ function StringRepeat(count) {
}
-// ES6 draft 04-05-14, section 21.1.3.18
-function StringStartsWith(searchString, position) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
-
- var s = TO_STRING(this);
-
- if (IsRegExp(searchString)) {
- throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.startsWith");
- }
-
- var ss = TO_STRING(searchString);
- var pos = TO_INTEGER(position);
-
- var s_len = s.length;
- var start = MinSimple(MaxSimple(pos, 0), s_len);
- var ss_len = ss.length;
- if (ss_len + start > s_len) {
- return false;
- }
-
- return %_SubString(s, start, start + ss_len) === ss;
-}
-
-%FunctionSetLength(StringStartsWith, 1);
-
-
-// ES6 draft 04-05-14, section 21.1.3.7
-function StringEndsWith(searchString, position) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
-
- var s = TO_STRING(this);
-
- if (IsRegExp(searchString)) {
- throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.endsWith");
- }
-
- var ss = TO_STRING(searchString);
- var s_len = s.length;
- var pos = !IS_UNDEFINED(position) ? TO_INTEGER(position) : s_len
-
- var end = MinSimple(MaxSimple(pos, 0), s_len);
- var ss_len = ss.length;
- var start = end - ss_len;
- if (start < 0) {
- return false;
- }
-
- return %_SubString(s, start, start + ss_len) === ss;
-}
-
-%FunctionSetLength(StringEndsWith, 1);
-
-
-// ES6 draft 04-05-14, section 21.1.3.6
-function StringIncludes(searchString, position) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
-
- var string = TO_STRING(this);
-
- if (IsRegExp(searchString)) {
- throw %make_type_error(kFirstArgumentNotRegExp, "String.prototype.includes");
- }
-
- searchString = TO_STRING(searchString);
- var pos = TO_INTEGER(position);
-
- var stringLength = string.length;
- if (pos < 0) pos = 0;
- if (pos > stringLength) pos = stringLength;
- var searchStringLength = searchString.length;
-
- if (searchStringLength + pos > stringLength) {
- return false;
- }
-
- return %StringIndexOf(string, searchString, pos) !== -1;
-}
-
-%FunctionSetLength(StringIncludes, 1);
-
-
// ES6 Draft 05-22-2014, section 21.1.3.3
function StringCodePointAt(pos) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
@@ -533,16 +513,12 @@ utils.InstallFunctions(GlobalString, DONT_ENUM, [
utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"codePointAt", StringCodePointAt,
"concat", StringConcat,
- "endsWith", StringEndsWith,
- "includes", StringIncludes,
- "indexOf", StringIndexOf,
"match", StringMatchJS,
"repeat", StringRepeat,
"replace", StringReplace,
"search", StringSearch,
"slice", StringSlice,
"split", StringSplitJS,
- "startsWith", StringStartsWith,
"toLowerCase", StringToLowerCaseJS,
"toLocaleLowerCase", StringToLocaleLowerCase,
"toUpperCase", StringToUpperCaseJS,
@@ -567,7 +543,6 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
// Exports
utils.Export(function(to) {
- to.StringIndexOf = StringIndexOf;
to.StringMatch = StringMatchJS;
to.StringReplace = StringReplace;
to.StringSlice = StringSlice;
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index edb3b06a74..7667e18d78 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -844,12 +844,7 @@ function TypedArrayFrom(source, mapfn, thisArg) {
// TODO(bmeurer): Migrate this to a proper builtin.
function TypedArrayConstructor() {
- if (IS_UNDEFINED(new.target)) {
- throw %make_type_error(kConstructorNonCallable, "TypedArray");
- }
- if (new.target === GlobalTypedArray) {
- throw %make_type_error(kConstructAbstractClass, "TypedArray");
- }
+ throw %make_type_error(kConstructAbstractClass, "TypedArray");
}
function TypedArraySpecies() {
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 93636a036b..f67a8b5bf4 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -18,51 +18,6 @@ var ObjectToString = utils.ImportNow("object_to_string");
// ----------------------------------------------------------------------------
-// ES6 18.2.5 parseInt(string, radix)
-function GlobalParseInt(string, radix) {
- if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
- // Some people use parseInt instead of Math.floor. This
- // optimization makes parseInt on a Smi 12 times faster (60ns
- // vs 800ns). The following optimization makes parseInt on a
- // non-Smi number 9 times faster (230ns vs 2070ns). Together
- // they make parseInt on a string 1.4% slower (274ns vs 270ns).
- if (%_IsSmi(string)) return string;
- if (IS_NUMBER(string) &&
- ((0.01 < string && string < 1e9) ||
- (-1e9 < string && string < -0.01))) {
- // Truncate number.
- return string | 0;
- }
- string = TO_STRING(string);
- radix = radix | 0;
- } else {
- // The spec says ToString should be evaluated before ToInt32.
- string = TO_STRING(string);
- radix = TO_INT32(radix);
- if (!(radix == 0 || (2 <= radix && radix <= 36))) {
- return NaN;
- }
- }
-
- if (%_HasCachedArrayIndex(string) &&
- (radix == 0 || radix == 10)) {
- return %_GetCachedArrayIndex(string);
- }
- return %StringParseInt(string, radix);
-}
-
-
-// ES6 18.2.4 parseFloat(string)
-function GlobalParseFloat(string) {
- // 1. Let inputString be ? ToString(string).
- string = TO_STRING(string);
- if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
- return %StringParseFloat(string);
-}
-
-
-// ----------------------------------------------------------------------------
-
// Set up global object.
var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
@@ -75,12 +30,6 @@ utils.InstallConstants(global, [
"undefined", UNDEFINED,
]);
-// Set up non-enumerable function on the global object.
-utils.InstallFunctions(global, DONT_ENUM, [
- "parseInt", GlobalParseInt,
- "parseFloat", GlobalParseFloat,
-]);
-
// ----------------------------------------------------------------------------
// Object
@@ -114,37 +63,6 @@ function GetMethod(obj, p) {
throw %make_type_error(kCalledNonCallable, typeof func);
}
-// ES6 section 19.1.2.18.
-function ObjectSetPrototypeOf(obj, proto) {
- CHECK_OBJECT_COERCIBLE(obj, "Object.setPrototypeOf");
-
- if (proto !== null && !IS_RECEIVER(proto)) {
- throw %make_type_error(kProtoObjectOrNull, proto);
- }
-
- if (IS_RECEIVER(obj)) {
- %SetPrototype(obj, proto);
- }
-
- return obj;
-}
-
-// ES6 B.2.2.1.1
-function ObjectGetProto() {
- return %object_get_prototype_of(this);
-}
-
-
-// ES6 B.2.2.1.2
-function ObjectSetProto(proto) {
- CHECK_OBJECT_COERCIBLE(this, "Object.prototype.__proto__");
-
- if ((IS_RECEIVER(proto) || IS_NULL(proto)) && IS_RECEIVER(this)) {
- %SetPrototype(this, proto);
- }
-}
-
-
// ES6 19.1.1.1
function ObjectConstructor(x) {
if (GlobalObject != new.target && !IS_UNDEFINED(new.target)) {
@@ -176,16 +94,6 @@ utils.InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
// __defineSetter__ is added in bootstrapper.cc.
// __lookupSetter__ is added in bootstrapper.cc.
]);
-utils.InstallGetterSetter(
- GlobalObject.prototype, "__proto__", ObjectGetProto, ObjectSetProto);
-
-// Set up non-enumerable functions in the Object object.
-utils.InstallFunctions(GlobalObject, DONT_ENUM, [
- "setPrototypeOf", ObjectSetPrototypeOf,
- // getOwnPropertySymbols is added in symbol.js.
- // Others are added in bootstrapper.cc.
-]);
-
// ----------------------------------------------------------------------------
@@ -210,13 +118,6 @@ utils.InstallConstants(GlobalNumber, [
"EPSILON", 2.220446049250313e-16,
]);
-// Harmony Number constructor additions
-utils.InstallFunctions(GlobalNumber, DONT_ENUM, [
- "parseInt", GlobalParseInt,
- "parseFloat", GlobalParseFloat
-]);
-
-
// ----------------------------------------------------------------------------
// Iterator related spec functions.
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 576100ab84..5e79b611a2 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -104,7 +104,7 @@ JsonParser<seq_one_byte>::JsonParser(Isolate* isolate, Handle<String> source)
source_length_(source->length()),
isolate_(isolate),
factory_(isolate_->factory()),
- zone_(isolate_->allocator()),
+ zone_(isolate_->allocator(), ZONE_NAME),
object_constructor_(isolate_->native_context()->object_function(),
isolate_),
position_(-1) {
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index c6e31e3f23..9b6c8f3381 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -780,7 +780,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
target_keys->get(i));
nonconfigurable_keys_length++;
// The key was moved, null it out in the original list.
- target_keys->set(i, Smi::FromInt(0));
+ target_keys->set(i, Smi::kZero);
} else {
// 14c. Else,
// 14c i. Append key as an element of targetConfigurableKeys.
@@ -794,7 +794,7 @@ Maybe<bool> KeyAccumulator::CollectOwnJSProxyKeys(Handle<JSReceiver> receiver,
return AddKeysFromJSProxy(proxy, trap_result);
}
// 16. Let uncheckedResultKeys be a new List which is a copy of trapResult.
- Zone set_zone(isolate_->allocator());
+ Zone set_zone(isolate_->allocator(), ZONE_NAME);
const int kPresent = 1;
const int kGone = 0;
IdentityMap<int> unchecked_result_keys(isolate_->heap(), &set_zone);
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index 3f150658e7..bade05e2e8 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -18,7 +18,7 @@ LayoutDescriptor* LayoutDescriptor::FromSmi(Smi* smi) {
Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
if (length <= kSmiValueSize) {
// The whole bit vector fits into a smi.
- return handle(LayoutDescriptor::FromSmi(Smi::FromInt(0)), isolate);
+ return handle(LayoutDescriptor::FromSmi(Smi::kZero), isolate);
}
length = GetSlowModeBackingStoreLength(length);
return Handle<LayoutDescriptor>::cast(isolate->factory()->NewFixedTypedArray(
@@ -37,7 +37,7 @@ bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
LayoutDescriptor* LayoutDescriptor::FastPointerLayout() {
- return LayoutDescriptor::FromSmi(Smi::FromInt(0));
+ return LayoutDescriptor::FromSmi(Smi::kZero);
}
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index f64143ed24..866a4471c7 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include <queue>
+#include "include/libplatform/libplatform.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index e36234f528..4b52c28129 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -11,8 +11,10 @@
#include <queue>
#include <vector>
+#include "include/libplatform/libplatform-export.h"
#include "include/libplatform/v8-tracing.h"
#include "include/v8-platform.h"
+#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/libplatform/task-queue.h"
@@ -28,7 +30,7 @@ namespace tracing {
class TracingController;
}
-class DefaultPlatform : public Platform {
+class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
public:
DefaultPlatform();
virtual ~DefaultPlatform();
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
index 5239cdac40..330527a09e 100644
--- a/deps/v8/src/libplatform/task-queue.h
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -7,6 +7,7 @@
#include <queue>
+#include "include/libplatform/libplatform-export.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
@@ -18,7 +19,7 @@ class Task;
namespace platform {
-class TaskQueue {
+class V8_PLATFORM_EXPORT TaskQueue {
public:
TaskQueue();
~TaskQueue();
diff --git a/deps/v8/src/libplatform/tracing/trace-config.cc b/deps/v8/src/libplatform/tracing/trace-config.cc
index 7a824f614e..e77d191e5e 100644
--- a/deps/v8/src/libplatform/tracing/trace-config.cc
+++ b/deps/v8/src/libplatform/tracing/trace-config.cc
@@ -32,11 +32,6 @@ void TraceConfig::AddIncludedCategory(const char* included_category) {
included_categories_.push_back(included_category);
}
-void TraceConfig::AddExcludedCategory(const char* excluded_category) {
- DCHECK(excluded_category != NULL && strlen(excluded_category) > 0);
- excluded_categories_.push_back(excluded_category);
-}
-
} // namespace tracing
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/worker-thread.h b/deps/v8/src/libplatform/worker-thread.h
index 6a55a6bc89..22b0626024 100644
--- a/deps/v8/src/libplatform/worker-thread.h
+++ b/deps/v8/src/libplatform/worker-thread.h
@@ -7,6 +7,8 @@
#include <queue>
+#include "include/libplatform/libplatform-export.h"
+#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -16,7 +18,7 @@ namespace platform {
class TaskQueue;
-class WorkerThread : public base::Thread {
+class V8_PLATFORM_EXPORT WorkerThread : public NON_EXPORTED_BASE(base::Thread) {
public:
explicit WorkerThread(TaskQueue* queue);
virtual ~WorkerThread();
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 0b40972b8e..f65498aa60 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -281,7 +281,7 @@ class SamplerManager {
if (!entry) return;
SamplerList& samplers = *static_cast<SamplerList*>(entry->value);
- for (int i = 0; i < samplers.size(); ++i) {
+ for (size_t i = 0; i < samplers.size(); ++i) {
Sampler* sampler = samplers[i];
Isolate* isolate = sampler->isolate();
// We require a fully initialized and entered isolate.
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 83e5f4594e..049286572b 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -129,7 +129,8 @@ class List {
INLINE(void Allocate(int length,
AllocationPolicy allocator = AllocationPolicy()));
- // Clears the list by setting the length to zero. Even if T is a
+ // Clears the list by freeing the storage memory. If you want to keep the
+ // memory, use Rewind(0) instead. Be aware, that even if T is a
// pointer type, clearing the list doesn't delete the entries.
INLINE(void Clear());
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 22972ec055..462f83f534 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -37,7 +37,6 @@ void Log::Initialize(const char* log_file_name) {
FLAG_log_gc = true;
FLAG_log_suspect = true;
FLAG_log_handles = true;
- FLAG_log_regexp = true;
FLAG_log_internal_timer_events = true;
}
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 059e5a53c5..b165b3ee9a 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -30,8 +30,8 @@ class Log {
static bool InitLogAtStart() {
return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
- FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp ||
- FLAG_ll_prof || FLAG_perf_basic_prof || FLAG_perf_prof ||
+ FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
+ FLAG_perf_basic_prof || FLAG_perf_prof ||
FLAG_log_internal_timer_events || FLAG_prof_cpp;
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index fc7fcb9ced..bc52d053f3 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -27,6 +27,7 @@
#include "src/runtime-profiler.h"
#include "src/source-position-table.h"
#include "src/string-stream.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -893,64 +894,6 @@ void Logger::LeaveExternal(Isolate* isolate) {
TIMER_EVENTS_LIST(V)
#undef V
-
-namespace {
-// Emits the source code of a regexp. Used by regexp events.
-void LogRegExpSource(Handle<JSRegExp> regexp, Isolate* isolate,
- Log::MessageBuilder* msg) {
- // Prints "/" + re.source + "/" +
- // (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
-
- Handle<Object> source =
- JSReceiver::GetProperty(isolate, regexp, "source").ToHandleChecked();
- if (!source->IsString()) {
- msg->Append("no source");
- return;
- }
-
- switch (regexp->TypeTag()) {
- case JSRegExp::ATOM:
- msg->Append('a');
- break;
- default:
- break;
- }
- msg->Append('/');
- msg->AppendDetailed(*Handle<String>::cast(source), false);
- msg->Append('/');
-
- // global flag
- Handle<Object> global =
- JSReceiver::GetProperty(isolate, regexp, "global").ToHandleChecked();
- if (global->IsTrue(isolate)) {
- msg->Append('g');
- }
- // ignorecase flag
- Handle<Object> ignorecase =
- JSReceiver::GetProperty(isolate, regexp, "ignoreCase").ToHandleChecked();
- if (ignorecase->IsTrue(isolate)) {
- msg->Append('i');
- }
- // multiline flag
- Handle<Object> multiline =
- JSReceiver::GetProperty(isolate, regexp, "multiline").ToHandleChecked();
- if (multiline->IsTrue(isolate)) {
- msg->Append('m');
- }
-}
-} // namespace
-
-
-void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
- if (!log_->IsEnabled() || !FLAG_log_regexp) return;
- Log::MessageBuilder msg(log_);
- msg.Append("regexp-compile,");
- LogRegExpSource(regexp, isolate_, &msg);
- msg.Append(in_cache ? ",hit" : ",miss");
- msg.WriteToLogFile();
-}
-
-
void Logger::ApiNamedPropertyAccess(const char* tag,
JSObject* holder,
Object* name) {
@@ -1206,12 +1149,13 @@ void Logger::CodeLinePosInfoRecordEvent(AbstractCode* code,
iter.Advance()) {
if (iter.is_statement()) {
jit_logger_->AddCodeLinePosInfoEvent(
- jit_handler_data, iter.code_offset(), iter.source_position(),
+ jit_handler_data, iter.code_offset(),
+ iter.source_position().ScriptOffset(),
JitCodeEvent::STATEMENT_POSITION);
}
- jit_logger_->AddCodeLinePosInfoEvent(jit_handler_data, iter.code_offset(),
- iter.source_position(),
- JitCodeEvent::POSITION);
+ jit_logger_->AddCodeLinePosInfoEvent(
+ jit_handler_data, iter.code_offset(),
+ iter.source_position().ScriptOffset(), JitCodeEvent::POSITION);
}
jit_logger_->EndCodePosInfoEvent(code, jit_handler_data);
}
@@ -1341,7 +1285,8 @@ void Logger::RuntimeCallTimerEvent() {
void Logger::TickEvent(v8::TickSample* sample, bool overflow) {
if (!log_->IsEnabled() || !FLAG_prof_cpp) return;
- if (FLAG_runtime_call_stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats ==
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
RuntimeCallTimerEvent();
}
Log::MessageBuilder msg(log_);
@@ -1542,8 +1487,6 @@ void Logger::LogCodeObjects() {
}
void Logger::LogBytecodeHandlers() {
- if (!FLAG_ignition) return;
-
const interpreter::OperandScale kOperandScales[] = {
#define VALUE(Name, _) interpreter::OperandScale::k##Name,
OPERAND_SCALE_LIST(VALUE)
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index a05b187282..b7a5fc6bd3 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -218,11 +218,6 @@ class Logger : public CodeEventListener {
INLINE(static void CallEventLogger(Isolate* isolate, const char* name,
StartEnd se, bool expose_to_api));
- // ==== Events logged by --log-regexp ====
- // Regexp compilation and execution events.
-
- void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
-
bool is_logging() {
return is_logging_;
}
@@ -346,8 +341,7 @@ class Logger : public CodeEventListener {
V(CompileCode, true) \
V(DeoptimizeCode, true) \
V(Execute, true) \
- V(External, true) \
- V(IcMiss, false)
+ V(External, true)
#define V(TimerName, expose) \
class TimerEvent##TimerName : public AllStatic { \
diff --git a/deps/v8/src/lookup-cache.cc b/deps/v8/src/lookup-cache.cc
index 18729d630d..b740fdbf11 100644
--- a/deps/v8/src/lookup-cache.cc
+++ b/deps/v8/src/lookup-cache.cc
@@ -13,72 +13,5 @@ void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
-int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
- DisallowHeapAllocation no_gc;
- // Uses only lower 32 bits if pointers are larger.
- uintptr_t addr_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
- return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
- DisallowHeapAllocation no_gc;
- int index = (Hash(map, name) & kHashMask);
- for (int i = 0; i < kEntriesPerBucket; i++) {
- Key& key = keys_[index + i];
- if ((key.map == *map) && key.name->Equals(*name)) {
- return field_offsets_[index + i];
- }
- }
- return kNotFound;
-}
-
-void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
- int field_offset) {
- DisallowHeapAllocation no_gc;
- if (!name->IsUniqueName()) {
- if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
- Handle<String>::cast(name))
- .ToHandle(&name)) {
- return;
- }
- }
- // This cache is cleared only between mark compact passes, so we expect the
- // cache to only contain old space names.
- DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
-
- int index = (Hash(map, name) & kHashMask);
- // After a GC there will be free slots, so we use them in order (this may
- // help to get the most frequently used one in position 0).
- for (int i = 0; i < kEntriesPerBucket; i++) {
- Key& key = keys_[index];
- Object* free_entry_indicator = NULL;
- if (key.map == free_entry_indicator) {
- key.map = *map;
- key.name = *name;
- field_offsets_[index + i] = field_offset;
- return;
- }
- }
- // No free entry found in this bucket, so we move them all down one and
- // put the new entry at position zero.
- for (int i = kEntriesPerBucket - 1; i > 0; i--) {
- Key& key = keys_[index + i];
- Key& key2 = keys_[index + i - 1];
- key = key2;
- field_offsets_[index + i] = field_offsets_[index + i - 1];
- }
-
- // Write the new first entry.
- Key& key = keys_[index];
- key.map = *map;
- key.name = *name;
- field_offsets_[index] = field_offset;
-}
-
-void KeyedLookupCache::Clear() {
- for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/lookup-cache.h b/deps/v8/src/lookup-cache.h
index 6da5e5b3d7..bf64cc00d2 100644
--- a/deps/v8/src/lookup-cache.h
+++ b/deps/v8/src/lookup-cache.h
@@ -52,65 +52,6 @@ class DescriptorLookupCache {
DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
};
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
- // Lookup field offset for (map, name). If absent, -1 is returned.
- int Lookup(Handle<Map> map, Handle<Name> name);
-
- // Update an element in the cache.
- void Update(Handle<Map> map, Handle<Name> name, int field_offset);
-
- // Clear the cache.
- void Clear();
-
- static const int kLength = 256;
- static const int kCapacityMask = kLength - 1;
- static const int kMapHashShift = 5;
- static const int kHashMask = -4; // Zero the last two bits.
- static const int kEntriesPerBucket = 4;
- static const int kEntryLength = 2;
- static const int kMapIndex = 0;
- static const int kKeyIndex = 1;
- static const int kNotFound = -1;
-
- // kEntriesPerBucket should be a power of 2.
- STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
- STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
- private:
- KeyedLookupCache() {
- for (int i = 0; i < kLength; ++i) {
- keys_[i].map = NULL;
- keys_[i].name = NULL;
- field_offsets_[i] = kNotFound;
- }
- }
-
- static inline int Hash(Handle<Map> map, Handle<Name> name);
-
- // Get the address of the keys and field_offsets arrays. Used in
- // generated code to perform cache lookups.
- Address keys_address() { return reinterpret_cast<Address>(&keys_); }
-
- Address field_offsets_address() {
- return reinterpret_cast<Address>(&field_offsets_);
- }
-
- struct Key {
- Map* map;
- Name* name;
- };
-
- Key keys_[kLength];
- int field_offsets_[kLength];
-
- friend class ExternalReference;
- friend class Isolate;
- DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
-};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index b6c0b92a17..186823df84 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -129,7 +129,8 @@ Handle<JSReceiver> LookupIterator::GetRootForNonJSReceiver(
Handle<JSValue>::cast(result)->set_value(*receiver);
return result;
}
- auto root = handle(receiver->GetRootMap(isolate)->prototype(), isolate);
+ auto root =
+ handle(receiver->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (root->IsNull(isolate)) {
unsigned int magic = 0xbbbbbbbb;
isolate->PushStackTraceAndDie(magic, *receiver, NULL, magic);
@@ -193,6 +194,11 @@ void LookupIterator::InternalUpdateProtector() {
} else if (*name_ == heap()->has_instance_symbol()) {
if (!isolate_->IsHasInstanceLookupChainIntact()) return;
isolate_->InvalidateHasInstanceProtector();
+ } else if (*name_ == heap()->iterator_symbol()) {
+ if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
+ if (holder_->IsJSArray()) {
+ isolate_->InvalidateArrayIteratorProtector();
+ }
}
}
@@ -601,6 +607,12 @@ Handle<Object> LookupIterator::FetchValue() const {
return handle(result, isolate_);
}
+int LookupIterator::GetFieldDescriptorIndex() const {
+ DCHECK(has_property_);
+ DCHECK(holder_->HasFastProperties());
+ DCHECK_EQ(v8::internal::DATA, property_details_.type());
+ return descriptor_number();
+}
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
@@ -797,7 +809,8 @@ LookupIterator::State LookupIterator::LookupInRegularHolder(
JSObject* js_object = JSObject::cast(holder);
ElementsAccessor* accessor = js_object->GetElementsAccessor();
FixedArrayBase* backing_store = js_object->elements();
- number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
+ number_ =
+ accessor->GetEntryForIndex(isolate_, js_object, backing_store, index_);
if (number_ == kMaxUInt32) {
return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
}
@@ -843,5 +856,27 @@ Handle<InterceptorInfo> LookupIterator::GetInterceptorForFailedAccessCheck()
return Handle<InterceptorInfo>();
}
+bool LookupIterator::TryLookupCachedProperty() {
+ return state() == LookupIterator::ACCESSOR &&
+ GetAccessors()->IsAccessorPair() && LookupCachedProperty();
+}
+
+bool LookupIterator::LookupCachedProperty() {
+ DCHECK_EQ(state(), LookupIterator::ACCESSOR);
+ DCHECK(GetAccessors()->IsAccessorPair());
+
+ AccessorPair* accessor_pair = AccessorPair::cast(*GetAccessors());
+ Handle<Object> getter(accessor_pair->getter(), isolate());
+ MaybeHandle<Name> maybe_name =
+ FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
+ if (maybe_name.is_null()) return false;
+
+ // We have found a cached property! Modify the iterator accordingly.
+ name_ = maybe_name.ToHandleChecked();
+ Restart();
+ CHECK_EQ(state(), LookupIterator::DATA);
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 687c677613..e0b40c40fe 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -6,13 +6,14 @@
#define V8_LOOKUP_H_
#include "src/factory.h"
+#include "src/globals.h"
#include "src/isolate.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
-class LookupIterator final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
public:
enum Configuration {
// Configuration bits.
@@ -237,6 +238,7 @@ class LookupIterator final BASE_EMBEDDED {
}
FieldIndex GetFieldIndex() const;
Handle<FieldType> GetFieldType() const;
+ int GetFieldDescriptorIndex() const;
int GetAccessorIndex() const;
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
@@ -256,11 +258,17 @@ class LookupIterator final BASE_EMBEDDED {
if (*name_ == heap()->is_concat_spreadable_symbol() ||
*name_ == heap()->constructor_string() ||
*name_ == heap()->species_symbol() ||
- *name_ == heap()->has_instance_symbol()) {
+ *name_ == heap()->has_instance_symbol() ||
+ *name_ == heap()->iterator_symbol()) {
InternalUpdateProtector();
}
}
+ // Lookup a 'cached' private property for an accessor.
+ // If not found returns false and leaves the LookupIterator unmodified.
+ bool TryLookupCachedProperty();
+ bool LookupCachedProperty();
+
private:
void InternalUpdateProtector();
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index e9605d7280..844c956e7b 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -29,9 +29,14 @@ enum class MachineRepresentation : uint8_t {
kFloat32,
kFloat64,
kSimd128,
- kFirstFPRepresentation = kFloat32
+ kFirstFPRepresentation = kFloat32,
+ kLastRepresentation = kSimd128
};
+static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
+ kIntSize * kBitsPerByte,
+ "Bit masks of MachineRepresentation should fit in an int");
+
const char* MachineReprToString(MachineRepresentation);
enum class MachineSemantic : uint8_t {
@@ -223,7 +228,7 @@ V8_INLINE size_t hash_value(MachineType type) {
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
MachineRepresentation rep);
std::ostream& operator<<(std::ostream& os, MachineSemantic type);
-std::ostream& operator<<(std::ostream& os, MachineType type);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, MachineType type);
inline bool IsFloatingPoint(MachineRepresentation rep) {
return rep >= MachineRepresentation::kFirstFPRepresentation;
@@ -234,12 +239,17 @@ inline bool CanBeTaggedPointer(MachineRepresentation rep) {
rep == MachineRepresentation::kTaggedPointer;
}
+inline bool CanBeTaggedSigned(MachineRepresentation rep) {
+ return rep == MachineRepresentation::kTagged ||
+ rep == MachineRepresentation::kTaggedSigned;
+}
+
inline bool IsAnyTagged(MachineRepresentation rep) {
return CanBeTaggedPointer(rep) || rep == MachineRepresentation::kTaggedSigned;
}
// Gets the log2 of the element size in bytes of the machine type.
-inline int ElementSizeLog2Of(MachineRepresentation rep) {
+V8_EXPORT_PRIVATE inline int ElementSizeLog2Of(MachineRepresentation rep) {
switch (rep) {
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index cc6349d73c..eea77e34d8 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -12,6 +12,7 @@
#include "src/keys.h"
#include "src/string-builder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -211,14 +212,6 @@ Handle<Object> JSStackFrame::GetFunctionName() {
return isolate_->factory()->null_value();
}
-Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
- if (!HasScript()) return isolate_->factory()->null_value();
- Handle<Script> script = GetScript();
- Object* source_url = script->source_url();
- return (source_url->IsString()) ? handle(source_url, isolate_)
- : handle(script->name(), isolate_);
-}
-
namespace {
bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
@@ -238,8 +231,19 @@ bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
return false;
}
+Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
+ Object* name_or_url = script->source_url();
+ if (!name_or_url->IsString()) name_or_url = script->name();
+ return handle(name_or_url, isolate);
+}
+
} // namespace
+Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
+ if (!HasScript()) return isolate_->factory()->null_value();
+ return ScriptNameOrSourceUrl(GetScript(), isolate_);
+}
+
Handle<Object> JSStackFrame::GetMethodName() {
if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_)) {
return isolate_->factory()->null_value();
@@ -298,7 +302,7 @@ namespace {
Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
if (script->eval_from_shared()->IsUndefined(isolate))
- return *isolate->factory()->undefined_value();
+ return isolate->heap()->undefined_value();
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
@@ -312,13 +316,13 @@ Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
if (script->eval_from_shared()->IsUndefined(isolate))
- return *isolate->factory()->undefined_value();
+ return isolate->heap()->undefined_value();
Handle<SharedFunctionInfo> eval_from_shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
return eval_from_shared->script()->IsScript()
? eval_from_shared->script()
- : *isolate->factory()->undefined_value();
+ : isolate->heap()->undefined_value();
}
MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
@@ -364,8 +368,8 @@ MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
builder.AppendString(Handle<String>::cast(name_obj));
Script::PositionInfo info;
- if (eval_from_script->GetPositionInfo(script->GetEvalPosition(), &info,
- Script::NO_OFFSET)) {
+ if (Script::GetPositionInfo(eval_from_script, script->GetEvalPosition(),
+ &info, Script::NO_OFFSET)) {
builder.AppendCString(":");
Handle<String> str = isolate->factory()->NumberToString(
@@ -455,7 +459,7 @@ bool IsNonEmptyString(Handle<Object> object) {
return (object->IsString() && String::cast(*object)->length() > 0);
}
-void AppendFileLocation(Isolate* isolate, JSStackFrame* call_site,
+void AppendFileLocation(Isolate* isolate, StackFrameBase* call_site,
IncrementalStringBuilder* builder) {
if (call_site->IsNative()) {
builder->AppendCString("native");
@@ -595,14 +599,14 @@ MaybeHandle<String> JSStackFrame::ToString() {
builder.AppendString(Handle<String>::cast(function_name));
} else {
AppendFileLocation(isolate_, this, &builder);
- RETURN_RESULT(isolate_, builder.Finish(), String);
+ return builder.Finish();
}
builder.AppendCString(" (");
AppendFileLocation(isolate_, this, &builder);
builder.AppendCString(")");
- RETURN_RESULT(isolate_, builder.Finish(), String);
+ return builder.Finish();
}
int JSStackFrame::GetPosition() const { return code_->SourcePosition(offset_); }
@@ -617,9 +621,10 @@ Handle<Script> JSStackFrame::GetScript() const {
void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
- DCHECK(array->IsWasmFrame(frame_ix));
+ // This function is called for both wasm and asm.js->wasm frames.
+ DCHECK(array->IsWasmFrame(frame_ix) || array->IsAsmJsWasmFrame(frame_ix));
isolate_ = isolate;
- wasm_obj_ = handle(array->WasmObject(frame_ix), isolate);
+ wasm_instance_ = handle(array->WasmInstance(frame_ix), isolate);
wasm_func_index_ = array->WasmFunctionIndex(frame_ix)->value();
code_ = handle(array->Code(frame_ix), isolate);
offset_ = array->Offset(frame_ix)->value();
@@ -631,7 +636,15 @@ Handle<Object> WasmStackFrame::GetFunction() const {
}
Handle<Object> WasmStackFrame::GetFunctionName() {
- return wasm::GetWasmFunctionNameOrNull(isolate_, wasm_obj_, wasm_func_index_);
+ Handle<Object> name;
+ Handle<WasmCompiledModule> compiled_module(
+ Handle<WasmInstanceObject>::cast(wasm_instance_)->get_compiled_module(),
+ isolate_);
+ if (!WasmCompiledModule::GetFunctionName(compiled_module, wasm_func_index_)
+ .ToHandle(&name)) {
+ name = isolate_->factory()->null_value();
+ }
+ return name;
}
MaybeHandle<String> WasmStackFrame::ToString() {
@@ -667,6 +680,72 @@ Handle<Object> WasmStackFrame::Null() const {
return isolate_->factory()->null_value();
}
+Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
+ return isolate_->global_proxy();
+}
+
+Handle<Object> AsmJsWasmStackFrame::GetFunction() const {
+ // TODO(clemensh): Return lazily created JSFunction.
+ return Null();
+}
+
+Handle<Object> AsmJsWasmStackFrame::GetFileName() {
+ Handle<Script> script =
+ wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+ return handle(script->name(), isolate_);
+}
+
+Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
+ Handle<Script> script =
+ wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+ return ScriptNameOrSourceUrl(script, isolate_);
+}
+
+int AsmJsWasmStackFrame::GetPosition() const {
+ DCHECK_LE(0, offset_);
+ int byte_offset = code_->SourcePosition(offset_);
+ return wasm::GetAsmWasmSourcePosition(Handle<JSObject>::cast(wasm_instance_),
+ wasm_func_index_, byte_offset);
+}
+
+int AsmJsWasmStackFrame::GetLineNumber() {
+ DCHECK_LE(0, GetPosition());
+ Handle<Script> script =
+ wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+ return Script::GetLineNumber(script, GetPosition()) + 1;
+}
+
+int AsmJsWasmStackFrame::GetColumnNumber() {
+ DCHECK_LE(0, GetPosition());
+ Handle<Script> script =
+ wasm::GetScript(Handle<JSObject>::cast(wasm_instance_));
+ DCHECK_EQ(Script::TYPE_NORMAL, script->type());
+ return Script::GetColumnNumber(script, GetPosition()) + 1;
+}
+
+MaybeHandle<String> AsmJsWasmStackFrame::ToString() {
+ // The string should look exactly as the respective javascript frame string.
+ // Keep this method in line to JSStackFrame::ToString().
+
+ IncrementalStringBuilder builder(isolate_);
+
+ Handle<Object> function_name = GetFunctionName();
+
+ if (IsNonEmptyString(function_name)) {
+ builder.AppendString(Handle<String>::cast(function_name));
+ builder.AppendCString(" (");
+ }
+
+ AppendFileLocation(isolate_, this, &builder);
+
+ if (IsNonEmptyString(function_name)) builder.AppendCString(")");
+
+ return builder.Finish();
+}
+
FrameArrayIterator::FrameArrayIterator(Isolate* isolate,
Handle<FrameArray> array, int frame_ix)
: isolate_(isolate), array_(array), next_frame_ix_(frame_ix) {}
@@ -680,13 +759,22 @@ void FrameArrayIterator::Next() { next_frame_ix_++; }
StackFrameBase* FrameArrayIterator::Frame() {
DCHECK(HasNext());
const int flags = array_->Flags(next_frame_ix_)->value();
- const bool is_js_frame = (flags & FrameArray::kIsWasmFrame) == 0;
- if (is_js_frame) {
- js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
- return &js_frame_;
- } else {
- wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
- return &wasm_frame_;
+ switch (flags & (FrameArray::kIsWasmFrame | FrameArray::kIsAsmJsWasmFrame)) {
+ case 0:
+ // JavaScript Frame.
+ js_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ return &js_frame_;
+ case FrameArray::kIsWasmFrame:
+ // Wasm Frame;
+ wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ return &wasm_frame_;
+ case FrameArray::kIsAsmJsWasmFrame:
+ // Asm.js Wasm Frame:
+ asm_wasm_frame_.FromFrameArray(isolate_, array_, next_frame_ix_);
+ return &asm_wasm_frame_;
+ default:
+ UNREACHABLE();
+ return nullptr;
}
}
@@ -864,7 +952,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
}
}
- RETURN_RESULT(isolate, builder.Finish(), Object);
+ return builder.Finish();
}
Handle<String> MessageTemplate::FormatMessage(Isolate* isolate,
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index e7bbcc34c2..86cc8d0dff 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -126,7 +126,7 @@ class WasmStackFrame : public StackFrameBase {
public:
virtual ~WasmStackFrame() {}
- Handle<Object> GetReceiver() const override { return wasm_obj_; }
+ Handle<Object> GetReceiver() const override { return wasm_instance_; }
Handle<Object> GetFunction() const override;
Handle<Object> GetFileName() override { return Null(); }
@@ -148,20 +148,40 @@ class WasmStackFrame : public StackFrameBase {
MaybeHandle<String> ToString() override;
- private:
- void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+ protected:
Handle<Object> Null() const;
Isolate* isolate_;
- Handle<Object> wasm_obj_;
+ // TODO(wasm): Use proper typing.
+ Handle<Object> wasm_instance_;
uint32_t wasm_func_index_;
Handle<AbstractCode> code_;
int offset_;
+ private:
+ void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+
friend class FrameArrayIterator;
};
+class AsmJsWasmStackFrame : public WasmStackFrame {
+ public:
+ virtual ~AsmJsWasmStackFrame() {}
+
+ Handle<Object> GetReceiver() const override;
+ Handle<Object> GetFunction() const override;
+
+ Handle<Object> GetFileName() override;
+ Handle<Object> GetScriptNameOrSourceUrl() override;
+
+ int GetPosition() const override;
+ int GetLineNumber() override;
+ int GetColumnNumber() override;
+
+ MaybeHandle<String> ToString() override;
+};
+
class FrameArrayIterator {
public:
FrameArrayIterator(Isolate* isolate, Handle<FrameArray> array,
@@ -179,6 +199,7 @@ class FrameArrayIterator {
int next_frame_ix_;
WasmStackFrame wasm_frame_;
+ AsmJsWasmStackFrame asm_wasm_frame_;
JSStackFrame js_frame_;
};
@@ -499,7 +520,8 @@ class ErrorUtils : public AllStatic {
T(UnsupportedTimeZone, "Unsupported time zone specified %") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
/* SyntaxError */ \
- T(AmbiguousExport, "Multiple star exports provide name '%'") \
+ T(AmbiguousExport, \
+ "The requested module contains conflicting star exports for name '%'") \
T(BadGetterArity, "Getter must not have any formal parameters.") \
T(BadSetterArity, "Setter must have exactly one formal parameter.") \
T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
@@ -604,7 +626,8 @@ class ErrorUtils : public AllStatic {
T(UnexpectedTokenString, "Unexpected string") \
T(UnexpectedTokenRegExp, "Unexpected regular expression") \
T(UnknownLabel, "Undefined label '%'") \
- T(UnresolvableExport, "Module does not provide an export named '%'") \
+ T(UnresolvableExport, \
+ "The requested module does not provide an export named '%'") \
T(UnterminatedArgList, "missing ) after argument list") \
T(UnterminatedRegExp, "Invalid regular expression: missing /") \
T(UnterminatedTemplate, "Unterminated template literal") \
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index f5b235d1f6..865e64c87d 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -1784,13 +1784,44 @@ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
addu(at, at, src.rm()); // Add base register.
}
+// Helper for base-reg + upper part of offset, when offset is larger than int16.
+// Loads higher part of the offset to AT register.
+// Returns lower part of the offset to be used as offset
+// in Load/Store instructions
+int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+ // If the highest bit of the lower part of the offset is 1, this would make
+ // the offset in the load/store instruction negative. We need to compensate
+ // for this by adding 1 to the upper part of the offset.
+ if (src.offset_ & kNegOffset) {
+ hi += 1;
+ }
+ lui(at, hi);
+ addu(at, at, src.rm());
+ return (src.offset_ & kImm16Mask);
+}
+
+// Helper for loading base-reg + upper offset's part to AT reg when we are using
+// two 32-bit loads/stores instead of one 64-bit
+int32_t Assembler::LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ if (is_int16((src.offset_ & kImm16Mask) + kIntSize)) {
+ // Only if lower part of offset + kIntSize fits in 16bits
+ return LoadRegPlusUpperOffsetPartToAt(src);
+ }
+ // In case offset's lower part + kIntSize doesn't fit in 16bits,
+ // load reg + hole offset to AT
+ LoadRegPlusOffsetToAt(src);
+ return 0;
+}
void Assembler::lb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LB, at, rd, off16);
}
}
@@ -1799,8 +1830,8 @@ void Assembler::lbu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LBU, at, rd, off16);
}
}
@@ -1809,8 +1840,8 @@ void Assembler::lh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LH, at, rd, off16);
}
}
@@ -1819,8 +1850,8 @@ void Assembler::lhu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LHU, at, rd, off16);
}
}
@@ -1829,8 +1860,8 @@ void Assembler::lw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LW, at, rd, off16);
}
}
@@ -1855,8 +1886,8 @@ void Assembler::sb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SB, at, rd, off16);
}
}
@@ -1865,8 +1896,8 @@ void Assembler::sh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SH, at, rd, off16);
}
}
@@ -1875,8 +1906,8 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SW, at, rd, off16);
}
}
@@ -2172,8 +2203,8 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(LWC1, at, fd, 0);
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ GenInstrImmediate(LWC1, at, fd, off16);
}
}
@@ -2190,11 +2221,11 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(LWC1, src.rm(), nextfpreg,
src.offset_ + Register::kExponentOffset);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
+ int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+ GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
FPURegister nextfpreg;
nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
+ GenInstrImmediate(LWC1, at, nextfpreg, off16 + Register::kExponentOffset);
}
} else {
DCHECK(IsFp64Mode() || IsFpxxMode());
@@ -2207,9 +2238,9 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
src.offset_ + Register::kExponentOffset);
mthc1(at, fd);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
- GenInstrImmediate(LW, at, at, Register::kExponentOffset);
+ int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+ GenInstrImmediate(LWC1, at, fd, off16 + Register::kMantissaOffset);
+ GenInstrImmediate(LW, at, at, off16 + Register::kExponentOffset);
mthc1(at, fd);
}
}
@@ -2220,8 +2251,8 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(SWC1, at, fd, 0);
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ GenInstrImmediate(SWC1, at, fd, off16);
}
}
@@ -2240,11 +2271,11 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(SWC1, src.rm(), nextfpreg,
src.offset_ + Register::kExponentOffset);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
+ int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+ GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
FPURegister nextfpreg;
nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
+ GenInstrImmediate(SWC1, at, nextfpreg, off16 + Register::kExponentOffset);
}
} else {
DCHECK(IsFp64Mode() || IsFpxxMode());
@@ -2257,10 +2288,10 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
GenInstrImmediate(SW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
+ int32_t off16 = LoadUpperOffsetForTwoMemoryAccesses(src);
+ GenInstrImmediate(SWC1, at, fd, off16 + Register::kMantissaOffset);
mfhc1(t8, fd);
- GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
+ GenInstrImmediate(SW, at, t8, off16 + Register::kExponentOffset);
}
}
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index e58abd8c0c..1df6e3f5ad 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -1055,7 +1055,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
@@ -1177,6 +1178,8 @@ class Assembler : public AssemblerBase {
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
+ int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
+ int32_t LoadUpperOffsetForTwoMemoryAccesses(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 43e67354f2..966214be8c 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -566,7 +566,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ And(t2, lhs, Operand(rhs));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1625,13 +1625,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a1, a1, Operand(2)); // a1 was a smi.
- __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
+ // Check that the last match info is a FixedArray.
+ __ lw(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ lw(last_match_info_elements,
- FieldMemOperand(a0, JSArray::kElementsOffset));
__ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&runtime, ne, a0, Operand(at));
@@ -1639,7 +1636,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ lw(a0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ Addu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ sra(at, a0, kSmiTagSize);
__ Branch(&runtime, gt, a2, Operand(at));
@@ -1648,28 +1645,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the capture count.
__ sll(a2, a1, kSmiTagSize + kSmiShiftSize); // To smi.
__ sw(a2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
+ RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
- __ sw(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
+ __ sw(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset));
__ mov(a2, subject);
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- subject,
- t3,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastSubjectOffset, subject, t3,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(subject, a2);
- __ sw(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
+ __ sw(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- t3,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastInputOffset, subject, t3,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1681,9 +1670,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
- __ Addu(a0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ Addu(a0, last_match_info_elements,
+ Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ Subu(a1, a1, Operand(1));
__ Branch(&done, lt, a1, Operand(zero_reg));
@@ -1699,7 +1687,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
+ __ mov(v0, last_match_info_elements);
__ DropAndRet(4);
// Do the runtime call to execute the regexp.
@@ -1917,6 +1905,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // a0 - number of arguments
// a1 - function
// a3 - slot id
// a2 - vector
@@ -1924,25 +1913,22 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(miss, ne, a1, Operand(at));
- __ li(a0, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, a2, a3);
__ mov(a2, t0);
__ mov(a3, a1);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
+ // a0 - number of arguments
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does r1 match the recorded monomorphic target?
__ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -1976,9 +1962,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
- USE_DELAY_SLOT);
- __ li(a0, Operand(argc)); // In delay slot.
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2019,9 +2003,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_count_incremented);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
- USE_DELAY_SLOT);
- __ li(a0, Operand(argc)); // In delay slot.
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
__ bind(&uninitialized);
@@ -2050,11 +2032,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ SmiTag(a0);
+ __ Push(a0);
__ Push(a2, a3);
__ Push(cp, a1);
__ CallStub(&create_stub);
__ Pop(cp, a1);
__ Pop(a2, a3);
+ __ Pop(a0);
+ __ SmiUntag(a0);
}
__ Branch(&call_function);
@@ -2071,6 +2057,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve the number of arguments as Smi.
+ __ SmiTag(a0);
+ __ Push(a0);
+
// Push the receiver and the function and feedback info.
__ Push(a1, a2, a3);
@@ -2079,6 +2069,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
+
+ // Restore number of arguments.
+ __ Pop(a0);
+ __ SmiUntag(a0);
}
@@ -3183,16 +3177,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
- __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
- __ lw(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
- __ sw(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3276,21 +3260,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Addu(sp, sp, a1);
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
@@ -3298,14 +3267,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3392,180 +3353,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(t9);
}
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
- Register name = LoadWithVectorDescriptor::NameRegister(); // a2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
- Register feedback = t0;
- Register receiver_map = t1;
- Register scratch1 = t4;
-
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, at, Operand(scratch1));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
-
- __ bind(&not_array);
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&miss, ne, at, Operand(feedback));
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, t5);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = LoadWithVectorDescriptor::NameRegister(); // a2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
- Register feedback = t0;
- Register receiver_map = t1;
- Register scratch1 = t4;
-
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, at, Operand(scratch1));
- // We have a polymorphic element handler.
- __ JumpIfNotSmi(key, &miss);
-
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&try_poly_name, ne, at, Operand(feedback));
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Branch(&miss, ne, key, Operand(feedback));
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = StoreWithVectorDescriptor::NameRegister(); // a2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // t0
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
- Register feedback = t1;
- Register receiver_map = t2;
- Register scratch1 = t5;
-
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, scratch1, Operand(at));
-
- Register scratch2 = t4;
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
- &miss);
-
- __ bind(&not_array);
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&miss, ne, feedback, Operand(at));
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ Branch(USE_DELAY_SLOT, &compare_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3908,29 +3701,18 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ And(at, a0, a0);
- __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -3983,23 +3765,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ Lsa(at, sp, a0, kPointerSizeLog2);
- __ sw(a1, MemOperand(at));
- __ li(at, Operand(3));
- __ addu(a0, a0, at);
- break;
- case NONE:
- __ sw(a1, MemOperand(sp, 0 * kPointerSize));
- __ li(a0, Operand(3));
- break;
- case ONE:
- __ sw(a1, MemOperand(sp, 1 * kPointerSize));
- __ li(a0, Operand(4));
- break;
- }
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
+ __ sw(a1, MemOperand(at));
+ __ li(at, Operand(3));
+ __ addu(a0, a0, at);
__ Push(a3, a2);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4260,7 +4029,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
__ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
__ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
- __ Move(a1, Smi::FromInt(0));
+ __ Move(a1, Smi::kZero);
__ Ret(USE_DELAY_SLOT);
__ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
@@ -4421,7 +4190,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
__ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
__ sll(t5, t2, 1);
@@ -4486,13 +4255,13 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
Label skip3;
- __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
+ __ Branch(&skip3, ne, t2, Operand(Smi::kZero));
// Move backing store address to a1, because it is
// expected there when filling in the unmapped arguments.
__ mov(a1, t0);
__ bind(&skip3);
- __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
+ __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::kZero));
__ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
@@ -4537,7 +4306,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ sw(t3, MemOperand(t6));
__ Addu(t5, t5, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
+ __ Branch(&parameters_loop, ne, t1, Operand(Smi::kZero));
// t1 = argument count (tagged).
__ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
@@ -4692,119 +4461,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = cp;
- Register slot_reg = a2;
- Register value_reg = a0;
- Register cell_reg = t0;
- Register cell_value_reg = t1;
- Register cell_details_reg = t2;
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
- }
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ lw(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = cell_reg;
- }
-
- // Load the PropertyCell at the specified slot.
- __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
- __ lw(cell_reg, ContextMemOperand(at, 0));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ lw(cell_details_reg,
- FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details_reg);
- __ And(cell_details_reg, cell_details_reg,
- PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask);
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ Branch(&not_mutable_data, ne, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ JumpIfSmi(value_reg, &fast_smi_case);
- __ bind(&fast_heapobject_case);
- __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
- cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // RecordWriteField clobbers the value register, so we need to reload.
- __ Ret(USE_DELAY_SLOT);
- __ lw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ bind(&not_mutable_data);
-
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ lw(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ Branch(&done, eq, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ Branch(&done, eq, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(eq, kUnexpectedValue, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ Branch(&slow_case, ne, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
-
- // Now either both old and new values must be SMIs or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value_reg, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value_reg, &slow_case);
- // Old and new values are SMIs, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ Ret(USE_DELAY_SLOT);
- __ sw(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value_reg, &slow_case);
- Register cell_value_map_reg = cell_value_reg;
- __ lw(cell_value_map_reg,
- FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
- __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
- FieldMemOperand(value_reg, HeapObject::kMapOffset));
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Push(slot_reg, value_reg);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -5074,7 +4730,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
__ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
- DCHECK(Smi::FromInt(0) == nullptr);
+ DCHECK(Smi::kZero == nullptr);
__ sw(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
__ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index aed41420d6..486ae68324 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -29,9 +29,9 @@ const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
const Register LoadDescriptor::SlotRegister() { return a0; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return t0; }
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
@@ -44,10 +44,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return t0; }
const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return t1; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
@@ -160,7 +156,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3, a2};
+ Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -209,13 +205,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a2, a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0, a1};
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index d61717d222..c3abe4fa6f 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -500,85 +500,6 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Allocation support.
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
- Register temporary = t8;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!holder_reg.is(at));
- DCHECK(!scratch.is(at));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- mov(at, fp);
- bind(&load_context);
- lw(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
- // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
- JumpIfNotSmi(scratch, &has_context, temporary);
- lw(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
- Branch(&load_context);
- bind(&has_context);
-
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
- scratch, Operand(zero_reg));
-#endif
-
- // Load the native context of the current context.
- lw(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
- holder_reg, Operand(at));
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- Branch(&same_contexts, eq, scratch, Operand(at));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, at); // Move at to its holding place.
- LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull,
- holder_reg, Operand(at));
-
- lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
- holder_reg, Operand(at));
- // Restore at is not needed. at is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore at to holder's context.
- lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- lw(scratch, FieldMemOperand(scratch, token_offset));
- lw(at, FieldMemOperand(at, token_offset));
- Branch(miss, ne, scratch, Operand(at));
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -619,87 +540,6 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
And(reg0, reg0, Operand(0x3fffffff));
}
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // reg0 - holds the untagged key on entry and holds the hash once computed.
- //
- // reg1 - Used to hold the capacity mask of the dictionary.
- //
- // reg2 - Used for the index into the dictionary.
- // at - Temporary (avoid MacroAssembler instructions also using 'at').
- Label done;
-
- GetNumberHash(reg0, reg1);
-
- // Compute the capacity mask.
- lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- sra(reg1, reg1, kSmiTagSize);
- Subu(reg1, reg1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use reg2 for index calculations and keep the hash intact in reg0.
- mov(reg2, reg0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(reg2, reg2, reg1);
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- Lsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
-
- // Check if the key is identical to the name.
- Lsa(reg2, elements, reg2, kPointerSizeLog2);
-
- lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
- if (i != kNumberDictionaryProbes - 1) {
- Branch(&done, eq, key, Operand(at));
- } else {
- Branch(miss, ne, key, Operand(at));
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- // reg2: elements + (index * kPointerSize).
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
- DCHECK_EQ(DATA, 0);
- And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- lw(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -1217,26 +1057,18 @@ void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
void MacroAssembler::ByteSwapSigned(Register dest, Register src,
int operand_size) {
DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
- if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- if (operand_size == 2) {
- seh(src, src);
- } else if (operand_size == 1) {
- seb(src, src);
- }
- // No need to do any preparation if operand_size is 4
+ if (operand_size == 2) {
+ Seh(src, src);
+ } else if (operand_size == 1) {
+ Seb(src, src);
+ }
+ // No need to do any preparation if operand_size is 4
+
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
wsbh(dest, src);
rotr(dest, dest, 16);
} else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) {
- if (operand_size == 1) {
- sll(src, src, 24);
- sra(src, src, 24);
- } else if (operand_size == 2) {
- sll(src, src, 16);
- sra(src, src, 16);
- }
- // No need to do any preparation if operand_size is 4
-
Register tmp = t0;
Register tmp2 = t1;
@@ -1917,6 +1749,26 @@ void MacroAssembler::Ins(Register rt,
}
}
+void MacroAssembler::Seb(Register rd, Register rt) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ seb(rd, rt);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
+ sll(rd, rt, 24);
+ sra(rd, rd, 24);
+ }
+}
+
+void MacroAssembler::Seh(Register rd, Register rt) {
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ seh(rd, rt);
+ } else {
+ DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson));
+ sll(rd, rt, 16);
+ sra(rd, rd, 16);
+ }
+}
+
void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kMips32r6)) {
// r6 neg_s changes the sign for NaN-like operands as well.
@@ -4654,75 +4506,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- Branch(&byte_loop, le, length, Operand(kPointerSize));
- bind(&align_loop_1);
- And(scratch, src, kPointerSize - 1);
- Branch(&word_loop, eq, scratch, Operand(zero_reg));
- lbu(scratch, MemOperand(src));
- Addu(src, src, 1);
- sb(scratch, MemOperand(dst));
- Addu(dst, dst, 1);
- Subu(length, length, Operand(1));
- Branch(&align_loop_1, ne, length, Operand(zero_reg));
-
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- And(scratch, src, kPointerSize - 1);
- Assert(eq, kExpectingAlignmentForCopyBytes,
- scratch, Operand(zero_reg));
- }
- Branch(&byte_loop, lt, length, Operand(kPointerSize));
- lw(scratch, MemOperand(src));
- Addu(src, src, kPointerSize);
-
- // TODO(kalmard) check if this can be optimized to use sw in most cases.
- // Can't use unaligned access - copy byte by byte.
- if (kArchEndian == kLittle) {
- sb(scratch, MemOperand(dst, 0));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
- } else {
- sb(scratch, MemOperand(dst, 3));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- srl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 0));
- }
-
- Addu(dst, dst, 4);
-
- Subu(length, length, Operand(kPointerSize));
- Branch(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- Branch(&done, eq, length, Operand(zero_reg));
- bind(&byte_loop_1);
- lbu(scratch, MemOperand(src));
- Addu(src, src, 1);
- sb(scratch, MemOperand(dst));
- Addu(dst, dst, 1);
- Subu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
- bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -4735,20 +4518,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
@@ -5344,18 +5113,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
FPURegister result,
Register scratch1,
@@ -6473,7 +6230,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
lw(at, FieldMemOperand(string, String::kLengthOffset));
Check(lt, kIndexIsTooLarge, index, Operand(at));
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
Check(ge, kIndexIsNegative, index, Operand(zero_reg));
SmiUntag(index, index);
@@ -6733,7 +6490,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(a3, a1);
- Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+ Branch(call_runtime, ne, a3, Operand(Smi::kZero));
bind(&start);
@@ -6803,13 +6560,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
lw(at, MemOperand(at));
Xor(scratch_reg, scratch_reg, Operand(at));
@@ -6818,7 +6576,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -6827,10 +6585,10 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Addu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
lw(at, MemOperand(at));
- Branch(no_memento_found, gt, scratch_reg, Operand(at));
+ Branch(no_memento_found, ge, scratch_reg, Operand(at));
// Memento map check.
bind(&map_check);
lw(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 4024e52c6f..824a3bf14d 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -495,24 +495,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support.
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
void GetNumberHash(Register reg0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2);
-
-
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -842,6 +826,8 @@ class MacroAssembler: public Assembler {
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Seb(Register rd, Register rt);
+ void Seh(Register rd, Register rt);
void Neg_s(FPURegister fd, FPURegister fs);
void Neg_d(FPURegister fd, FPURegister fs);
@@ -1141,14 +1127,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
@@ -1180,12 +1158,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
@@ -1276,13 +1248,6 @@ class MacroAssembler: public Assembler {
return eq;
}
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Get the number of least significant bits from a register.
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index dc3198cd9f..056cc425f9 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -1117,7 +1117,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index e089b54f87..97f5b73f9c 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -563,7 +563,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ And(a6, lhs, Operand(rhs));
__ JumpIfNotSmi(a6, &not_smis, a4);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1625,13 +1625,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Daddu(a1, a1, Operand(1));
__ dsll(a1, a1, 1); // Multiply by 2.
- __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
+ // Check that the last match info is a FixedArray.
+ __ ld(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ ld(last_match_info_elements,
- FieldMemOperand(a0, JSArray::kElementsOffset));
__ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&runtime, ne, a0, Operand(at));
@@ -1639,7 +1636,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ ld(a0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ Daddu(a2, a1, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ SmiUntag(at, a0);
__ Branch(&runtime, gt, a2, Operand(at));
@@ -1649,28 +1646,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the capture count.
__ SmiTag(a2, a1); // To smi.
__ sd(a2, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
+ RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
- __ sd(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
+ __ sd(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset));
__ mov(a2, subject);
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset,
- subject,
- a7,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastSubjectOffset, subject, a7,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
__ mov(subject, a2);
- __ sd(subject,
- FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
+ __ sd(subject, FieldMemOperand(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset));
__ RecordWriteField(last_match_info_elements,
- RegExpImpl::kLastInputOffset,
- subject,
- a7,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs);
+ RegExpMatchInfo::kLastInputOffset, subject, a7,
+ kRAHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1682,9 +1671,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label next_capture, done;
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
- __ Daddu(a0,
- last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+ __ Daddu(a0, last_match_info_elements,
+ Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag));
__ bind(&next_capture);
__ Dsubu(a1, a1, Operand(1));
__ Branch(&done, lt, a1, Operand(zero_reg));
@@ -1700,7 +1688,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
+ __ mov(v0, last_match_info_elements);
__ DropAndRet(4);
// Do the runtime call to execute the regexp.
@@ -1964,6 +1952,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // a0 - number of arguments
// a1 - function
// a3 - slot id
// a2 - vector
@@ -1971,25 +1960,22 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
__ Branch(miss, ne, a1, Operand(at));
- __ li(a0, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, a2, a3);
__ mov(a2, a4);
__ mov(a3, a1);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
+ // a0 - number of arguments
// a1 - function
// a3 - slot id (Smi)
// a2 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does r1 match the recorded monomorphic target?
__ dsrl(a4, a3, 32 - kPointerSizeLog2);
@@ -2023,9 +2009,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
- USE_DELAY_SLOT);
- __ li(a0, Operand(argc)); // In delay slot.
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
@@ -2067,9 +2051,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_count_incremented);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
- RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
- USE_DELAY_SLOT);
- __ li(a0, Operand(argc)); // In delay slot.
+ RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
__ bind(&uninitialized);
@@ -2098,11 +2080,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
+ __ SmiTag(a0);
+ __ Push(a0);
__ Push(a2, a3);
__ Push(cp, a1);
__ CallStub(&create_stub);
__ Pop(cp, a1);
__ Pop(a2, a3);
+ __ Pop(a0);
+ __ SmiUntag(a0);
}
__ Branch(&call_function);
@@ -2119,6 +2105,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve number of arguments as Smi.
+ __ SmiTag(a0);
+ __ Push(a0);
+
// Push the receiver and the function and feedback info.
__ Push(a1, a2, a3);
@@ -2127,6 +2117,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
+
+ // Restore number of arguments.
+ __ Pop(a0);
+ __ SmiUntag(a0);
}
@@ -3186,16 +3180,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
- __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
- __ ld(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
- __ sd(regs_.scratch1(),
- MemOperand(regs_.scratch0(),
- MemoryChunk::kWriteBarrierCounterOffset));
- __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
-
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@@ -3279,21 +3263,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Daddu(sp, sp, a1);
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
@@ -3301,14 +3270,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3395,182 +3356,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(t9);
}
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
- Register name = LoadWithVectorDescriptor::NameRegister(); // a2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
- Register feedback = a4;
- Register receiver_map = a5;
- Register scratch1 = a6;
-
- __ SmiScale(feedback, slot, kPointerSizeLog2);
- __ Daddu(feedback, vector, Operand(feedback));
- __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, scratch1, Operand(at));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
-
- __ bind(&not_array);
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&miss, ne, feedback, Operand(at));
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, a7);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ Branch(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = LoadWithVectorDescriptor::NameRegister(); // a2
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // a3
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // a0
- Register feedback = a4;
- Register receiver_map = a5;
- Register scratch1 = a6;
-
- __ SmiScale(feedback, slot, kPointerSizeLog2);
- __ Daddu(feedback, vector, Operand(feedback));
- __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, scratch1, Operand(at));
- // We have a polymorphic element handler.
- __ JumpIfNotSmi(key, &miss);
-
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&try_poly_name, ne, feedback, Operand(at));
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Branch(&miss, ne, key, Operand(feedback));
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiScale(feedback, slot, kPointerSizeLog2);
- __ Daddu(feedback, vector, Operand(feedback));
- __ ld(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ Branch(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = StoreWithVectorDescriptor::NameRegister(); // a2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // a4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
- Register feedback = a5;
- Register receiver_map = a6;
- Register scratch1 = a7;
-
- __ SmiScale(scratch1, slot, kPointerSizeLog2);
- __ Daddu(feedback, vector, Operand(scratch1));
- __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
-
- Register scratch2 = t0;
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
- &miss);
-
- __ bind(&not_array);
- __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ Branch(USE_DELAY_SLOT, &compare_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3913,29 +3704,18 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ And(at, a0, a0);
- __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ Branch(&not_one_case, gt, a0, Operand(1));
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -3988,23 +3768,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a1, MemOperand(at));
- __ li(at, Operand(3));
- __ Daddu(a0, a0, at);
- break;
- case NONE:
- __ sd(a1, MemOperand(sp, 0 * kPointerSize));
- __ li(a0, Operand(3));
- break;
- case ONE:
- __ sd(a1, MemOperand(sp, 1 * kPointerSize));
- __ li(a0, Operand(4));
- break;
- }
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
+ __ sd(a1, MemOperand(at));
+ __ li(at, Operand(3));
+ __ Daddu(a0, a0, at);
__ Push(a3, a2);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4267,7 +4034,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
__ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
__ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
- __ Move(a1, Smi::FromInt(0));
+ __ Move(a1, Smi::kZero);
__ Ret(USE_DELAY_SLOT);
__ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
@@ -4434,7 +4201,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
__ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
__ SmiScale(t1, a6, kPointerSizeLog2);
@@ -4500,13 +4267,13 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
Label skip3;
- __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+ __ Branch(&skip3, ne, a6, Operand(Smi::kZero));
// Move backing store address to a1, because it is
// expected there when filling in the unmapped arguments.
__ mov(a1, a4);
__ bind(&skip3);
- __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
+ __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::kZero));
__ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
__ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
@@ -4553,7 +4320,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
__ sd(a7, MemOperand(t2));
__ Daddu(t1, t1, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+ __ Branch(&parameters_loop, ne, a5, Operand(Smi::kZero));
// Restore t1 = argument count (tagged).
__ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
@@ -4714,119 +4481,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = cp;
- Register slot_reg = a2;
- Register value_reg = a0;
- Register cell_reg = a4;
- Register cell_value_reg = a5;
- Register cell_details_reg = a6;
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
- }
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = cell_reg;
- }
-
- // Load the PropertyCell at the specified slot.
- __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
- __ ld(cell_reg, ContextMemOperand(at, 0));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ ld(cell_details_reg,
- FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details_reg);
- __ And(cell_details_reg, cell_details_reg,
- PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask);
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ Branch(&not_mutable_data, ne, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ JumpIfSmi(value_reg, &fast_smi_case);
- __ bind(&fast_heapobject_case);
- __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
- cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // RecordWriteField clobbers the value register, so we need to reload.
- __ Ret(USE_DELAY_SLOT);
- __ ld(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ bind(&not_mutable_data);
-
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ ld(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
- __ Branch(&slow_case, ne, at, Operand(zero_reg));
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ Branch(&done, eq, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ Branch(&done, eq, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(eq, kUnexpectedValue, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ Branch(&slow_case, ne, cell_details_reg,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
-
- // Now either both old and new values must be SMIs or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value_reg, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value_reg, &slow_case);
- // Old and new values are SMIs, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ Ret(USE_DELAY_SLOT);
- __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value_reg, &slow_case);
- Register cell_value_map_reg = cell_value_reg;
- __ ld(cell_value_map_reg,
- FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
- __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
- FieldMemOperand(value_reg, HeapObject::kMapOffset));
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Push(slot_reg, value_reg);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
DCHECK(static_cast<int>(offset) == offset);
@@ -5100,7 +4754,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
__ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
- DCHECK(Smi::FromInt(0) == nullptr);
+ DCHECK(Smi::kZero == nullptr);
__ sd(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
__ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index e5b9c2e7bd..c6a917f5d4 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -29,9 +29,9 @@ const Register LoadDescriptor::ReceiverRegister() { return a1; }
const Register LoadDescriptor::NameRegister() { return a2; }
const Register LoadDescriptor::SlotRegister() { return a0; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return a4; }
const Register StoreDescriptor::ReceiverRegister() { return a1; }
const Register StoreDescriptor::NameRegister() { return a2; }
@@ -44,10 +44,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
@@ -153,7 +149,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3, a2};
+ Register registers[] = {a1, a0, a3, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -209,13 +205,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a2, a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0, a1};
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index dd12f9b51a..a3ab4a8840 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -517,85 +517,6 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Allocation support.
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
- Register temporary = t8;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!holder_reg.is(at));
- DCHECK(!scratch.is(at));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- mov(at, fp);
- bind(&load_context);
- ld(scratch, MemOperand(at, CommonFrameConstants::kContextOrFrameTypeOffset));
- // Passing temporary register, otherwise JumpIfNotSmi modifies register at.
- JumpIfNotSmi(scratch, &has_context, temporary);
- ld(at, MemOperand(at, CommonFrameConstants::kCallerFPOffset));
- Branch(&load_context);
- bind(&has_context);
-
- // In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
- scratch, Operand(zero_reg));
-#endif
-
- // Load the native context of the current context.
- ld(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
- holder_reg, Operand(at));
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- Branch(&same_contexts, eq, scratch, Operand(at));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- push(holder_reg); // Temporarily save holder on the stack.
- mov(holder_reg, at); // Move at to its holding place.
- LoadRoot(at, Heap::kNullValueRootIndex);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull,
- holder_reg, Operand(at));
-
- ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(at, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
- holder_reg, Operand(at));
- // Restore at is not needed. at is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore at to holder's context.
- ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- ld(scratch, FieldMemOperand(scratch, token_offset));
- ld(at, FieldMemOperand(at, token_offset));
- Branch(miss, ne, scratch, Operand(at));
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -637,87 +558,6 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
And(reg0, reg0, Operand(0x3fffffff));
}
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // reg0 - holds the untagged key on entry and holds the hash once computed.
- //
- // reg1 - Used to hold the capacity mask of the dictionary.
- //
- // reg2 - Used for the index into the dictionary.
- // at - Temporary (avoid MacroAssembler instructions also using 'at').
- Label done;
-
- GetNumberHash(reg0, reg1);
-
- // Compute the capacity mask.
- ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- SmiUntag(reg1, reg1);
- Dsubu(reg1, reg1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use reg2 for index calculations and keep the hash intact in reg0.
- mov(reg2, reg0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(reg2, reg2, reg1);
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
-
- // Check if the key is identical to the name.
- Dlsa(reg2, elements, reg2, kPointerSizeLog2);
-
- ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
- if (i != kNumberDictionaryProbes - 1) {
- Branch(&done, eq, key, Operand(at));
- } else {
- Branch(miss, ne, key, Operand(at));
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- // reg2: elements + (index * kPointerSize).
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
- DCHECK_EQ(DATA, 0);
- And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- ld(result, FieldMemOperand(reg2, kValueOffset));
-}
-
-
// ---------------------------------------------------------------------------
// Instruction macros.
@@ -1918,11 +1758,27 @@ void MacroAssembler::Ext(Register rt,
ext_(rt, rs, pos, size);
}
+void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 64);
+ DCHECK(size > 0 && size <= 64);
+ DCHECK(pos + size <= 64);
+ if (pos < 32) {
+ if (size <= 32) {
+ Dext(rt, rs, pos, size);
+ } else {
+ Dextm(rt, rs, pos, size);
+ }
+ } else if (pos < 64) {
+ DCHECK(size <= 32);
+ Dextu(rt, rs, pos, size);
+ }
+}
void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 32);
- DCHECK(pos + size < 33);
+ DCHECK(size > 0 && size <= 32);
dext_(rt, rs, pos, size);
}
@@ -1930,7 +1786,8 @@ void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos < 32);
- DCHECK(size <= 64);
+ DCHECK(size > 32 && size <= 64);
+ DCHECK((pos + size) > 32 && (pos + size) <= 64);
dextm(rt, rs, pos, size);
}
@@ -1938,7 +1795,8 @@ void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
uint16_t size) {
DCHECK(pos >= 32 && pos < 64);
- DCHECK(size < 33);
+ DCHECK(size > 0 && size <= 32);
+ DCHECK((pos + size) > 32 && (pos + size) <= 64);
dextu(rt, rs, pos, size);
}
@@ -4787,90 +4645,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-void MacroAssembler::CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch) {
- Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
-
- // Align src before copying in word size chunks.
- Branch(&byte_loop, le, length, Operand(kPointerSize));
- bind(&align_loop_1);
- And(scratch, src, kPointerSize - 1);
- Branch(&word_loop, eq, scratch, Operand(zero_reg));
- lbu(scratch, MemOperand(src));
- Daddu(src, src, 1);
- sb(scratch, MemOperand(dst));
- Daddu(dst, dst, 1);
- Dsubu(length, length, Operand(1));
- Branch(&align_loop_1, ne, length, Operand(zero_reg));
-
- // Copy bytes in word size chunks.
- bind(&word_loop);
- if (emit_debug_code()) {
- And(scratch, src, kPointerSize - 1);
- Assert(eq, kExpectingAlignmentForCopyBytes,
- scratch, Operand(zero_reg));
- }
- Branch(&byte_loop, lt, length, Operand(kPointerSize));
- ld(scratch, MemOperand(src));
- Daddu(src, src, kPointerSize);
-
- // TODO(kalmard) check if this can be optimized to use sw in most cases.
- // Can't use unaligned access - copy byte by byte.
- if (kArchEndian == kLittle) {
- sb(scratch, MemOperand(dst, 0));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 4));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 5));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 6));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 7));
- } else {
- sb(scratch, MemOperand(dst, 7));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 6));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 5));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 4));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 0));
- }
- Daddu(dst, dst, 8);
-
- Dsubu(length, length, Operand(kPointerSize));
- Branch(&word_loop);
-
- // Copy the last bytes if any left.
- bind(&byte_loop);
- Branch(&done, eq, length, Operand(zero_reg));
- bind(&byte_loop_1);
- lbu(scratch, MemOperand(src));
- Daddu(src, src, 1);
- sb(scratch, MemOperand(dst));
- Daddu(dst, dst, 1);
- Dsubu(length, length, Operand(1));
- Branch(&byte_loop_1, ne, length, Operand(zero_reg));
- bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -4883,20 +4657,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
@@ -5557,18 +5317,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
FPURegister result,
Register scratch1,
@@ -6897,7 +6645,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
ld(at, FieldMemOperand(string, String::kLengthOffset));
Check(lt, kIndexIsTooLarge, index, Operand(at));
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
Check(ge, kIndexIsNegative, index, Operand(zero_reg));
}
@@ -7162,7 +6910,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(a3, a1);
- Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+ Branch(call_runtime, ne, a3, Operand(Smi::kZero));
bind(&start);
@@ -7232,13 +6980,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
ld(at, MemOperand(at));
Xor(scratch_reg, scratch_reg, Operand(at));
@@ -7247,7 +6996,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
@@ -7256,10 +7005,10 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- Daddu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
ld(at, MemOperand(at));
- Branch(no_memento_found, gt, scratch_reg, Operand(at));
+ Branch(no_memento_found, ge, scratch_reg, Operand(at));
// Memento map check.
bind(&map_check);
ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 4f67d70e0c..5a1cf27c08 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -527,24 +527,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support.
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
void GetNumberHash(Register reg0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register result,
- Register reg0,
- Register reg1,
- Register reg2);
-
-
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -889,6 +873,9 @@ class MacroAssembler: public Assembler {
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size);
+
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -1199,14 +1186,6 @@ class MacroAssembler: public Assembler {
// Must preserve the result register.
void PopStackHandler();
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src,
- Register dst,
- Register length,
- Register scratch);
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
@@ -1238,12 +1217,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Register scratch,
- Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
@@ -1334,13 +1307,6 @@ class MacroAssembler: public Assembler {
return eq;
}
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Get the number of least significant bits from a register.
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 02387d0f4f..4a8e0076d9 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -4879,7 +4879,7 @@ void Simulator::Execute() {
while (program_counter != end_sim_pc) {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
- if (icount_ == static_cast<uint64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
MipsDebugger dbg(this);
dbg.Debug();
} else {
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index 0252b64650..f7a1a71514 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -468,6 +468,8 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
@@ -475,6 +477,43 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
+
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 3c43f23074..2580bfb397 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -140,6 +140,12 @@ void HeapObject::HeapObjectVerify() {
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayVerify();
break;
+ case JS_MODULE_NAMESPACE_TYPE:
+ JSModuleNamespace::cast(this)->JSModuleNamespaceVerify();
+ break;
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
+ JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorVerify();
+ break;
case JS_SET_TYPE:
JSSet::cast(this)->JSSetVerify();
break;
@@ -152,6 +158,44 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+ JSArrayIterator::cast(this)->JSArrayIteratorVerify();
+ break;
+
case JS_STRING_ITERATOR_TYPE:
JSStringIterator::cast(this)->JSStringIteratorVerify();
break;
@@ -339,8 +383,8 @@ void Map::MapVerify() {
CHECK(!heap->InNewSpace(this));
CHECK(FIRST_TYPE <= instance_type() && instance_type() <= LAST_TYPE);
CHECK(instance_size() == kVariableSizeSentinel ||
- (kPointerSize <= instance_size() &&
- instance_size() < heap->Capacity()));
+ (kPointerSize <= instance_size() &&
+ static_cast<size_t>(instance_size()) < heap->Capacity()));
CHECK(GetBackPointer()->IsUndefined(heap->isolate()) ||
!Map::cast(GetBackPointer())->is_stable());
VerifyHeapPointer(prototype());
@@ -677,7 +721,7 @@ void Code::CodeVerify() {
last_gc_pc = it.rinfo()->pc();
}
}
- CHECK(raw_type_feedback_info() == Smi::FromInt(0) ||
+ CHECK(raw_type_feedback_info() == Smi::kZero ||
raw_type_feedback_info()->IsSmi() == IsCodeStubOrIC());
}
@@ -728,9 +772,33 @@ void JSArray::JSArrayVerify() {
CHECK(length()->IsNumber() || length()->IsUndefined(isolate));
// If a GC was caused while constructing this array, the elements
// pointer may point to a one pointer filler map.
- if (ElementsAreSafeToExamine()) {
- CHECK(elements()->IsUndefined(isolate) || elements()->IsFixedArray() ||
- elements()->IsFixedDoubleArray());
+ if (!ElementsAreSafeToExamine()) return;
+ if (elements()->IsUndefined(isolate)) return;
+ CHECK(elements()->IsFixedArray() || elements()->IsFixedDoubleArray());
+ if (!length()->IsNumber()) return;
+ // Verify that the length and the elements backing store are in sync.
+ if (length()->IsSmi() && HasFastElements()) {
+ int size = Smi::cast(length())->value();
+ // Holey / Packed backing stores might have slack or might have not been
+ // properly initialized yet.
+ CHECK(size <= elements()->length() ||
+ elements() == isolate->heap()->empty_fixed_array());
+ } else {
+ CHECK(HasDictionaryElements());
+ uint32_t array_length;
+ CHECK(length()->ToArrayLength(&array_length));
+ if (array_length == 0xffffffff) {
+ CHECK(length()->ToArrayLength(&array_length));
+ }
+ if (array_length != 0) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(elements());
+ // The dictionary can never have more elements than the array length + 1.
+ // If the backing store grows the verification might be triggered with
+ // the old length in place.
+ uint32_t nof_elements = static_cast<uint32_t>(dict->NumberOfElements());
+ if (nof_elements != 0) nof_elements--;
+ CHECK_LE(nof_elements, array_length);
+ }
}
}
@@ -782,6 +850,16 @@ void JSWeakMap::JSWeakMapVerify() {
CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
+void JSArrayIterator::JSArrayIteratorVerify() {
+ CHECK(IsJSArrayIterator());
+ JSObjectVerify();
+ CHECK(object()->IsJSReceiver() || object()->IsUndefined(GetIsolate()));
+
+ CHECK_GE(index()->Number(), 0);
+ CHECK_LE(index()->Number(), kMaxSafeInteger);
+ CHECK(object_map()->IsMap() || object_map()->IsUndefined(GetIsolate()));
+}
+
void JSStringIterator::JSStringIteratorVerify() {
CHECK(IsJSStringIterator());
JSObjectVerify();
@@ -872,7 +950,7 @@ void JSArrayBufferView::JSArrayBufferViewVerify() {
VerifyPointer(buffer());
Isolate* isolate = GetIsolate();
CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined(isolate) ||
- buffer() == Smi::FromInt(0));
+ buffer() == Smi::kZero);
VerifyPointer(raw_byte_offset());
CHECK(raw_byte_offset()->IsSmi() || raw_byte_offset()->IsHeapNumber() ||
@@ -909,30 +987,89 @@ void Box::BoxVerify() {
value()->ObjectVerify();
}
-void PromiseContainer::PromiseContainerVerify() {
- CHECK(IsPromiseContainer());
- thenable()->ObjectVerify();
- then()->ObjectVerify();
- resolve()->ObjectVerify();
- reject()->ObjectVerify();
- before_debug_event()->ObjectVerify();
- after_debug_event()->ObjectVerify();
+void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
+ Isolate* isolate = GetIsolate();
+ CHECK(IsPromiseResolveThenableJobInfo());
+ CHECK(thenable()->IsJSReceiver());
+ CHECK(then()->IsJSReceiver());
+ CHECK(resolve()->IsJSFunction());
+ CHECK(reject()->IsJSFunction());
+ CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
+ CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+ CHECK(context()->IsContext());
+}
+
+void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
+ Isolate* isolate = GetIsolate();
+ CHECK(IsPromiseReactionJobInfo());
+ CHECK(value()->IsObject());
+ CHECK(tasks()->IsJSArray() || tasks()->IsCallable());
+ CHECK(deferred()->IsJSObject() || deferred()->IsUndefined(isolate));
+ CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
+ CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+ CHECK(context()->IsContext());
+}
+
+void JSModuleNamespace::JSModuleNamespaceVerify() {
+ CHECK(IsJSModuleNamespace());
+ VerifyPointer(module());
+}
+
+void JSFixedArrayIterator::JSFixedArrayIteratorVerify() {
+ CHECK(IsJSFixedArrayIterator());
+
+ VerifyPointer(array());
+ VerifyPointer(initial_next());
+ VerifySmiField(kIndexOffset);
+
+ CHECK_LE(index(), array()->length());
+}
+
+void ModuleInfoEntry::ModuleInfoEntryVerify() {
+ Isolate* isolate = GetIsolate();
+ CHECK(IsModuleInfoEntry());
+
+ CHECK(export_name()->IsUndefined(isolate) || export_name()->IsString());
+ CHECK(local_name()->IsUndefined(isolate) || local_name()->IsString());
+ CHECK(import_name()->IsUndefined(isolate) || import_name()->IsString());
+
+ VerifySmiField(kModuleRequestOffset);
+ VerifySmiField(kCellIndexOffset);
+ VerifySmiField(kBegPosOffset);
+ VerifySmiField(kEndPosOffset);
+
+ CHECK_IMPLIES(import_name()->IsString(), module_request() >= 0);
+ CHECK_IMPLIES(export_name()->IsString() && import_name()->IsString(),
+ local_name()->IsUndefined(isolate));
}
void Module::ModuleVerify() {
CHECK(IsModule());
- CHECK(code()->IsSharedFunctionInfo() || code()->IsJSFunction());
- code()->ObjectVerify();
- exports()->ObjectVerify();
- requested_modules()->ObjectVerify();
- VerifySmiField(kFlagsOffset);
- embedder_data()->ObjectVerify();
- CHECK(shared()->name()->IsSymbol());
- // TODO(neis): Check more.
+
+ VerifyPointer(code());
+ VerifyPointer(exports());
+ VerifyPointer(module_namespace());
+ VerifyPointer(requested_modules());
+ VerifySmiField(kHashOffset);
+
+ CHECK((!instantiated() && code()->IsSharedFunctionInfo()) ||
+ (instantiated() && !evaluated() && code()->IsJSFunction()) ||
+ (instantiated() && evaluated() && code()->IsModuleInfo()));
+
+ CHECK(module_namespace()->IsUndefined(GetIsolate()) ||
+ module_namespace()->IsJSModuleNamespace());
+ if (module_namespace()->IsJSModuleNamespace()) {
+ CHECK_EQ(JSModuleNamespace::cast(module_namespace())->module(), this);
+ }
+
+ CHECK_EQ(requested_modules()->length(), info()->module_requests()->length());
+
+ CHECK_NE(hash(), 0);
}
void PrototypeInfo::PrototypeInfoVerify() {
CHECK(IsPrototypeInfo());
+ CHECK(weak_cell()->IsWeakCell() || weak_cell()->IsUndefined(GetIsolate()));
if (prototype_users()->IsWeakFixedArray()) {
WeakFixedArray::cast(prototype_users())->FixedArrayVerify();
} else {
@@ -941,6 +1078,13 @@ void PrototypeInfo::PrototypeInfoVerify() {
CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
}
+void Tuple3::Tuple3Verify() {
+ CHECK(IsTuple3());
+ VerifyObjectField(kValue1Offset);
+ VerifyObjectField(kValue2Offset);
+ VerifyObjectField(kValue3Offset);
+}
+
void ContextExtension::ContextExtensionVerify() {
CHECK(IsContextExtension());
VerifyObjectField(kScopeInfoOffset);
@@ -1013,6 +1157,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoVerify() {
VerifyPointer(instance_template());
VerifyPointer(signature());
VerifyPointer(access_check_info());
+ VerifyPointer(cached_property_name());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index af1261538e..1a8274cbf1 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -305,7 +305,7 @@ Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> object,
Representation representation) {
if (representation.IsSmi() && object->IsUninitialized(isolate)) {
- return handle(Smi::FromInt(0), isolate);
+ return handle(Smi::kZero, isolate);
}
if (!representation.IsDouble()) return object;
double value;
@@ -690,6 +690,12 @@ bool HeapObject::IsJSObject() const {
bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
+bool HeapObject::IsJSArrayIterator() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
+ instance_type <= LAST_ARRAY_ITERATOR_TYPE);
+}
+
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
@@ -702,6 +708,7 @@ TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
+TYPE_CHECKER(JSFixedArrayIterator, JS_FIXED_ARRAY_ITERATOR_TYPE)
bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
@@ -715,6 +722,8 @@ bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
bool HeapObject::IsArrayList() const { return IsFixedArray(); }
+bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
+
bool Object::IsLayoutDescriptor() const {
return IsSmi() || IsFixedTypedArrayBase();
}
@@ -794,10 +803,6 @@ bool HeapObject::IsScopeInfo() const {
return map() == GetHeap()->scope_info_map();
}
-bool HeapObject::IsModuleInfoEntry() const {
- return map() == GetHeap()->module_info_entry_map();
-}
-
bool HeapObject::IsModuleInfo() const {
return map() == GetHeap()->module_info_map();
}
@@ -1589,9 +1594,9 @@ FixedArrayBase* JSObject::elements() const {
void AllocationSite::Initialize() {
- set_transition_info(Smi::FromInt(0));
+ set_transition_info(Smi::kZero);
SetElementsKind(GetInitialFastElementsKind());
- set_nested_site(Smi::FromInt(0));
+ set_nested_site(Smi::kZero);
set_pretenure_data(0);
set_pretenure_create_count(0);
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
@@ -1665,18 +1670,6 @@ AllocationSiteMode AllocationSite::GetMode(
return DONT_TRACK_ALLOCATION_SITE;
}
-
-AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
- ElementsKind to) {
- if (IsFastSmiElementsKind(from) &&
- IsMoreGeneralElementsKindTransition(from, to)) {
- return TRACK_ALLOCATION_SITE;
- }
-
- return DONT_TRACK_ALLOCATION_SITE;
-}
-
-
inline bool AllocationSite::CanTrack(InstanceType type) {
if (FLAG_allocation_site_pretenuring) {
return type == JS_ARRAY_TYPE ||
@@ -2019,7 +2012,7 @@ void WeakCell::clear() {
// initializing the root empty weak cell.
DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
this == GetHeap()->empty_weak_cell());
- WRITE_FIELD(this, kValueOffset, Smi::FromInt(0));
+ WRITE_FIELD(this, kValueOffset, Smi::kZero);
}
@@ -2034,9 +2027,7 @@ void WeakCell::initialize(HeapObject* val) {
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
}
-
-bool WeakCell::cleared() const { return value() == Smi::FromInt(0); }
-
+bool WeakCell::cleared() const { return value() == Smi::kZero; }
Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); }
@@ -2116,6 +2107,8 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSObject::kHeaderSize;
case JS_STRING_ITERATOR_TYPE:
return JSStringIterator::kSize;
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
+ return JSFixedArrayIterator::kHeaderSize;
default:
UNREACHABLE();
return 0;
@@ -2364,9 +2357,8 @@ Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
CHECK(!obj->IsUndefined(isolate));
return Handle<T>(T::cast(obj), isolate);
}
-
-bool FixedArray::is_the_hole(int index) {
- return get(index) == GetHeap()->the_hole_value();
+bool FixedArray::is_the_hole(Isolate* isolate, int index) {
+ return get(index)->IsTheHole(isolate);
}
void FixedArray::set(int index, Smi* value) {
@@ -2436,6 +2428,9 @@ void FixedDoubleArray::set_the_hole(int index) {
WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
}
+bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
+ return is_the_hole(index);
+}
bool FixedDoubleArray::is_the_hole(int index) {
return get_representation(index) == kHoleNanInt64;
@@ -2469,7 +2464,7 @@ bool WeakFixedArray::IsEmptySlot(int index) const {
void WeakFixedArray::Clear(int index) {
- FixedArray::cast(this)->set(index + kFirstIndex, Smi::FromInt(0));
+ FixedArray::cast(this)->set(index + kFirstIndex, Smi::kZero);
}
@@ -2534,6 +2529,48 @@ void ArrayList::Clear(int index, Object* undefined) {
->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
}
+int RegExpMatchInfo::NumberOfCaptureRegisters() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ Object* obj = get(kNumberOfCapturesIndex);
+ return Smi::cast(obj)->value();
+}
+
+void RegExpMatchInfo::SetNumberOfCaptureRegisters(int value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kNumberOfCapturesIndex, Smi::FromInt(value));
+}
+
+String* RegExpMatchInfo::LastSubject() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ Object* obj = get(kLastSubjectIndex);
+ return String::cast(obj);
+}
+
+void RegExpMatchInfo::SetLastSubject(String* value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kLastSubjectIndex, value);
+}
+
+Object* RegExpMatchInfo::LastInput() {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ return get(kLastInputIndex);
+}
+
+void RegExpMatchInfo::SetLastInput(Object* value) {
+ DCHECK_GE(length(), kLastMatchOverhead);
+ set(kLastInputIndex, value);
+}
+
+int RegExpMatchInfo::Capture(int i) {
+ DCHECK_LT(i, NumberOfCaptureRegisters());
+ Object* obj = get(kFirstCaptureIndex + i);
+ return Smi::cast(obj)->value();
+}
+
+void RegExpMatchInfo::SetCapture(int i, int value) {
+ DCHECK_LT(i, NumberOfCaptureRegisters());
+ set(kFirstCaptureIndex + i, Smi::FromInt(value));
+}
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
@@ -2563,7 +2600,7 @@ void FixedArray::set(int index,
DCHECK(map() != GetHeap()->fixed_cow_array_map());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
+ NOBARRIER_WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
@@ -2574,7 +2611,7 @@ void FixedArray::NoWriteBarrierSet(FixedArray* array,
DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
DCHECK(index >= 0 && index < array->length());
DCHECK(!array->GetHeap()->InNewSpace(value));
- WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+ NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
@@ -2641,6 +2678,11 @@ bool FrameArray::IsWasmFrame(int frame_ix) const {
return (flags & kIsWasmFrame) != 0;
}
+bool FrameArray::IsAsmJsWasmFrame(int frame_ix) const {
+ const int flags = Flags(frame_ix)->value();
+ return (flags & kIsAsmJsWasmFrame) != 0;
+}
+
int FrameArray::FrameCount() const {
const int frame_count = Smi::cast(get(kFrameCountIndex))->value();
DCHECK_LE(0, frame_count);
@@ -3117,7 +3159,6 @@ void HashTableBase::ElementsRemoved(int n) {
// static
int HashTableBase::ComputeCapacity(int at_least_space_for) {
- const int kMinCapacity = 4;
int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
return Max(capacity, kMinCapacity);
}
@@ -3279,6 +3320,8 @@ CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSModuleNamespace)
+CAST_ACCESSOR(JSFixedArrayIterator)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
@@ -3286,6 +3329,7 @@ CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSStringIterator)
+CAST_ACCESSOR(JSArrayIterator)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakCollection)
@@ -3293,7 +3337,6 @@ CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(Map)
-CAST_ACCESSOR(ModuleInfoEntry)
CAST_ACCESSOR(ModuleInfo)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(NameDictionary)
@@ -3306,6 +3349,7 @@ CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(SeededNumberDictionary)
CAST_ACCESSOR(SeqOneByteString)
@@ -3329,6 +3373,24 @@ CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
+template <class T>
+PodArray<T>* PodArray<T>::cast(Object* object) {
+ SLOW_DCHECK(object->IsByteArray());
+ return reinterpret_cast<PodArray<T>*>(object);
+}
+template <class T>
+const PodArray<T>* PodArray<T>::cast(const Object* object) {
+ SLOW_DCHECK(object->IsByteArray());
+ return reinterpret_cast<const PodArray<T>*>(object);
+}
+
+// static
+template <class T>
+Handle<PodArray<T>> PodArray<T>::New(Isolate* isolate, int length,
+ PretenureFlag pretenure) {
+ return Handle<PodArray<T>>::cast(
+ isolate->factory()->NewByteArray(length * sizeof(T), pretenure));
+}
// static
template <class Traits>
@@ -3371,6 +3433,7 @@ DEFINE_DEOPT_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(OptimizationId, Smi)
DEFINE_DEOPT_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DEFINE_DEOPT_ELEMENT_ACCESSORS(WeakCellCache, Object)
+DEFINE_DEOPT_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
#undef DEFINE_DEOPT_ELEMENT_ACCESSORS
@@ -4059,24 +4122,20 @@ byte ByteArray::get(int index) {
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-const byte* ByteArray::data() const {
- return reinterpret_cast<const byte*>(FIELD_ADDR_CONST(this, kHeaderSize));
-}
-
void ByteArray::set(int index, byte value) {
DCHECK(index >= 0 && index < this->length());
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
}
void ByteArray::copy_in(int index, const byte* buffer, int length) {
- DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
byte* dst_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
memcpy(dst_addr, buffer, length);
}
void ByteArray::copy_out(int index, byte* buffer, int length) {
- DCHECK(index >= 0 && length >= 0 && index + length >= index &&
+ DCHECK(index >= 0 && length >= 0 && length <= kMaxInt - index &&
index + length <= this->length());
const byte* src_addr = FIELD_ADDR(this, kHeaderSize + index * kCharSize);
memcpy(buffer, src_addr, length);
@@ -4231,7 +4290,7 @@ int FixedTypedArrayBase::ElementSize(InstanceType type) {
int FixedTypedArrayBase::DataSize(InstanceType type) {
- if (base_pointer() == Smi::FromInt(0)) return 0;
+ if (base_pointer() == Smi::kZero) return 0;
return length() * ElementSize(type);
}
@@ -5005,8 +5064,8 @@ inline bool Code::is_hydrogen_stub() {
inline bool Code::is_interpreter_trampoline_builtin() {
Builtins* builtins = GetIsolate()->builtins();
return this == *builtins->InterpreterEntryTrampoline() ||
- this == *builtins->InterpreterEnterBytecodeDispatch() ||
- this == *builtins->InterpreterMarkBaselineOnReturn();
+ this == *builtins->InterpreterEnterBytecodeAdvance() ||
+ this == *builtins->InterpreterEnterBytecodeDispatch();
}
inline bool Code::has_unwinding_info() const {
@@ -5654,12 +5713,20 @@ ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(Box, value, Object, kValueOffset)
-ACCESSORS(PromiseContainer, thenable, JSReceiver, kThenableOffset)
-ACCESSORS(PromiseContainer, then, JSReceiver, kThenOffset)
-ACCESSORS(PromiseContainer, resolve, JSFunction, kResolveOffset)
-ACCESSORS(PromiseContainer, reject, JSFunction, kRejectOffset)
-ACCESSORS(PromiseContainer, before_debug_event, Object, kBeforeDebugEventOffset)
-ACCESSORS(PromiseContainer, after_debug_event, Object, kAfterDebugEventOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, debug_id, Object, kDebugIdOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, debug_name, Object, kDebugNameOffset)
+ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
+
+ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
+ACCESSORS(PromiseReactionJobInfo, tasks, Object, kTasksOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred, Object, kDeferredOffset);
+ACCESSORS(PromiseReactionJobInfo, debug_id, Object, kDebugIdOffset);
+ACCESSORS(PromiseReactionJobInfo, debug_name, Object, kDebugNameOffset);
+ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
Map* PrototypeInfo::ObjectCreateMap() {
return Map::cast(WeakCell::cast(object_create_map())->value());
@@ -5702,6 +5769,7 @@ ObjectTemplateInfo* ObjectTemplateInfo::GetParent(Isolate* isolate) {
return nullptr;
}
+ACCESSORS(PrototypeInfo, weak_cell, Object, kWeakCellOffset)
ACCESSORS(PrototypeInfo, prototype_users, Object, kPrototypeUsersOffset)
ACCESSORS(PrototypeInfo, object_create_map, Object, kObjectCreateMap)
SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
@@ -5709,27 +5777,46 @@ ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
+ACCESSORS(Tuple3, value1, Object, kValue1Offset)
+ACCESSORS(Tuple3, value2, Object, kValue2Offset)
+ACCESSORS(Tuple3, value3, Object, kValue3Offset)
+
ACCESSORS(ContextExtension, scope_info, ScopeInfo, kScopeInfoOffset)
ACCESSORS(ContextExtension, extension, Object, kExtensionOffset)
+ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
+
+ACCESSORS(JSFixedArrayIterator, array, FixedArray, kArrayOffset)
+SMI_ACCESSORS(JSFixedArrayIterator, index, kIndexOffset)
+ACCESSORS(JSFixedArrayIterator, initial_next, JSFunction, kNextOffset)
+
ACCESSORS(Module, code, Object, kCodeOffset)
ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
+ACCESSORS(Module, regular_exports, FixedArray, kRegularExportsOffset)
+ACCESSORS(Module, regular_imports, FixedArray, kRegularImportsOffset)
+ACCESSORS(Module, module_namespace, HeapObject, kModuleNamespaceOffset)
ACCESSORS(Module, requested_modules, FixedArray, kRequestedModulesOffset)
-SMI_ACCESSORS(Module, flags, kFlagsOffset)
-BOOL_ACCESSORS(Module, flags, evaluated, kEvaluatedBit)
-ACCESSORS(Module, embedder_data, Object, kEmbedderDataOffset)
+SMI_ACCESSORS(Module, hash, kHashOffset)
+
+bool Module::evaluated() const { return code()->IsModuleInfo(); }
-SharedFunctionInfo* Module::shared() const {
- return code()->IsSharedFunctionInfo() ? SharedFunctionInfo::cast(code())
- : JSFunction::cast(code())->shared();
+void Module::set_evaluated() {
+ DCHECK(instantiated());
+ DCHECK(!evaluated());
+ return set_code(
+ JSFunction::cast(code())->shared()->scope_info()->ModuleDescriptorInfo());
}
+bool Module::instantiated() const { return !code()->IsSharedFunctionInfo(); }
+
ModuleInfo* Module::info() const {
- return shared()->scope_info()->ModuleDescriptorInfo();
+ if (evaluated()) return ModuleInfo::cast(code());
+ ScopeInfo* scope_info = instantiated()
+ ? JSFunction::cast(code())->shared()->scope_info()
+ : SharedFunctionInfo::cast(code())->scope_info();
+ return scope_info->ModuleDescriptorInfo();
}
-uint32_t Module::Hash() const { return Symbol::cast(shared()->name())->Hash(); }
-
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -5781,6 +5868,8 @@ ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
ACCESSORS(FunctionTemplateInfo, shared_function_info, Object,
kSharedFunctionInfoOffset)
+ACCESSORS(FunctionTemplateInfo, cached_property_name, Object,
+ kCachedPropertyNameOffset)
SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
@@ -5848,10 +5937,8 @@ ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
-ACCESSORS_CHECKED(Script, wasm_object, JSObject, kEvalFromSharedOffset,
+ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
this->type() == TYPE_WASM)
-SMI_ACCESSORS_CHECKED(Script, wasm_function_index, kEvalFromPositionOffset,
- this->type() == TYPE_WASM)
Script::CompilationType Script::compilation_type() {
return BooleanBit::get(flags(), kCompilationTypeBit) ?
@@ -5957,10 +6044,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
kAllowLazyCompilation)
BOOL_ACCESSORS(SharedFunctionInfo,
compiler_hints,
- allows_lazy_compilation_without_context,
- kAllowLazyCompilationWithoutContext)
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
uses_arguments,
kUsesArguments)
BOOL_ACCESSORS(SharedFunctionInfo,
@@ -5973,6 +6056,8 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, never_compiled,
kNeverCompiled)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
kIsDeclaration)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
+ kMarkedForTierUp)
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
@@ -6189,6 +6274,10 @@ void SharedFunctionInfo::ReplaceCode(Code* value) {
if (is_compiled()) set_never_compiled(false);
}
+bool SharedFunctionInfo::IsInterpreted() const {
+ return code()->is_interpreter_trampoline_builtin();
+}
+
bool SharedFunctionInfo::HasBaselineCode() const {
return code()->kind() == Code::FUNCTION;
}
@@ -6417,12 +6506,12 @@ bool SharedFunctionInfo::IsBuiltin() {
return type != Script::TYPE_NORMAL;
}
-
-bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
-
+bool SharedFunctionInfo::IsSubjectToDebugging() {
+ return !IsBuiltin() && !HasAsmWasmData();
+}
bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
- return optimized_code_map() == GetHeap()->cleared_optimized_code_map();
+ return optimized_code_map() == GetHeap()->empty_fixed_array();
}
@@ -6430,6 +6519,10 @@ bool JSFunction::IsOptimized() {
return code()->kind() == Code::OPTIMIZED_FUNCTION;
}
+bool JSFunction::IsInterpreted() {
+ return code()->is_interpreter_trampoline_builtin();
+}
+
bool JSFunction::IsMarkedForBaseline() {
return code() ==
GetIsolate()->builtins()->builtin(Builtins::kCompileBaseline);
@@ -6475,11 +6568,10 @@ void Map::InobjectSlackTrackingStep() {
}
AbstractCode* JSFunction::abstract_code() {
- Code* code = this->code();
- if (code->is_interpreter_trampoline_builtin()) {
+ if (IsInterpreted()) {
return AbstractCode::cast(shared()->bytecode_array());
} else {
- return AbstractCode::cast(code);
+ return AbstractCode::cast(code());
}
}
@@ -6674,6 +6766,8 @@ bool JSGeneratorObject::is_executing() const {
return continuation() == kGeneratorExecuting;
}
+TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
+
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -6921,7 +7015,7 @@ void JSArrayBuffer::set_is_shared(bool value) {
Object* JSArrayBufferView::byte_offset() const {
- if (WasNeutered()) return Smi::FromInt(0);
+ if (WasNeutered()) return Smi::kZero;
return Object::cast(READ_FIELD(this, kByteOffsetOffset));
}
@@ -6933,7 +7027,7 @@ void JSArrayBufferView::set_byte_offset(Object* value, WriteBarrierMode mode) {
Object* JSArrayBufferView::byte_length() const {
- if (WasNeutered()) return Smi::FromInt(0);
+ if (WasNeutered()) return Smi::kZero;
return Object::cast(READ_FIELD(this, kByteLengthOffset));
}
@@ -6957,7 +7051,7 @@ bool JSArrayBufferView::WasNeutered() const {
Object* JSTypedArray::length() const {
- if (WasNeutered()) return Smi::FromInt(0);
+ if (WasNeutered()) return Smi::kZero;
return Object::cast(READ_FIELD(this, kLengthOffset));
}
@@ -7035,6 +7129,18 @@ void JSRegExp::SetDataAt(int index, Object* value) {
FixedArray::cast(data())->set(index, value);
}
+void JSRegExp::SetLastIndex(int index) {
+ static const int offset =
+ kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ Smi* value = Smi::FromInt(index);
+ WRITE_FIELD(this, offset, value);
+}
+
+Object* JSRegExp::LastIndex() {
+ static const int offset =
+ kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
+ return READ_FIELD(this, offset);
+}
ElementsKind JSObject::GetElementsKind() {
ElementsKind kind = map()->elements_kind();
@@ -7484,7 +7590,7 @@ void JSReceiver::initialize_properties() {
bool JSReceiver::HasFastProperties() {
- DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
+ DCHECK_EQ(properties()->IsDictionary(), map()->is_dictionary_map());
return !properties()->IsDictionary();
}
@@ -7586,6 +7692,11 @@ bool JSGlobalProxy::IsDetachedFrom(JSGlobalObject* global) const {
return iter.GetCurrent() != global;
}
+inline int JSGlobalProxy::SizeWithInternalFields(int internal_field_count) {
+ DCHECK_GE(internal_field_count, 0);
+ return kSize + internal_field_count * kPointerSize;
+}
+
Smi* JSReceiver::GetOrCreateIdentityHash(Isolate* isolate,
Handle<JSReceiver> object) {
return object->IsJSProxy() ? JSProxy::GetOrCreateIdentityHash(
@@ -7632,6 +7743,14 @@ void AccessorInfo::set_is_special_data_property(bool value) {
set_flag(BooleanBit::set(flag(), kSpecialDataProperty, value));
}
+bool AccessorInfo::replace_on_access() {
+ return BooleanBit::get(flag(), kReplaceOnAccess);
+}
+
+void AccessorInfo::set_replace_on_access(bool value) {
+ set_flag(BooleanBit::set(flag(), kReplaceOnAccess, value));
+}
+
bool AccessorInfo::is_sloppy() { return BooleanBit::get(flag(), kIsSloppy); }
void AccessorInfo::set_is_sloppy(bool value) {
@@ -7709,7 +7828,7 @@ template<typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
Handle<Object> key,
Handle<Object> value) {
- this->SetEntry(entry, key, value, PropertyDetails(Smi::FromInt(0)));
+ this->SetEntry(entry, key, value, PropertyDetails(Smi::kZero));
}
@@ -7775,7 +7894,7 @@ uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
}
Map* UnseededNumberDictionaryShape::GetMap(Isolate* isolate) {
- return *isolate->factory()->unseeded_number_dictionary_map();
+ return isolate->heap()->unseeded_number_dictionary_map();
}
uint32_t SeededNumberDictionaryShape::SeededHash(uint32_t key, uint32_t seed) {
@@ -7946,15 +8065,13 @@ bool ScopeInfo::HasSimpleParameters() {
FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
#undef SCOPE_INFO_FIELD_ACCESSORS
-Object* ModuleInfoEntry::export_name() const { return get(kExportNameIndex); }
-
-Object* ModuleInfoEntry::local_name() const { return get(kLocalNameIndex); }
-
-Object* ModuleInfoEntry::import_name() const { return get(kImportNameIndex); }
-
-Object* ModuleInfoEntry::module_request() const {
- return get(kModuleRequestIndex);
-}
+ACCESSORS(ModuleInfoEntry, export_name, Object, kExportNameOffset)
+ACCESSORS(ModuleInfoEntry, local_name, Object, kLocalNameOffset)
+ACCESSORS(ModuleInfoEntry, import_name, Object, kImportNameOffset)
+SMI_ACCESSORS(ModuleInfoEntry, module_request, kModuleRequestOffset)
+SMI_ACCESSORS(ModuleInfoEntry, cell_index, kCellIndexOffset)
+SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
+SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
FixedArray* ModuleInfo::module_requests() const {
return FixedArray::cast(get(kModuleRequestsIndex));
@@ -8101,9 +8218,9 @@ void TypeFeedbackInfo::change_ic_generic_count(int delta) {
void TypeFeedbackInfo::initialize_storage() {
- WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
- WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
- WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(0));
+ WRITE_FIELD(this, kStorage1Offset, Smi::kZero);
+ WRITE_FIELD(this, kStorage2Offset, Smi::kZero);
+ WRITE_FIELD(this, kStorage3Offset, Smi::kZero);
}
@@ -8273,6 +8390,10 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Name> key,
ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
+ACCESSORS(JSArrayIterator, object, Object, kIteratedObjectOffset)
+ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
+ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
+
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 9054371e84..83e00b9f5f 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -66,11 +66,12 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
break;
case HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberPrint(os);
+ os << "\n";
break;
case MUTABLE_HEAP_NUMBER_TYPE:
os << "<mutable ";
HeapNumber::cast(this)->HeapNumberPrint(os);
- os << ">";
+ os << ">\n";
break;
case SIMD128_VALUE_TYPE:
Simd128Value::cast(this)->Simd128ValuePrint(os);
@@ -102,6 +103,44 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
#undef PRINT_FIXED_TYPED_ARRAY
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+ JSArrayIterator::cast(this)->JSArrayIteratorPrint(os);
+ break;
+
case FILLER_TYPE:
os << "filler";
break;
@@ -166,6 +205,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_WEAK_SET_TYPE:
JSWeakSet::cast(this)->JSWeakSetPrint(os);
break;
+ case JS_MODULE_NAMESPACE_TYPE:
+ JSModuleNamespace::cast(this)->JSModuleNamespacePrint(os);
+ break;
case FOREIGN_TYPE:
Foreign::cast(this)->ForeignPrint(os);
break;
@@ -190,6 +232,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_TYPED_ARRAY_TYPE:
JSTypedArray::cast(this)->JSTypedArrayPrint(os);
break;
+ case JS_FIXED_ARRAY_ITERATOR_TYPE:
+ JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorPrint(os);
+ break;
case JS_DATA_VIEW_TYPE:
JSDataView::cast(this)->JSDataViewPrint(os);
break;
@@ -946,6 +991,34 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this, !WasNeutered());
}
+void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLING
+ JSObjectPrintHeader(os, this, "JSArrayIterator");
+
+ InstanceType instance_type = map()->instance_type();
+ std::string type;
+ if (instance_type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
+ type = "keys";
+ } else if (instance_type <= LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) {
+ type = "entries";
+ } else {
+ type = "values";
+ }
+
+ os << "\n - type = " << type;
+ os << "\n - object = " << Brief(object());
+ os << "\n - index = " << Brief(index());
+
+ JSObjectPrintBody(os, this);
+}
+
+void JSFixedArrayIterator::JSFixedArrayIteratorPrint(
+ std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSFixedArrayIterator");
+ os << "\n - array = " << Brief(array());
+ os << "\n - index = " << index();
+ os << "\n - initial_next = " << Brief(initial_next());
+ JSObjectPrintBody(os, this);
+}
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDataView");
@@ -1031,18 +1104,18 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
- os << "global_proxy ";
- JSObjectPrint(os);
- os << "native context : " << Brief(native_context());
- os << "\n";
+ JSObjectPrintHeader(os, this, "JSGlobalProxy");
+ os << "\n - native context = " << Brief(native_context());
+ os << "\n - hash = " << Brief(hash());
+ JSObjectPrintBody(os, this);
}
void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
- os << "global ";
- JSObjectPrint(os);
- os << "native context : " << Brief(native_context());
- os << "\n";
+ JSObjectPrintHeader(os, this, "JSGlobalObject");
+ os << "\n - native context = " << Brief(native_context());
+ os << "\n - global proxy = " << Brief(global_proxy());
+ JSObjectPrintBody(os, this);
}
@@ -1147,14 +1220,40 @@ void Box::BoxPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void PromiseContainer::PromiseContainerPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "PromiseContainer");
+void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseResolveThenableJobInfo");
os << "\n - thenable: " << Brief(thenable());
os << "\n - then: " << Brief(then());
os << "\n - resolve: " << Brief(resolve());
os << "\n - reject: " << Brief(reject());
- os << "\n - before debug event: " << Brief(before_debug_event());
- os << "\n - after debug event: " << Brief(after_debug_event());
+ os << "\n - debug id: " << Brief(debug_id());
+ os << "\n - debug name: " << Brief(debug_name());
+ os << "\n - context: " << Brief(context());
+ os << "\n";
+}
+
+void PromiseReactionJobInfo::PromiseReactionJobInfoPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "PromiseReactionJobInfo");
+ os << "\n - value: " << Brief(value());
+ os << "\n - tasks: " << Brief(tasks());
+ os << "\n - deferred: " << Brief(deferred());
+ os << "\n - debug id: " << Brief(debug_id());
+ os << "\n - debug name: " << Brief(debug_name());
+ os << "\n - reaction context: " << Brief(context());
+ os << "\n";
+}
+
+void ModuleInfoEntry::ModuleInfoEntryPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "ModuleInfoEntry");
+ os << "\n - export_name: " << Brief(export_name());
+ os << "\n - local_name: " << Brief(local_name());
+ os << "\n - import_name: " << Brief(import_name());
+ os << "\n - module_request: " << module_request();
+ os << "\n - cell_index: " << cell_index();
+ os << "\n - beg_pos: " << beg_pos();
+ os << "\n - end_pos: " << end_pos();
os << "\n";
}
@@ -1164,15 +1263,30 @@ void Module::ModulePrint(std::ostream& os) { // NOLINT
os << "\n - exports: " << Brief(exports());
os << "\n - requested_modules: " << Brief(requested_modules());
os << "\n - evaluated: " << evaluated();
- os << "\n - embedder_data: " << Brief(embedder_data());
+ os << "\n";
+}
+
+void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "JSModuleNamespace");
+ os << "\n - module: " << Brief(module());
os << "\n";
}
void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PrototypeInfo");
+ os << "\n - weak cell: " << Brief(weak_cell());
os << "\n - prototype users: " << Brief(prototype_users());
os << "\n - registry slot: " << registry_slot();
os << "\n - validity cell: " << Brief(validity_cell());
+ os << "\n - object create map: " << Brief(object_create_map());
+ os << "\n";
+}
+
+void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "Tuple3");
+ os << "\n - value1: " << Brief(value1());
+ os << "\n - value2: " << Brief(value2());
+ os << "\n - value3: " << Brief(value3());
os << "\n";
}
@@ -1238,6 +1352,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n - instance_template: " << Brief(instance_template());
os << "\n - signature: " << Brief(signature());
os << "\n - access_check_info: " << Brief(access_check_info());
+ os << "\n - cached_property_name: " << Brief(cached_property_name());
os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false");
os << "\n - undetectable: " << (undetectable() ? "true" : "false");
os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 44271db9fb..e711a21925 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -62,6 +62,7 @@
#include "src/string-stream.h"
#include "src/utils.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/zone/zone.h"
#ifdef ENABLE_DISASSEMBLER
@@ -1199,7 +1200,7 @@ bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
Handle<TemplateList> TemplateList::New(Isolate* isolate, int size) {
Handle<FixedArray> list =
isolate->factory()->NewFixedArray(kLengthIndex + size);
- list->set(kLengthIndex, Smi::FromInt(0));
+ list->set(kLengthIndex, Smi::kZero);
return Handle<TemplateList>::cast(list);
}
@@ -1352,8 +1353,19 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
Handle<Object> result = args.Call(call_fun, name);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.is_null()) return isolate->factory()->undefined_value();
- // Rebox handle before return.
- return handle(*result, isolate);
+ Handle<Object> reboxed_result = handle(*result, isolate);
+ if (info->replace_on_access() && receiver->IsJSReceiver()) {
+ args.Call(reinterpret_cast<GenericNamedPropertySetterCallback>(
+ &Accessors::ReconfigureToDataProperty),
+ name, result);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ }
+ return reboxed_result;
+ }
+
+ // AccessorPair with 'cached' private property.
+ if (it->TryLookupCachedProperty()) {
+ return Object::GetProperty(it);
}
// Regular accessor.
@@ -1417,12 +1429,20 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
return Nothing<bool>();
}
- v8::AccessorNameSetterCallback call_fun =
- v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
- // TODO(verwaest): We should not get here anymore once all AccessorInfos are
- // marked as special_data_property. They cannot both be writable and not
- // have a setter.
- if (call_fun == nullptr) return Just(true);
+ // The actual type of call_fun is either v8::AccessorNameSetterCallback or
+ // i::Accesors::AccessorNameBooleanSetterCallback, depending on whether the
+ // AccessorInfo was created by the API or internally (see accessors.cc).
+ // Here we handle both cases using GenericNamedPropertySetterCallback and
+ // its Call method.
+ GenericNamedPropertySetterCallback call_fun =
+ v8::ToCData<GenericNamedPropertySetterCallback>(info->setter());
+
+ if (call_fun == nullptr) {
+ // TODO(verwaest): We should not get here anymore once all AccessorInfos
+ // are marked as special_data_property. They cannot both be writable and
+ // not have a setter.
+ return Just(true);
+ }
if (info->is_sloppy() && !receiver->IsJSReceiver()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1432,9 +1452,15 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
should_throw);
- args.Call(call_fun, name, value);
+ Handle<Object> result = args.Call(call_fun, name, value);
+ // In the case of AccessorNameSetterCallback, we know that the result value
+ // cannot have been set, so the result of Call will be null. In the case of
+ // AccessorNameBooleanSetterCallback, the result will either be null
+ // (signalling an exception) or a boolean Oddball.
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
- return Just(true);
+ if (result.is_null()) return Just(true);
+ DCHECK(result->BooleanValue() || should_throw == DONT_THROW);
+ return Just(result->BooleanValue());
}
// Regular accessor.
@@ -1911,7 +1937,7 @@ Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
}
}
-Map* Object::GetRootMap(Isolate* isolate) {
+Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
Context* native_context = isolate->context()->native_context();
@@ -1921,11 +1947,15 @@ Map* Object::GetRootMap(Isolate* isolate) {
// The object is either a number, a string, a symbol, a boolean, a SIMD value,
// a real JS object, or a Harmony proxy.
HeapObject* heap_object = HeapObject::cast(this);
- if (heap_object->IsJSReceiver()) {
- return heap_object->map();
+ return heap_object->map()->GetPrototypeChainRootMap(isolate);
+}
+
+Map* Map::GetPrototypeChainRootMap(Isolate* isolate) {
+ DisallowHeapAllocation no_alloc;
+ if (IsJSReceiverMap()) {
+ return this;
}
- int constructor_function_index =
- heap_object->map()->GetConstructorFunctionIndex();
+ int constructor_function_index = GetConstructorFunctionIndex();
if (constructor_function_index != Map::kNoConstructorFunctionIndex) {
Context* native_context = isolate->context()->native_context();
JSFunction* constructor_function =
@@ -2152,6 +2182,8 @@ std::ostream& operator<<(std::ostream& os, const Brief& v) {
return os;
}
+// Declaration of the static Smi::kZero constant.
+Smi* const Smi::kZero(nullptr);
void Smi::SmiPrint(std::ostream& os) const { // NOLINT
os << value();
@@ -2184,8 +2216,12 @@ static bool AnWord(String* str) {
Handle<String> String::SlowFlatten(Handle<ConsString> cons,
PretenureFlag pretenure) {
- DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(cons->second()->length() != 0);
+
+ // TurboFan can create cons strings with empty first parts.
+ if (cons->first()->length() == 0) return handle(cons->second());
+
+ DCHECK(AllowHeapAllocation::IsAllowed());
Isolate* isolate = cons->GetIsolate();
int length = cons->length();
PretenureFlag tenure = isolate->heap()->InNewSpace(*cons) ? pretenure
@@ -3171,11 +3207,11 @@ bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
Handle<Map> new_map,
Isolate* isolate) {
- if (!old_map->is_prototype_map()) return;
+ DCHECK(old_map->is_prototype_map());
DCHECK(new_map->is_prototype_map());
bool was_registered = JSObject::UnregisterPrototypeUser(old_map, isolate);
new_map->set_prototype_info(old_map->prototype_info());
- old_map->set_prototype_info(Smi::FromInt(0));
+ old_map->set_prototype_info(Smi::kZero);
if (FLAG_trace_prototype_users) {
PrintF("Moving prototype_info %p from map %p to map %p.\n",
reinterpret_cast<void*>(new_map->prototype_info()),
@@ -3331,7 +3367,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
value = handle(object->RawFastPropertyAt(index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
if (old_representation.IsNone()) {
- value = handle(Smi::FromInt(0), isolate);
+ value = handle(Smi::kZero, isolate);
}
value = Object::NewStorageFor(isolate, value, representation);
} else if (old_representation.IsDouble() &&
@@ -3430,7 +3466,8 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
if (expected_additional_properties > 0) {
property_count += expected_additional_properties;
} else {
- property_count += 2; // Make space for two more properties.
+ // Make space for two more properties.
+ property_count += NameDictionary::kInitialCapacity;
}
Handle<NameDictionary> dictionary =
NameDictionary::New(isolate, property_count);
@@ -3520,7 +3557,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
for (int i = 0; i < inobject_properties; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+ object->RawFastPropertyAtPut(index, Smi::kZero);
}
}
@@ -3537,22 +3574,26 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
} // namespace
+// static
+void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
+ Isolate* isolate) {
+ if (!old_map->is_prototype_map()) return;
+
+ InvalidatePrototypeChains(*old_map);
+
+ // If the map was registered with its prototype before, ensure that it
+ // registers with its new prototype now. This preserves the invariant that
+ // when a map on a prototype chain is registered with its prototype, then
+ // all prototypes further up the chain are also registered with their
+ // respective prototypes.
+ UpdatePrototypeUserRegistration(old_map, new_map, isolate);
+}
+
void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
int expected_additional_properties) {
if (object->map() == *new_map) return;
Handle<Map> old_map(object->map());
- if (old_map->is_prototype_map()) {
- // If this object is a prototype (the callee will check), invalidate any
- // prototype chains involving it.
- InvalidatePrototypeChains(object->map());
-
- // If the map was registered with its prototype before, ensure that it
- // registers with its new prototype now. This preserves the invariant that
- // when a map on a prototype chain is registered with its prototype, then
- // all prototypes further up the chain are also registered with their
- // respective prototypes.
- UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
- }
+ NotifyMapChange(old_map, new_map, new_map->GetIsolate());
if (old_map->is_dictionary_map()) {
// For slow-to-fast migrations JSObject::MigrateSlowToFast()
@@ -3799,7 +3840,7 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
if (details.type() != DATA) return;
- Zone zone(GetIsolate()->allocator());
+ Zone zone(GetIsolate()->allocator(), ZONE_NAME);
ZoneQueue<Map*> backlog(&zone);
backlog.push(this);
@@ -3894,7 +3935,7 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
field_owner->UpdateFieldType(modify_index, name, new_representation,
wrapped_type);
field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kFieldTypeGroup);
+ isolate, DependentCode::kFieldOwnerGroup);
if (FLAG_trace_generalization) {
map->PrintGeneralization(
@@ -4636,21 +4677,10 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
value, it->GetReceiver(), language_mode);
case LookupIterator::INTERCEPTOR: {
- Handle<Map> store_target_map;
- if (it->GetReceiver()->IsJSObject()) {
- store_target_map = handle(it->GetStoreTarget()->map(), it->isolate());
- }
if (it->HolderIsReceiverOrHiddenPrototype()) {
Maybe<bool> result =
JSObject::SetPropertyWithInterceptor(it, should_throw, value);
if (result.IsNothing() || result.FromJust()) return result;
- // Interceptor modified the store target but failed to set the
- // property.
- Utils::ApiCheck(store_target_map.is_null() ||
- *store_target_map == it->GetStoreTarget()->map(),
- it->IsElement() ? "v8::IndexedPropertySetterCallback"
- : "v8::NamedPropertySetterCallback",
- "Interceptor silently changed store target.");
} else {
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(it);
@@ -4658,13 +4688,6 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
if ((maybe_attributes.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
- // Interceptor modified the store target but failed to set the
- // property.
- Utils::ApiCheck(store_target_map.is_null() ||
- *store_target_map == it->GetStoreTarget()->map(),
- it->IsElement() ? "v8::IndexedPropertySetterCallback"
- : "v8::NamedPropertySetterCallback",
- "Interceptor silently changed store target.");
if (maybe_attributes.FromJust() == ABSENT) break;
*found = false;
return Nothing<bool>();
@@ -5786,17 +5809,10 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
it->TransitionToAccessorPair(accessors, attributes);
}
- Maybe<bool> result =
- JSObject::SetPropertyWithAccessor(it, value, should_throw);
-
- if (current_attributes == attributes || result.IsNothing()) {
- return result;
- }
-
- } else {
- it->ReconfigureDataProperty(value, attributes);
+ return JSObject::SetPropertyWithAccessor(it, value, should_throw);
}
+ it->ReconfigureDataProperty(value, attributes);
return Just(true);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -5989,7 +6005,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
Handle<Map> new_map = Map::CopyDropDescriptors(old_map);
new_map->set_dictionary_map(false);
- UpdatePrototypeUserRegistration(old_map, new_map, isolate);
+ NotifyMapChange(old_map, new_map, isolate);
#if TRACE_MAPS
if (FLAG_trace_maps) {
@@ -6174,27 +6190,13 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
}
-static Smi* GenerateIdentityHash(Isolate* isolate) {
- int hash_value;
- int attempts = 0;
- do {
- // Generate a random 32-bit hash value but limit range to fit
- // within a smi.
- hash_value = isolate->random_number_generator()->NextInt() & Smi::kMaxValue;
- attempts++;
- } while (hash_value == 0 && attempts < 30);
- hash_value = hash_value != 0 ? hash_value : 1; // never return 0
-
- return Smi::FromInt(hash_value);
-}
-
template <typename ProxyType>
static Smi* GetOrCreateIdentityHashHelper(Isolate* isolate,
Handle<ProxyType> proxy) {
Object* maybe_hash = proxy->hash();
if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
- Smi* hash = GenerateIdentityHash(isolate);
+ Smi* hash = Smi::FromInt(isolate->GenerateIdentityHash(Smi::kMaxValue));
proxy->set_hash(hash);
return hash;
}
@@ -6224,7 +6226,7 @@ Smi* JSObject::GetOrCreateIdentityHash(Isolate* isolate,
if (maybe_hash->IsSmi()) return Smi::cast(maybe_hash);
}
- Smi* hash = GenerateIdentityHash(isolate);
+ Smi* hash = Smi::FromInt(isolate->GenerateIdentityHash(Smi::kMaxValue));
CHECK(AddDataProperty(&it, handle(hash, isolate), NONE, THROW_ON_ERROR,
CERTAINLY_NOT_STORE_FROM_KEYED)
.IsJust());
@@ -6561,7 +6563,7 @@ Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
key, desc, should_throw);
}
- // TODO(jkummerow): Support Modules (ES6 9.4.6.6)
+ // TODO(neis): Special case for JSModuleNamespace?
// OrdinaryDefineOwnProperty, by virtue of calling
// DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
@@ -6596,29 +6598,12 @@ Maybe<bool> JSReceiver::OrdinaryDefineOwnProperty(Isolate* isolate,
// Handle interceptor
if (it.state() == LookupIterator::INTERCEPTOR) {
- Handle<Map> store_target_map;
- if (it.GetReceiver()->IsJSObject()) {
- store_target_map = handle(it.GetStoreTarget()->map(), it.isolate());
- }
if (it.HolderIsReceiverOrHiddenPrototype()) {
Maybe<bool> result = DefinePropertyWithInterceptorInternal(
&it, it.GetInterceptor(), should_throw, *desc);
if (result.IsNothing() || result.FromJust()) {
return result;
}
- // Interceptor modified the store target but failed to set the
- // property.
- if (!store_target_map.is_null() &&
- *store_target_map != it.GetStoreTarget()->map()) {
- it.isolate()->PushStackTraceAndDie(
- 0xabababaa, v8::ToCData<void*>(it.GetInterceptor()->definer()),
- nullptr, 0xabababab);
- }
- Utils::ApiCheck(store_target_map.is_null() ||
- *store_target_map == it.GetStoreTarget()->map(),
- it.IsElement() ? "v8::IndexedPropertyDefinerCallback"
- : "v8::NamedPropertyDefinerCallback",
- "Interceptor silently changed store target.");
}
}
@@ -7981,12 +7966,31 @@ bool JSObject::IsExtensible(Handle<JSObject> object) {
return object->map()->is_extensible();
}
+namespace {
+
+template <typename Dictionary>
+void DictionaryDetailsAtPut(Isolate* isolate, Handle<Dictionary> dictionary,
+ int entry, PropertyDetails details) {
+ dictionary->DetailsAtPut(entry, details);
+}
+
+template <>
+void DictionaryDetailsAtPut<GlobalDictionary>(
+ Isolate* isolate, Handle<GlobalDictionary> dictionary, int entry,
+ PropertyDetails details) {
+ Object* value = dictionary->ValueAt(entry);
+ DCHECK(value->IsPropertyCell());
+ value = PropertyCell::cast(value)->value();
+ if (value->IsTheHole(isolate)) return;
+ PropertyCell::PrepareForValue(dictionary, entry, handle(value, isolate),
+ details);
+}
template <typename Dictionary>
-static void ApplyAttributesToDictionary(Dictionary* dictionary,
- const PropertyAttributes attributes) {
+void ApplyAttributesToDictionary(Isolate* isolate,
+ Handle<Dictionary> dictionary,
+ const PropertyAttributes attributes) {
int capacity = dictionary->Capacity();
- Isolate* isolate = dictionary->GetIsolate();
for (int i = 0; i < capacity; i++) {
Object* k = dictionary->KeyAt(i);
if (dictionary->IsKey(isolate, k) &&
@@ -8001,11 +8005,12 @@ static void ApplyAttributesToDictionary(Dictionary* dictionary,
}
details = details.CopyAddAttributes(
static_cast<PropertyAttributes>(attrs));
- dictionary->DetailsAtPut(i, details);
+ DictionaryDetailsAtPut<Dictionary>(isolate, dictionary, i, details);
}
}
}
+} // namespace
template <PropertyAttributes attrs>
Maybe<bool> JSObject::PreventExtensionsWithTransition(
@@ -8094,9 +8099,13 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (attrs != NONE) {
if (object->IsJSGlobalObject()) {
- ApplyAttributesToDictionary(object->global_dictionary(), attrs);
+ Handle<GlobalDictionary> dictionary(object->global_dictionary(),
+ isolate);
+ ApplyAttributesToDictionary(isolate, dictionary, attrs);
} else {
- ApplyAttributesToDictionary(object->property_dictionary(), attrs);
+ Handle<NameDictionary> dictionary(object->property_dictionary(),
+ isolate);
+ ApplyAttributesToDictionary(isolate, dictionary, attrs);
}
}
}
@@ -8120,11 +8129,12 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
}
if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
- SeededNumberDictionary* dictionary = object->element_dictionary();
+ Handle<SeededNumberDictionary> dictionary(object->element_dictionary(),
+ isolate);
// Make sure we never go back to the fast case
- object->RequireSlowElements(dictionary);
+ object->RequireSlowElements(*dictionary);
if (attrs != NONE) {
- ApplyAttributesToDictionary(dictionary, attrs);
+ ApplyAttributesToDictionary(isolate, dictionary, attrs);
}
}
@@ -8466,8 +8476,9 @@ bool JSObject::HasEnumerableElements() {
int length = object->IsJSArray()
? Smi::cast(JSArray::cast(object)->length())->value()
: elements->length();
+ Isolate* isolate = GetIsolate();
for (int i = 0; i < length; i++) {
- if (!elements->is_the_hole(i)) return true;
+ if (!elements->is_the_hole(isolate, i)) return true;
}
return false;
}
@@ -8907,7 +8918,7 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
// For prototype maps, the PrototypeInfo is not copied.
DCHECK(memcmp(fresh->address(), new_map->address(),
kTransitionsOrPrototypeInfoOffset) == 0);
- DCHECK(fresh->raw_transitions() == Smi::FromInt(0));
+ DCHECK(fresh->raw_transitions() == Smi::kZero);
STATIC_ASSERT(kDescriptorsOffset ==
kTransitionsOrPrototypeInfoOffset + kPointerSize);
DCHECK(memcmp(HeapObject::RawField(*fresh, kDescriptorsOffset),
@@ -10264,7 +10275,7 @@ Handle<WeakFixedArray> WeakFixedArray::Allocate(
}
}
while (index < result->length()) {
- result->set(index, Smi::FromInt(0));
+ result->set(index, Smi::kZero);
index++;
}
return Handle<WeakFixedArray>::cast(result);
@@ -10329,6 +10340,15 @@ Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
return ret;
}
+Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
+ Handle<RegExpMatchInfo> match_info, int capture_count) {
+ DCHECK_GE(match_info->length(), kLastMatchOverhead);
+ const int required_length = kFirstCaptureIndex + capture_count;
+ Handle<FixedArray> result =
+ EnsureSpaceInFixedArray(match_info, required_length);
+ return Handle<RegExpMatchInfo>::cast(result);
+}
+
// static
Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
Handle<Object> receiver,
@@ -10349,14 +10369,14 @@ Handle<FrameArray> FrameArray::AppendJSFrame(Handle<FrameArray> in,
// static
Handle<FrameArray> FrameArray::AppendWasmFrame(Handle<FrameArray> in,
- Handle<Object> wasm_object,
+ Handle<Object> wasm_instance,
int wasm_function_index,
Handle<AbstractCode> code,
int offset, int flags) {
const int frame_count = in->FrameCount();
const int new_length = LengthFor(frame_count + 1);
Handle<FrameArray> array = EnsureSpace(in, new_length);
- array->SetWasmObject(frame_count, *wasm_object);
+ array->SetWasmInstance(frame_count, *wasm_instance);
array->SetWasmFunctionIndex(frame_count, Smi::FromInt(wasm_function_index));
array->SetCode(frame_count, *code);
array->SetOffset(frame_count, Smi::FromInt(offset));
@@ -10387,15 +10407,11 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
factory->NewFixedArray(LengthFor(size), pretenure);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
- result->set(kEnumCacheIndex, Smi::FromInt(0));
+ result->set(kEnumCacheIndex, Smi::kZero);
return Handle<DescriptorArray>::cast(result);
}
-
-void DescriptorArray::ClearEnumCache() {
- set(kEnumCacheIndex, Smi::FromInt(0));
-}
-
+void DescriptorArray::ClearEnumCache() { set(kEnumCacheIndex, Smi::kZero); }
void DescriptorArray::Replace(int index, Descriptor* descriptor) {
descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index));
@@ -10418,9 +10434,9 @@ void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheIndex));
}
bridge_storage->set(kEnumCacheBridgeCacheIndex, *new_cache);
- bridge_storage->set(kEnumCacheBridgeIndicesCacheIndex,
- new_index_cache.is_null() ? Object::cast(Smi::FromInt(0))
- : *new_index_cache);
+ bridge_storage->set(
+ kEnumCacheBridgeIndicesCacheIndex,
+ new_index_cache.is_null() ? Object::cast(Smi::kZero) : *new_index_cache);
if (needs_new_enum_cache) {
descriptors->set(kEnumCacheIndex, bridge_storage);
}
@@ -10536,6 +10552,14 @@ Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
return Handle<DeoptimizationOutputData>::cast(result);
}
+SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
+ if (index == -1) {
+ return SharedFunctionInfo::cast(this->SharedFunctionInfo());
+ } else {
+ return SharedFunctionInfo::cast(LiteralArray()->get(index));
+ }
+}
+
const int LiteralsArray::kFeedbackVectorOffset =
LiteralsArray::OffsetOfElementAt(LiteralsArray::kVectorIndex);
@@ -10722,7 +10746,7 @@ Handle<Object> String::ToNumber(Handle<String> subject) {
// Fast case: short integer or some sorts of junk values.
if (subject->IsSeqOneByteString()) {
int len = subject->length();
- if (len == 0) return handle(Smi::FromInt(0), isolate);
+ if (len == 0) return handle(Smi::kZero, isolate);
DisallowHeapAllocation no_gc;
uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
@@ -11604,41 +11628,170 @@ ComparisonResult String::Compare(Handle<String> x, Handle<String> y) {
return result;
}
-int String::IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
- int start_index) {
+Object* String::IndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position) {
+ if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "String.prototype.indexOf")));
+ }
+ Handle<String> receiver_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver_string,
+ Object::ToString(isolate, receiver));
+
+ Handle<String> search_string;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, search_string,
+ Object::ToString(isolate, search));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
+ Object::ToInteger(isolate, position));
+
+ double index = std::max(position->Number(), 0.0);
+ index = std::min(index, static_cast<double>(receiver_string->length()));
+
+ return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
+ static_cast<uint32_t>(index)));
+}
+
+namespace {
+
+template <typename T>
+int SearchString(Isolate* isolate, String::FlatContent receiver_content,
+ Vector<T> pat_vector, int start_index) {
+ if (receiver_content.IsOneByte()) {
+ return SearchString(isolate, receiver_content.ToOneByteVector(), pat_vector,
+ start_index);
+ }
+ return SearchString(isolate, receiver_content.ToUC16Vector(), pat_vector,
+ start_index);
+}
+
+} // namespace
+
+int String::IndexOf(Isolate* isolate, Handle<String> receiver,
+ Handle<String> search, int start_index) {
DCHECK(0 <= start_index);
- DCHECK(start_index <= sub->length());
+ DCHECK(start_index <= receiver->length());
- int pattern_length = pat->length();
- if (pattern_length == 0) return start_index;
+ uint32_t search_length = search->length();
+ if (search_length == 0) return start_index;
- int subject_length = sub->length();
- if (start_index + pattern_length > subject_length) return -1;
+ uint32_t receiver_length = receiver->length();
+ if (start_index + search_length > receiver_length) return -1;
- sub = String::Flatten(sub);
- pat = String::Flatten(pat);
+ receiver = String::Flatten(receiver);
+ search = String::Flatten(search);
DisallowHeapAllocation no_gc; // ensure vectors stay valid
// Extract flattened substrings of cons strings before getting encoding.
- String::FlatContent seq_sub = sub->GetFlatContent();
- String::FlatContent seq_pat = pat->GetFlatContent();
+ String::FlatContent receiver_content = receiver->GetFlatContent();
+ String::FlatContent search_content = search->GetFlatContent();
// dispatch on type of strings
- if (seq_pat.IsOneByte()) {
- Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
- if (seq_sub.IsOneByte()) {
- return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
- start_index);
- }
- return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector,
- start_index);
+ if (search_content.IsOneByte()) {
+ Vector<const uint8_t> pat_vector = search_content.ToOneByteVector();
+ return SearchString<const uint8_t>(isolate, receiver_content, pat_vector,
+ start_index);
}
- Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
- if (seq_sub.IsOneByte()) {
- return SearchString(isolate, seq_sub.ToOneByteVector(), pat_vector,
- start_index);
+ Vector<const uc16> pat_vector = search_content.ToUC16Vector();
+ return SearchString<const uc16>(isolate, receiver_content, pat_vector,
+ start_index);
+}
+
+MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
+ Handle<String> replacement) {
+ Factory* factory = isolate->factory();
+
+ const int replacement_length = replacement->length();
+ const int captures_length = match->CaptureCount();
+
+ replacement = String::Flatten(replacement);
+
+ Handle<String> dollar_string =
+ factory->LookupSingleCharacterStringFromCode('$');
+ int next = String::IndexOf(isolate, replacement, dollar_string, 0);
+ if (next < 0) {
+ return replacement;
+ }
+
+ IncrementalStringBuilder builder(isolate);
+
+ if (next > 0) {
+ builder.AppendString(factory->NewSubString(replacement, 0, next));
+ }
+
+ while (true) {
+ int pos = next + 1;
+ if (pos < replacement_length) {
+ const uint16_t peek = replacement->Get(pos);
+ if (peek == '$') { // $$
+ pos++;
+ builder.AppendCharacter('$');
+ } else if (peek == '&') { // $& - match
+ pos++;
+ builder.AppendString(match->GetMatch());
+ } else if (peek == '`') { // $` - prefix
+ pos++;
+ builder.AppendString(match->GetPrefix());
+ } else if (peek == '\'') { // $' - suffix
+ pos++;
+ builder.AppendString(match->GetSuffix());
+ } else if (peek >= '0' && peek <= '9') {
+ // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+ int scaled_index = (peek - '0');
+ int advance = 1;
+
+ if (pos + 1 < replacement_length) {
+ const uint16_t next_peek = replacement->Get(pos + 1);
+ if (next_peek >= '0' && next_peek <= '9') {
+ const int new_scaled_index = scaled_index * 10 + (next_peek - '0');
+ if (new_scaled_index < captures_length) {
+ scaled_index = new_scaled_index;
+ advance = 2;
+ }
+ }
+ }
+
+ if (scaled_index != 0 && scaled_index < captures_length) {
+ bool capture_exists;
+ Handle<String> capture;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, capture,
+ match->GetCapture(scaled_index, &capture_exists), String);
+ if (capture_exists) builder.AppendString(capture);
+ pos += advance;
+ } else {
+ builder.AppendCharacter('$');
+ }
+ } else {
+ builder.AppendCharacter('$');
+ }
+ } else {
+ builder.AppendCharacter('$');
+ }
+
+    // Go to the next $ in the replacement.
+ next = String::IndexOf(isolate, replacement, dollar_string, pos);
+
+ // Return if there are no more $ characters in the replacement. If we
+ // haven't reached the end, we need to append the suffix.
+ if (next < 0) {
+ if (pos < replacement_length) {
+ builder.AppendString(
+ factory->NewSubString(replacement, pos, replacement_length));
+ }
+ return builder.Finish();
+ }
+
+ // Append substring between the previous and the next $ character.
+ if (next > pos) {
+ builder.AppendString(factory->NewSubString(replacement, pos, next));
+ }
}
- return SearchString(isolate, seq_sub.ToUC16Vector(), pat_vector, start_index);
+
+ UNREACHABLE();
+ return MaybeHandle<String>();
}
namespace { // for String.Prototype.lastIndexOf
@@ -12082,6 +12235,9 @@ void JSFunction::MarkForBaseline() {
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileBaseline));
// No write barrier required, since the builtin is part of the root set.
+ if (FLAG_mark_shared_functions_for_tier_up) {
+ shared()->set_marked_for_tier_up(true);
+ }
}
void JSFunction::MarkForOptimization() {
@@ -12092,6 +12248,9 @@ void JSFunction::MarkForOptimization() {
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimized));
// No write barrier required, since the builtin is part of the root set.
+ if (FLAG_mark_shared_functions_for_tier_up) {
+ shared()->set_marked_for_tier_up(true);
+ }
}
@@ -12112,9 +12271,15 @@ void JSFunction::AttemptConcurrentOptimization() {
ShortPrint();
PrintF(" for concurrent recompilation.\n");
}
+
set_code_no_write_barrier(
isolate->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
// No write barrier required, since the builtin is part of the root set.
+ if (FLAG_mark_shared_functions_for_tier_up) {
+ // TODO(leszeks): The compilation isn't concurrent if we trigger it using
+ // this bit.
+ shared()->set_marked_for_tier_up(true);
+ }
}
// static
@@ -12142,22 +12307,6 @@ Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
return literals;
}
-void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
- Handle<SharedFunctionInfo> shared, Handle<Code> code) {
- Isolate* isolate = shared->GetIsolate();
- if (isolate->serializer_enabled()) return;
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- // Empty code maps are unsupported.
- if (!shared->OptimizedCodeMapIsCleared()) {
- Handle<WeakCell> cell = isolate->factory()->NewWeakCell(code);
- // A collection may have occured and cleared the optimized code map in the
- // allocation above.
- if (!shared->OptimizedCodeMapIsCleared()) {
- shared->optimized_code_map()->set(kSharedCodeIndex, *cell);
- }
- }
-}
-
// static
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
@@ -12174,13 +12323,11 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
if (shared->OptimizedCodeMapIsCleared()) {
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
- new_code_map->set(kSharedCodeIndex, *isolate->factory()->empty_weak_cell(),
- SKIP_WRITE_BARRIER);
entry = kEntriesStart;
} else {
Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
- if (entry > kSharedCodeIndex) {
+ if (entry >= kEntriesStart) {
// Just set the code and literals of the entry.
if (!code.is_null()) {
Handle<WeakCell> code_cell =
@@ -12250,8 +12397,8 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
void SharedFunctionInfo::ClearOptimizedCodeMap() {
- FixedArray* cleared_map = GetHeap()->cleared_optimized_code_map();
- set_optimized_code_map(cleared_map, SKIP_WRITE_BARRIER);
+ FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
+ set_optimized_code_map(empty_fixed_array, SKIP_WRITE_BARRIER);
}
@@ -12301,23 +12448,11 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
dst += kEntryLength;
}
- if (WeakCell::cast(code_map->get(kSharedCodeIndex))->value() ==
- optimized_code) {
- // Evict context-independent code as well.
- code_map->set(kSharedCodeIndex, heap->empty_weak_cell(),
- SKIP_WRITE_BARRIER);
- if (FLAG_trace_opt) {
- PrintF("[evicting entry from optimizing code map (%s) for ", reason);
- ShortPrint();
- PrintF(" (context-independent code)]\n");
- }
- }
if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
length - dst);
- if (code_map->length() == kEntriesStart &&
- WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
+ if (code_map->length() == kEntriesStart) {
ClearOptimizedCodeMap();
}
}
@@ -12331,8 +12466,7 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
// Always trim even when array is cleared because of heap verifier.
GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
shrink_by);
- if (code_map->length() == kEntriesStart &&
- WeakCell::cast(code_map->get(kSharedCodeIndex))->cleared()) {
+ if (code_map->length() == kEntriesStart) {
ClearOptimizedCodeMap();
}
}
@@ -12554,7 +12688,7 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
static void InvalidatePrototypeChainsInternal(Map* map) {
- if (!map->is_prototype_map()) return;
+ DCHECK(map->is_prototype_map());
if (FLAG_trace_prototype_users) {
PrintF("Invalidating prototype map %p 's cell\n",
reinterpret_cast<void*>(map));
@@ -12624,7 +12758,8 @@ void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
// static
Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Isolate* isolate) {
- Handle<Object> maybe_prototype(map->prototype(), isolate);
+ Handle<Object> maybe_prototype(
+ map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
// Ensure the prototype is registered with its own prototypes so its cell
@@ -12648,6 +12783,24 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
return cell;
}
+// static
+Handle<WeakCell> Map::GetOrCreatePrototypeWeakCell(Handle<JSObject> prototype,
+ Isolate* isolate) {
+ DCHECK(!prototype.is_null());
+ Handle<PrototypeInfo> proto_info =
+ GetOrCreatePrototypeInfo(prototype, isolate);
+ Object* maybe_cell = proto_info->weak_cell();
+ // Return existing cell if it's already created.
+ if (maybe_cell->IsWeakCell()) {
+ Handle<WeakCell> cell(WeakCell::cast(maybe_cell), isolate);
+ DCHECK(!cell->cleared());
+ return cell;
+ }
+ // Otherwise create a new cell.
+ Handle<WeakCell> cell = isolate->factory()->NewWeakCell(prototype);
+ proto_info->set_weak_cell(*cell);
+ return cell;
+}
// static
void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
@@ -13236,93 +13389,133 @@ int Script::GetEvalPosition() {
void Script::InitLineEnds(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
if (!script->line_ends()->IsUndefined(isolate)) return;
+ DCHECK_NE(Script::TYPE_WASM, script->type());
- if (!script->source()->IsString()) {
- DCHECK(script->source()->IsUndefined(isolate));
- Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
- script->set_line_ends(*empty);
- DCHECK(script->line_ends()->IsFixedArray());
- return;
+ Object* src_obj = script->source();
+ if (!src_obj->IsString()) {
+ DCHECK(src_obj->IsUndefined(isolate));
+ script->set_line_ends(isolate->heap()->empty_fixed_array());
+ } else {
+ DCHECK(src_obj->IsString());
+ Handle<String> src(String::cast(src_obj), isolate);
+ Handle<FixedArray> array = String::CalculateLineEnds(src, true);
+ script->set_line_ends(*array);
}
- Handle<String> src(String::cast(script->source()), isolate);
-
- Handle<FixedArray> array = String::CalculateLineEnds(src, true);
+ DCHECK(script->line_ends()->IsFixedArray());
+}
- if (*array != isolate->heap()->empty_fixed_array()) {
- array->set_map(isolate->heap()->fixed_cow_array_map());
+bool Script::GetPositionInfo(Handle<Script> script, int position,
+ PositionInfo* info, OffsetFlag offset_flag) {
+ // For wasm, we do not create an artificial line_ends array, but do the
+ // translation directly.
+ if (script->type() == Script::TYPE_WASM) {
+ Handle<WasmCompiledModule> compiled_module(
+ WasmCompiledModule::cast(script->wasm_compiled_module()));
+ DCHECK_LE(0, position);
+ return wasm::GetPositionInfo(compiled_module,
+ static_cast<uint32_t>(position), info);
}
- script->set_line_ends(*array);
- DCHECK(script->line_ends()->IsFixedArray());
+ InitLineEnds(script);
+ return script->GetPositionInfo(position, info, offset_flag);
+}
+
+namespace {
+bool GetPositionInfoSlow(const Script* script, int position,
+ Script::PositionInfo* info) {
+ if (!script->source()->IsString()) return false;
+ if (position < 0) position = 0;
+
+ String* source_string = String::cast(script->source());
+ int line = 0;
+ int line_start = 0;
+ int len = source_string->length();
+ for (int pos = 0; pos <= len; ++pos) {
+ if (pos == len || source_string->Get(pos) == '\n') {
+ if (position <= pos) {
+ info->line = line;
+ info->column = position - line_start;
+ info->line_start = line_start;
+ info->line_end = pos;
+ return true;
+ }
+ line++;
+ line_start = pos + 1;
+ }
+ }
+ return false;
}
+} // namespace
#define SMI_VALUE(x) (Smi::cast(x)->value())
bool Script::GetPositionInfo(int position, PositionInfo* info,
- OffsetFlag offset_flag) {
- Handle<Script> script(this);
- InitLineEnds(script);
-
+ OffsetFlag offset_flag) const {
DisallowHeapAllocation no_allocation;
- DCHECK(script->line_ends()->IsFixedArray());
- FixedArray* ends = FixedArray::cast(script->line_ends());
-
- const int ends_len = ends->length();
- if (ends_len == 0) return false;
+ if (line_ends()->IsUndefined(GetIsolate())) {
+ // Slow mode: we do not have line_ends. We have to iterate through source.
+ if (!GetPositionInfoSlow(this, position, info)) return false;
+ } else {
+ DCHECK(line_ends()->IsFixedArray());
+ FixedArray* ends = FixedArray::cast(line_ends());
- // Return early on invalid positions. Negative positions behave as if 0 was
- // passed, and positions beyond the end of the script return as failure.
- if (position < 0) {
- position = 0;
- } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
- return false;
- }
+ const int ends_len = ends->length();
+ if (ends_len == 0) return false;
- // Determine line number by doing a binary search on the line ends array.
- if (SMI_VALUE(ends->get(0)) >= position) {
- info->line = 0;
- info->line_start = 0;
- info->column = position;
- } else {
- int left = 0;
- int right = ends_len - 1;
-
- while (right > 0) {
- DCHECK_LE(left, right);
- const int mid = (left + right) / 2;
- if (position > SMI_VALUE(ends->get(mid))) {
- left = mid + 1;
- } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
- right = mid - 1;
- } else {
- info->line = mid;
- break;
- }
+ // Return early on invalid positions. Negative positions behave as if 0 was
+ // passed, and positions beyond the end of the script return as failure.
+ if (position < 0) {
+ position = 0;
+ } else if (position > SMI_VALUE(ends->get(ends_len - 1))) {
+ return false;
}
- DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
- SMI_VALUE(ends->get(info->line - 1)) < position);
- info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
- info->column = position - info->line_start;
- }
- // Line end is position of the linebreak character.
- info->line_end = SMI_VALUE(ends->get(info->line));
- if (info->line_end > 0) {
- DCHECK(script->source()->IsString());
- Handle<String> src(String::cast(script->source()));
- if (src->length() >= info->line_end &&
- src->Get(info->line_end - 1) == '\r') {
- info->line_end--;
+ // Determine line number by doing a binary search on the line ends array.
+ if (SMI_VALUE(ends->get(0)) >= position) {
+ info->line = 0;
+ info->line_start = 0;
+ info->column = position;
+ } else {
+ int left = 0;
+ int right = ends_len - 1;
+
+ while (right > 0) {
+ DCHECK_LE(left, right);
+ const int mid = (left + right) / 2;
+ if (position > SMI_VALUE(ends->get(mid))) {
+ left = mid + 1;
+ } else if (position <= SMI_VALUE(ends->get(mid - 1))) {
+ right = mid - 1;
+ } else {
+ info->line = mid;
+ break;
+ }
+ }
+ DCHECK(SMI_VALUE(ends->get(info->line)) >= position &&
+ SMI_VALUE(ends->get(info->line - 1)) < position);
+ info->line_start = SMI_VALUE(ends->get(info->line - 1)) + 1;
+ info->column = position - info->line_start;
+ }
+
+ // Line end is position of the linebreak character.
+ info->line_end = SMI_VALUE(ends->get(info->line));
+ if (info->line_end > 0) {
+ DCHECK(source()->IsString());
+ String* src = String::cast(source());
+ if (src->length() >= info->line_end &&
+ src->Get(info->line_end - 1) == '\r') {
+ info->line_end--;
+ }
}
}
// Add offsets if requested.
if (offset_flag == WITH_OFFSET) {
if (info->line == 0) {
- info->column += script->column_offset();
+ info->column += column_offset();
}
- info->line += script->line_offset();
+ info->line += line_offset();
}
return true;
@@ -13331,49 +13524,28 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
PositionInfo info;
- if (!script->GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
- return -1;
- }
-
+ GetPositionInfo(script, code_pos, &info, WITH_OFFSET);
return info.column;
}
-int Script::GetLineNumberWithArray(int code_pos) {
+int Script::GetColumnNumber(int code_pos) const {
PositionInfo info;
- if (!GetPositionInfo(code_pos, &info, WITH_OFFSET)) {
- return -1;
- }
-
- return info.line;
+ GetPositionInfo(code_pos, &info, WITH_OFFSET);
+ return info.column;
}
-
int Script::GetLineNumber(Handle<Script> script, int code_pos) {
- InitLineEnds(script);
- return script->GetLineNumberWithArray(code_pos);
+ PositionInfo info;
+ GetPositionInfo(script, code_pos, &info, WITH_OFFSET);
+ return info.line;
}
-
-int Script::GetLineNumber(int code_pos) {
- DisallowHeapAllocation no_allocation;
- if (!line_ends()->IsUndefined(GetIsolate())) {
- return GetLineNumberWithArray(code_pos);
- }
-
- // Slow mode: we do not have line_ends. We have to iterate through source.
- if (!source()->IsString()) return -1;
-
- String* source_string = String::cast(source());
- int line = 0;
- int len = source_string->length();
- for (int pos = 0; pos < len; pos++) {
- if (pos == code_pos) break;
- if (source_string->Get(pos) == '\n') line++;
- }
- return line;
+int Script::GetLineNumber(int code_pos) const {
+ PositionInfo info;
+ GetPositionInfo(code_pos, &info, WITH_OFFSET);
+ return info.line;
}
-
Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
@@ -13748,7 +13920,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
- shared_info->set_length(lit->scope()->arity());
+ shared_info->set_length(lit->function_length());
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->set_function_token_position(lit->function_token_position());
shared_info->set_start_position(lit->start_position());
@@ -13758,8 +13930,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
shared_info->set_inferred_name(*lit->inferred_name());
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
- shared_info->set_allows_lazy_compilation_without_context(
- lit->AllowsLazyCompilationWithoutContext());
shared_info->set_language_mode(lit->language_mode());
shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
@@ -13818,7 +13988,7 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
}
set_opt_count(0);
set_deopt_count(0);
- } else if (code()->is_interpreter_trampoline_builtin()) {
+ } else if (IsInterpreted()) {
set_profiler_ticks(0);
if (optimization_disabled() && opt_count() >= FLAG_max_opt_count) {
// Re-enable optimizations if they were disabled due to opt_count limit.
@@ -13845,11 +14015,6 @@ int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
return i;
}
}
- Object* shared_code =
- WeakCell::cast(optimized_code_map->get(kSharedCodeIndex))->value();
- if (shared_code->IsCode() && osr_ast_id.IsNone()) {
- return kSharedCodeIndex;
- }
}
return -1;
}
@@ -13863,8 +14028,6 @@ void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
SKIP_WRITE_BARRIER);
}
- optimized_code_map->set(kSharedCodeIndex, empty_weak_cell,
- SKIP_WRITE_BARRIER);
}
}
@@ -13874,24 +14037,14 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
if (entry != kNotFound) {
FixedArray* code_map = optimized_code_map();
- if (entry == kSharedCodeIndex) {
- // We know the weak cell isn't cleared because we made sure of it in
- // SearchOptimizedCodeMapEntry and performed no allocations since that
- // call.
- result = {
- Code::cast(WeakCell::cast(code_map->get(kSharedCodeIndex))->value()),
- nullptr};
- } else {
- DCHECK_LE(entry + kEntryLength, code_map->length());
- WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
- WeakCell* literals_cell =
- WeakCell::cast(code_map->get(entry + kLiteralsOffset));
+ DCHECK_LE(entry + kEntryLength, code_map->length());
+ WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
+ WeakCell* literals_cell =
+ WeakCell::cast(code_map->get(entry + kLiteralsOffset));
- result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
- literals_cell->cleared()
- ? nullptr
- : LiteralsArray::cast(literals_cell->value())};
- }
+ result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
+ literals_cell->cleared() ? nullptr : LiteralsArray::cast(
+ literals_cell->value())};
}
return result;
}
@@ -14148,7 +14301,7 @@ int AbstractCode::SourcePosition(int offset) {
for (SourcePositionTableIterator iterator(source_position_table());
!iterator.done() && iterator.code_offset() <= offset;
iterator.Advance()) {
- position = iterator.source_position();
+ position = iterator.source_position().ScriptOffset();
}
return position;
}
@@ -14161,7 +14314,7 @@ int AbstractCode::SourceStatementPosition(int offset) {
for (SourcePositionTableIterator it(source_position_table()); !it.done();
it.Advance()) {
if (it.is_statement()) {
- int p = it.source_position();
+ int p = it.source_position().ScriptOffset();
if (statement_position < p && p <= position) {
statement_position = p;
}
@@ -14379,14 +14532,15 @@ Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
void Code::PrintDeoptLocation(FILE* out, Address pc) {
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
class SourcePosition pos = info.position;
- if (info.deopt_reason != DeoptimizeReason::kNoReason || !pos.IsUnknown()) {
+ if (info.deopt_reason != DeoptimizeReason::kNoReason || pos.IsKnown()) {
if (FLAG_hydrogen_track_positions) {
- PrintF(out, " ;;; deoptimize at %d_%d: %s\n",
- pos.inlining_id(), pos.position(),
- DeoptimizeReasonToString(info.deopt_reason));
+ PrintF(out, " ;;; deoptimize at %d_%d: %s\n", pos.InliningId(),
+ pos.ScriptOffset(), DeoptimizeReasonToString(info.deopt_reason));
} else {
- PrintF(out, " ;;; deoptimize at %d: %s\n", pos.raw(),
- DeoptimizeReasonToString(info.deopt_reason));
+ PrintF(out, " ;;; deoptimize at ");
+ OFStream outstr(out);
+ pos.Print(outstr, this);
+ PrintF(out, ", %s\n", DeoptimizeReasonToString(info.deopt_reason));
}
}
}
@@ -14450,6 +14604,42 @@ WeakCell* Code::CachedWeakCell() {
return NULL;
}
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
+const char* Code::ICState2String(InlineCacheState state) {
+ switch (state) {
+ case UNINITIALIZED:
+ return "UNINITIALIZED";
+ case PREMONOMORPHIC:
+ return "PREMONOMORPHIC";
+ case MONOMORPHIC:
+ return "MONOMORPHIC";
+ case RECOMPUTE_HANDLER:
+ return "RECOMPUTE_HANDLER";
+ case POLYMORPHIC:
+ return "POLYMORPHIC";
+ case MEGAMORPHIC:
+ return "MEGAMORPHIC";
+ case GENERIC:
+ return "GENERIC";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+void Code::PrintExtraICState(std::ostream& os, // NOLINT
+ Kind kind, ExtraICState extra) {
+ os << "extra_ic_state = ";
+ if ((kind == STORE_IC || kind == KEYED_STORE_IC) &&
+ is_strict(static_cast<LanguageMode>(extra))) {
+ os << "STRICT\n";
+ } else {
+ os << extra << "\n";
+ }
+}
+
+#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(
@@ -14706,34 +14896,6 @@ void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
}
-const char* Code::ICState2String(InlineCacheState state) {
- switch (state) {
- case UNINITIALIZED: return "UNINITIALIZED";
- case PREMONOMORPHIC: return "PREMONOMORPHIC";
- case MONOMORPHIC: return "MONOMORPHIC";
- case RECOMPUTE_HANDLER:
- return "RECOMPUTE_HANDLER";
- case POLYMORPHIC: return "POLYMORPHIC";
- case MEGAMORPHIC: return "MEGAMORPHIC";
- case GENERIC: return "GENERIC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-void Code::PrintExtraICState(std::ostream& os, // NOLINT
- Kind kind, ExtraICState extra) {
- os << "extra_ic_state = ";
- if ((kind == STORE_IC || kind == KEYED_STORE_IC) &&
- is_strict(static_cast<LanguageMode>(extra))) {
- os << "STRICT\n";
- } else {
- os << extra << "\n";
- }
-}
-
-
void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "kind = " << Kind2String(kind()) << "\n";
if (IsCodeStubOrIC()) {
@@ -14817,8 +14979,8 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "Source positions:\n pc offset position\n";
for (; !it.done(); it.Advance()) {
os << std::setw(10) << it.code_offset() << std::setw(10)
- << it.source_position() << (it.is_statement() ? " statement" : "")
- << "\n";
+ << it.source_position().ScriptOffset()
+ << (it.is_statement() ? " statement" : "") << "\n";
}
os << "\n";
}
@@ -14920,7 +15082,7 @@ void BytecodeArray::Disassemble(std::ostream& os) {
while (!iterator.done()) {
if (!source_positions.done() &&
iterator.current_offset() == source_positions.code_offset()) {
- os << std::setw(5) << source_positions.source_position();
+ os << std::setw(5) << source_positions.source_position().ScriptOffset();
os << (source_positions.is_statement() ? " S> " : " E> ");
source_positions.Advance();
} else {
@@ -15266,8 +15428,8 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
return "prototype-check";
case kPropertyCellChangedGroup:
return "property-cell-changed";
- case kFieldTypeGroup:
- return "field-type";
+ case kFieldOwnerGroup:
+ return "field-owner";
case kInitialMapChangedGroup:
return "initial-map-changed";
case kAllocationSiteTenuringChangedGroup:
@@ -15426,7 +15588,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// Nothing to do if prototype is already set.
if (map->prototype() == *value) return Just(true);
- bool immutable_proto = object->map()->is_immutable_proto();
+ bool immutable_proto = map->is_immutable_proto();
if (immutable_proto) {
RETURN_FAILURE(
isolate, should_throw,
@@ -15788,6 +15950,14 @@ bool AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
return result;
}
+AllocationSiteMode AllocationSite::GetMode(ElementsKind from, ElementsKind to) {
+ if (IsFastSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to)) {
+ return TRACK_ALLOCATION_SITE;
+ }
+
+ return DONT_TRACK_ALLOCATION_SITE;
+}
const char* AllocationSite::PretenureDecisionName(PretenureDecision decision) {
switch (decision) {
@@ -15912,12 +16082,13 @@ bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
template <typename BackingStore>
static int FastHoleyElementsUsage(JSObject* object, BackingStore* store) {
+ Isolate* isolate = store->GetIsolate();
int limit = object->IsJSArray()
? Smi::cast(JSArray::cast(object)->length())->value()
: store->length();
int used = 0;
for (int i = 0; i < limit; ++i) {
- if (!store->is_the_hole(i)) ++used;
+ if (!store->is_the_hole(isolate, i)) ++used;
}
return used;
}
@@ -16540,15 +16711,14 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
if (constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
// If we still have the original map, set in-object properties directly.
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, Smi::kZero,
+ SKIP_WRITE_BARRIER);
} else {
// Map has changed, so use generic, but slower, method.
- RETURN_ON_EXCEPTION(
- isolate,
- JSReceiver::SetProperty(regexp, factory->last_index_string(),
- Handle<Smi>(Smi::FromInt(0), isolate), STRICT),
- JSRegExp);
+ RETURN_ON_EXCEPTION(isolate, JSReceiver::SetProperty(
+ regexp, factory->lastIndex_string(),
+ Handle<Smi>(Smi::kZero, isolate), STRICT),
+ JSRegExp);
}
return regexp;
@@ -16679,7 +16849,8 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
MinimumCapacity capacity_option,
PretenureFlag pretenure) {
DCHECK(0 <= at_least_space_for);
- DCHECK(!capacity_option || base::bits::IsPowerOfTwo32(at_least_space_for));
+ DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
+ base::bits::IsPowerOfTwo32(at_least_space_for));
int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
? at_least_space_for
@@ -17264,7 +17435,7 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
limit = elements_length;
}
if (limit == 0) {
- return handle(Smi::FromInt(0), isolate);
+ return handle(Smi::kZero, isolate);
}
uint32_t result = 0;
@@ -18244,6 +18415,9 @@ Object* ObjectHashTable::Lookup(Handle<Object> key) {
return Lookup(isolate, key, Smi::cast(hash)->value());
}
+Object* ObjectHashTable::ValueAt(int entry) {
+ return get(EntryToValueIndex(entry));
+}
Object* ObjectHashTable::Lookup(Handle<Object> key, int32_t hash) {
return Lookup(GetIsolate(), key, hash);
@@ -18687,7 +18861,7 @@ Smi* OrderedHashTableIterator<Derived, TableType>::Next(JSArray* value_array) {
MoveNext();
return Smi::cast(kind());
}
- return Smi::FromInt(0);
+ return Smi::kZero;
}
@@ -19250,7 +19424,8 @@ int JSMessageObject::GetLineNumber() const {
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
- if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+ if (!Script::GetPositionInfo(the_script, start_position(), &info,
+ offset_flag)) {
return Message::kNoLineNumberInfo;
}
@@ -19264,7 +19439,8 @@ int JSMessageObject::GetColumnNumber() const {
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
- if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+ if (!Script::GetPositionInfo(the_script, start_position(), &info,
+ offset_flag)) {
return -1;
}
@@ -19281,7 +19457,8 @@ Handle<String> JSMessageObject::GetSourceLine() const {
Script::PositionInfo info;
const Script::OffsetFlag offset_flag = Script::WITH_OFFSET;
- if (!the_script->GetPositionInfo(start_position(), &info, offset_flag)) {
+ if (!Script::GetPositionInfo(the_script, start_position(), &info,
+ offset_flag)) {
return isolate->factory()->empty_string();
}
@@ -19293,7 +19470,7 @@ void JSArrayBuffer::Neuter() {
CHECK(is_neuterable());
CHECK(is_external());
set_backing_store(NULL);
- set_byte_length(Smi::FromInt(0));
+ set_byte_length(Smi::kZero);
set_was_neutered(true);
}
@@ -19304,7 +19481,7 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
DCHECK(array_buffer->GetInternalFieldCount() ==
v8::ArrayBuffer::kInternalFieldCount);
for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
- array_buffer->SetInternalField(i, Smi::FromInt(0));
+ array_buffer->SetInternalField(i, Smi::kZero);
}
array_buffer->set_bit_field(0);
array_buffer->set_is_external(is_external);
@@ -19591,11 +19768,29 @@ bool JSReceiver::HasProxyInPrototype(Isolate* isolate) {
return false;
}
+MaybeHandle<Object> JSModuleNamespace::GetExport(Handle<String> name) {
+ Isolate* isolate = name->GetIsolate();
+
+ Handle<Object> object(module()->exports()->Lookup(name), isolate);
+ if (object->IsTheHole(isolate)) {
+ return isolate->factory()->undefined_value();
+ }
+
+ Handle<Object> value(Handle<Cell>::cast(object)->value(), isolate);
+ if (value->IsTheHole(isolate)) {
+ THROW_NEW_ERROR(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
+ }
+
+ return value;
+}
+
namespace {
-template <typename T>
-struct HandleValueHash {
- V8_INLINE size_t operator()(Handle<T> handle) const { return handle->Hash(); }
+struct ModuleHandleHash {
+ V8_INLINE size_t operator()(Handle<Module> module) const {
+ return module->hash();
+ }
};
struct ModuleHandleEqual {
@@ -19604,6 +19799,12 @@ struct ModuleHandleEqual {
}
};
+struct StringHandleHash {
+ V8_INLINE size_t operator()(Handle<String> string) const {
+ return string->Hash();
+ }
+};
+
struct StringHandleEqual {
V8_INLINE bool operator()(Handle<String> lhs, Handle<String> rhs) const {
return lhs->Equals(*rhs);
@@ -19611,32 +19812,57 @@ struct StringHandleEqual {
};
class UnorderedStringSet
- : public std::unordered_set<Handle<String>, HandleValueHash<String>,
+ : public std::unordered_set<Handle<String>, StringHandleHash,
StringHandleEqual,
zone_allocator<Handle<String>>> {
public:
explicit UnorderedStringSet(Zone* zone)
- : std::unordered_set<Handle<String>, HandleValueHash<String>,
- StringHandleEqual, zone_allocator<Handle<String>>>(
- 2 /* bucket count */, HandleValueHash<String>(),
- StringHandleEqual(), zone_allocator<Handle<String>>(zone)) {}
+ : std::unordered_set<Handle<String>, StringHandleHash, StringHandleEqual,
+ zone_allocator<Handle<String>>>(
+ 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+ zone_allocator<Handle<String>>(zone)) {}
+};
+
+class UnorderedModuleSet
+ : public std::unordered_set<Handle<Module>, ModuleHandleHash,
+ ModuleHandleEqual,
+ zone_allocator<Handle<Module>>> {
+ public:
+ explicit UnorderedModuleSet(Zone* zone)
+ : std::unordered_set<Handle<Module>, ModuleHandleHash, ModuleHandleEqual,
+ zone_allocator<Handle<Module>>>(
+ 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
+ zone_allocator<Handle<Module>>(zone)) {}
+};
+
+class UnorderedStringMap
+ : public std::unordered_map<
+ Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+ zone_allocator<std::pair<const Handle<String>, Handle<Object>>>> {
+ public:
+ explicit UnorderedStringMap(Zone* zone)
+ : std::unordered_map<
+ Handle<String>, Handle<Object>, StringHandleHash, StringHandleEqual,
+ zone_allocator<std::pair<const Handle<String>, Handle<Object>>>>(
+ 2 /* bucket count */, StringHandleHash(), StringHandleEqual(),
+ zone_allocator<std::pair<const Handle<String>, Handle<Object>>>(
+ zone)) {}
};
} // anonymous namespace
class Module::ResolveSet
: public std::unordered_map<
- Handle<Module>, UnorderedStringSet*, HandleValueHash<Module>,
+ Handle<Module>, UnorderedStringSet*, ModuleHandleHash,
ModuleHandleEqual, zone_allocator<std::pair<const Handle<Module>,
UnorderedStringSet*>>> {
public:
explicit ResolveSet(Zone* zone)
: std::unordered_map<Handle<Module>, UnorderedStringSet*,
- HandleValueHash<Module>, ModuleHandleEqual,
+ ModuleHandleHash, ModuleHandleEqual,
zone_allocator<std::pair<const Handle<Module>,
UnorderedStringSet*>>>(
- 2 /* bucket count */, HandleValueHash<Module>(),
- ModuleHandleEqual(),
+ 2 /* bucket count */, ModuleHandleHash(), ModuleHandleEqual(),
zone_allocator<
std::pair<const Handle<Module>, UnorderedStringSet*>>(zone)),
zone_(zone) {}
@@ -19647,6 +19873,22 @@ class Module::ResolveSet
Zone* zone_;
};
+namespace {
+
+int ExportIndex(int cell_index) {
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+ ModuleDescriptor::kExport);
+ return cell_index - 1;
+}
+
+int ImportIndex(int cell_index) {
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+ ModuleDescriptor::kImport);
+ return -cell_index - 1;
+}
+
+} // anonymous namespace
+
void Module::CreateIndirectExport(Handle<Module> module, Handle<String> name,
Handle<ModuleInfoEntry> entry) {
Isolate* isolate = module->GetIsolate();
@@ -19656,11 +19898,15 @@ void Module::CreateIndirectExport(Handle<Module> module, Handle<String> name,
module->set_exports(*exports);
}
-void Module::CreateExport(Handle<Module> module, Handle<FixedArray> names) {
+void Module::CreateExport(Handle<Module> module, int cell_index,
+ Handle<FixedArray> names) {
DCHECK_LT(0, names->length());
Isolate* isolate = module->GetIsolate();
+
Handle<Cell> cell =
isolate->factory()->NewCell(isolate->factory()->undefined_value());
+ module->regular_exports()->set(ExportIndex(cell_index), *cell);
+
Handle<ObjectHashTable> exports(module->exports(), isolate);
for (int i = 0, n = names->length(); i < n; ++i) {
Handle<String> name(String::cast(names->get(i)), isolate);
@@ -19670,44 +19916,49 @@ void Module::CreateExport(Handle<Module> module, Handle<FixedArray> names) {
module->set_exports(*exports);
}
-void Module::StoreExport(Handle<Module> module, Handle<String> name,
- Handle<Object> value) {
- Handle<Cell> cell(Cell::cast(module->exports()->Lookup(name)));
- cell->set_value(*value);
-}
-
-Handle<Object> Module::LoadExport(Handle<Module> module, Handle<String> name) {
+Handle<Object> Module::LoadVariable(Handle<Module> module, int cell_index) {
Isolate* isolate = module->GetIsolate();
- Handle<Object> object(module->exports()->Lookup(name), isolate);
-
- // TODO(neis): Namespace imports are not yet implemented. Trying to use this
- // feature may crash here.
- if (!object->IsCell()) UNIMPLEMENTED();
-
+ Handle<Object> object;
+ switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case ModuleDescriptor::kImport:
+ object = handle(module->regular_imports()->get(ImportIndex(cell_index)),
+ isolate);
+ break;
+ case ModuleDescriptor::kExport:
+ object = handle(module->regular_exports()->get(ExportIndex(cell_index)),
+ isolate);
+ break;
+ case ModuleDescriptor::kInvalid:
+ UNREACHABLE();
+ break;
+ }
return handle(Handle<Cell>::cast(object)->value(), isolate);
}
-Handle<Object> Module::LoadImport(Handle<Module> module, Handle<String> name,
- int module_request) {
+void Module::StoreVariable(Handle<Module> module, int cell_index,
+ Handle<Object> value) {
Isolate* isolate = module->GetIsolate();
- Handle<Module> requested_module(
- Module::cast(module->requested_modules()->get(module_request)), isolate);
- return Module::LoadExport(requested_module, name);
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
+ ModuleDescriptor::kExport);
+ Handle<Object> object(module->regular_exports()->get(ExportIndex(cell_index)),
+ isolate);
+ Handle<Cell>::cast(object)->set_value(*value);
}
MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
Handle<String> name, int module_request,
- bool must_resolve,
+ MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
Isolate* isolate = module->GetIsolate();
Handle<Module> requested_module(
Module::cast(module->requested_modules()->get(module_request)), isolate);
- return Module::ResolveExport(requested_module, name, must_resolve,
+ return Module::ResolveExport(requested_module, name, loc, must_resolve,
resolve_set);
}
MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
- Handle<String> name, bool must_resolve,
+ Handle<String> name,
+ MessageLocation loc, bool must_resolve,
Module::ResolveSet* resolve_set) {
Isolate* isolate = module->GetIsolate();
Handle<Object> object(module->exports()->Lookup(name), isolate);
@@ -19729,10 +19980,10 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
} else if (name_set->count(name)) {
// Cycle detected.
if (must_resolve) {
- THROW_NEW_ERROR(
- isolate,
- NewSyntaxError(MessageTemplate::kCyclicModuleDependency, name),
- Cell);
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kCyclicModuleDependency, name),
+ &loc);
}
return MaybeHandle<Cell>();
}
@@ -19742,11 +19993,15 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
if (object->IsModuleInfoEntry()) {
// Not yet resolved indirect export.
Handle<ModuleInfoEntry> entry = Handle<ModuleInfoEntry>::cast(object);
- int module_request = Smi::cast(entry->module_request())->value();
Handle<String> import_name(String::cast(entry->import_name()), isolate);
+ Handle<Script> script(
+ Script::cast(JSFunction::cast(module->code())->shared()->script()),
+ isolate);
+ MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
Handle<Cell> cell;
- if (!ResolveImport(module, import_name, module_request, true, resolve_set)
+ if (!ResolveImport(module, import_name, entry->module_request(), new_loc,
+ true, resolve_set)
.ToHandle(&cell)) {
DCHECK(isolate->has_pending_exception());
return MaybeHandle<Cell>();
@@ -19763,13 +20018,13 @@ MaybeHandle<Cell> Module::ResolveExport(Handle<Module> module,
}
DCHECK(object->IsTheHole(isolate));
- return Module::ResolveExportUsingStarExports(module, name, must_resolve,
+ return Module::ResolveExportUsingStarExports(module, name, loc, must_resolve,
resolve_set);
}
MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> name, bool must_resolve,
- Module::ResolveSet* resolve_set) {
+ Handle<Module> module, Handle<String> name, MessageLocation loc,
+ bool must_resolve, Module::ResolveSet* resolve_set) {
Isolate* isolate = module->GetIsolate();
if (!name->Equals(isolate->heap()->default_string())) {
// Go through all star exports looking for the given name. If multiple star
@@ -19783,16 +20038,22 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
if (!entry->export_name()->IsUndefined(isolate)) {
continue; // Indirect export.
}
- int module_request = Smi::cast(entry->module_request())->value();
+
+ Handle<Script> script(
+ Script::cast(JSFunction::cast(module->code())->shared()->script()),
+ isolate);
+ MessageLocation new_loc(script, entry->beg_pos(), entry->end_pos());
Handle<Cell> cell;
- if (ResolveImport(module, name, module_request, false, resolve_set)
+ if (ResolveImport(module, name, entry->module_request(), new_loc, false,
+ resolve_set)
.ToHandle(&cell)) {
if (unique_cell.is_null()) unique_cell = cell;
if (*unique_cell != *cell) {
- THROW_NEW_ERROR(
- isolate, NewSyntaxError(MessageTemplate::kAmbiguousExport, name),
- Cell);
+ return isolate->Throw<Cell>(
+ isolate->factory()->NewSyntaxError(
+ MessageTemplate::kAmbiguousExport, name),
+ &loc);
}
} else if (isolate->has_pending_exception()) {
return MaybeHandle<Cell>();
@@ -19811,18 +20072,16 @@ MaybeHandle<Cell> Module::ResolveExportUsingStarExports(
// Unresolvable.
if (must_resolve) {
- THROW_NEW_ERROR(isolate,
- NewSyntaxError(MessageTemplate::kUnresolvableExport, name),
- Cell);
+ return isolate->Throw<Cell>(isolate->factory()->NewSyntaxError(
+ MessageTemplate::kUnresolvableExport, name),
+ &loc);
}
return MaybeHandle<Cell>();
}
bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
- v8::Module::ResolveCallback callback,
- v8::Local<v8::Value> callback_data) {
- // Already instantiated.
- if (module->code()->IsJSFunction()) return true;
+ v8::Module::ResolveCallback callback) {
+ if (module->instantiated()) return true;
Isolate* isolate = module->GetIsolate();
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(module->code()),
@@ -19832,16 +20091,18 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
shared,
handle(Utils::OpenHandle(*context)->native_context(), isolate));
module->set_code(*function);
+ DCHECK(module->instantiated());
Handle<ModuleInfo> module_info(shared->scope_info()->ModuleDescriptorInfo(),
isolate);
// Set up local exports.
- Handle<FixedArray> regular_exports(module_info->regular_exports(), isolate);
- for (int i = 0, n = regular_exports->length(); i < n; i += 2) {
- Handle<FixedArray> export_names(
- FixedArray::cast(regular_exports->get(i + 1)), isolate);
- CreateExport(module, export_names);
+ // TODO(neis): Create regular_exports array here instead of in factory method?
+ for (int i = 0, n = module_info->RegularExportCount(); i < n; ++i) {
+ int cell_index = module_info->RegularExportCellIndex(i);
+ Handle<FixedArray> export_names(module_info->RegularExportExportNames(i),
+ isolate);
+ CreateExport(module, cell_index, export_names);
}
// Partially set up indirect exports.
@@ -19866,7 +20127,7 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
// persist a module_map across multiple top-level module loads, as
// the current module is left in a "half-instantiated" state.
if (!callback(context, v8::Utils::ToLocal(specifier),
- v8::Utils::ToLocal(module), callback_data)
+ v8::Utils::ToLocal(module))
.ToLocal(&api_requested_module)) {
// TODO(adamk): Give this a better error message. But this is a
// misuse of the API anyway.
@@ -19875,12 +20136,12 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
}
Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
module->requested_modules()->set(i, *requested_module);
- if (!Instantiate(requested_module, context, callback, callback_data)) {
+ if (!Instantiate(requested_module, context, callback)) {
return false;
}
}
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
// Resolve imports.
Handle<FixedArray> regular_imports(module_info->regular_imports(), isolate);
@@ -19888,12 +20149,18 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
Handle<ModuleInfoEntry> entry(
ModuleInfoEntry::cast(regular_imports->get(i)), isolate);
Handle<String> name(String::cast(entry->import_name()), isolate);
- int module_request = Smi::cast(entry->module_request())->value();
+ Handle<Script> script(
+ Script::cast(JSFunction::cast(module->code())->shared()->script()),
+ isolate);
+ MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
ResolveSet resolve_set(&zone);
- if (ResolveImport(module, name, module_request, true, &resolve_set)
- .is_null()) {
+ Handle<Cell> cell;
+ if (!ResolveImport(module, name, entry->module_request(), loc, true,
+ &resolve_set)
+ .ToHandle(&cell)) {
return false;
}
+ module->regular_imports()->set(ImportIndex(entry->cell_index()), *cell);
}
// Resolve indirect exports.
@@ -19902,8 +20169,13 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
ModuleInfoEntry::cast(special_exports->get(i)), isolate);
Handle<Object> name(entry->export_name(), isolate);
if (name->IsUndefined(isolate)) continue; // Star export.
+ Handle<Script> script(
+ Script::cast(JSFunction::cast(module->code())->shared()->script()),
+ isolate);
+ MessageLocation loc(script, entry->beg_pos(), entry->end_pos());
ResolveSet resolve_set(&zone);
- if (ResolveExport(module, Handle<String>::cast(name), true, &resolve_set)
+ if (ResolveExport(module, Handle<String>::cast(name), loc, true,
+ &resolve_set)
.is_null()) {
return false;
}
@@ -19913,16 +20185,15 @@ bool Module::Instantiate(Handle<Module> module, v8::Local<v8::Context> context,
}
MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
- DCHECK(module->code()->IsJSFunction()); // Instantiated.
-
- Isolate* isolate = module->GetIsolate();
+ DCHECK(module->instantiated());
// Each module can only be evaluated once.
+ Isolate* isolate = module->GetIsolate();
if (module->evaluated()) return isolate->factory()->undefined_value();
- module->set_evaluated(true);
+ Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
+ module->set_evaluated();
// Initialization.
- Handle<JSFunction> function(JSFunction::cast(module->code()), isolate);
DCHECK_EQ(MODULE_SCOPE, function->shared()->scope_info()->scope_type());
Handle<Object> receiver = isolate->factory()->undefined_value();
Handle<Object> argv[] = {module};
@@ -19945,5 +20216,192 @@ MaybeHandle<Object> Module::Evaluate(Handle<Module> module) {
return Execution::Call(isolate, resume, generator, 0, nullptr);
}
+namespace {
+
+void FetchStarExports(Handle<Module> module, Zone* zone,
+ UnorderedModuleSet* visited) {
+ DCHECK(module->instantiated());
+
+ bool cycle = !visited->insert(module).second;
+ if (cycle) return;
+
+ Isolate* isolate = module->GetIsolate();
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ UnorderedStringMap more_exports(zone);
+
+ // TODO(neis): Only allocate more_exports if there are star exports.
+ // Maybe split special_exports into indirect_exports and star_exports.
+
+ Handle<FixedArray> special_exports(module->info()->special_exports(),
+ isolate);
+ for (int i = 0, n = special_exports->length(); i < n; ++i) {
+ Handle<ModuleInfoEntry> entry(
+ ModuleInfoEntry::cast(special_exports->get(i)), isolate);
+ if (!entry->export_name()->IsUndefined(isolate)) {
+ continue; // Indirect export.
+ }
+
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules()->get(entry->module_request())),
+ isolate);
+
+ // Recurse.
+ FetchStarExports(requested_module, zone, visited);
+
+ // Collect all of [requested_module]'s exports that must be added to
+ // [module]'s exports (i.e. to [exports]). We record these in
+ // [more_exports]. Ambiguities (conflicting exports) are marked by mapping
+ // the name to undefined instead of a Cell.
+ Handle<ObjectHashTable> requested_exports(requested_module->exports(),
+ isolate);
+ for (int i = 0, n = requested_exports->Capacity(); i < n; ++i) {
+ Handle<Object> key(requested_exports->KeyAt(i), isolate);
+ if (!requested_exports->IsKey(isolate, *key)) continue;
+ Handle<String> name = Handle<String>::cast(key);
+
+ if (name->Equals(isolate->heap()->default_string())) continue;
+ if (!exports->Lookup(name)->IsTheHole(isolate)) continue;
+
+ Handle<Cell> cell(Cell::cast(requested_exports->ValueAt(i)), isolate);
+ auto insert_result = more_exports.insert(std::make_pair(name, cell));
+ if (!insert_result.second) {
+ auto it = insert_result.first;
+ if (*it->second == *cell || it->second->IsUndefined(isolate)) {
+ // We already recorded this mapping before, or the name is already
+ // known to be ambiguous. In either case, there's nothing to do.
+ } else {
+ DCHECK(it->second->IsCell());
+ // Different star exports provide different cells for this name, hence
+ // mark the name as ambiguous.
+ it->second = isolate->factory()->undefined_value();
+ }
+ }
+ }
+ }
+
+ // Copy [more_exports] into [exports].
+ for (const auto& elem : more_exports) {
+ if (elem.second->IsUndefined(isolate)) continue; // Ambiguous export.
+ DCHECK(!elem.first->Equals(isolate->heap()->default_string()));
+ DCHECK(elem.second->IsCell());
+ exports = ObjectHashTable::Put(exports, elem.first, elem.second);
+ }
+ module->set_exports(*exports);
+}
+
+} // anonymous namespace
+
+Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module,
+ int module_request) {
+ Isolate* isolate = module->GetIsolate();
+ Handle<Module> requested_module(
+ Module::cast(module->requested_modules()->get(module_request)), isolate);
+ return Module::GetModuleNamespace(requested_module);
+}
+
+Handle<JSModuleNamespace> Module::GetModuleNamespace(Handle<Module> module) {
+ Isolate* isolate = module->GetIsolate();
+
+ Handle<HeapObject> object(module->module_namespace(), isolate);
+ if (!object->IsUndefined(isolate)) {
+ // Namespace object already exists.
+ return Handle<JSModuleNamespace>::cast(object);
+ }
+
+ // Create the namespace object (initially empty).
+ Handle<JSModuleNamespace> ns = isolate->factory()->NewJSModuleNamespace();
+ ns->set_module(*module);
+ module->set_module_namespace(*ns);
+
+ // Collect the export names.
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ UnorderedModuleSet visited(&zone);
+ FetchStarExports(module, &zone, &visited);
+ Handle<ObjectHashTable> exports(module->exports(), isolate);
+ ZoneVector<Handle<String>> names(&zone);
+ names.reserve(exports->NumberOfElements());
+ for (int i = 0, n = exports->Capacity(); i < n; ++i) {
+ Handle<Object> key(exports->KeyAt(i), isolate);
+ if (!exports->IsKey(isolate, *key)) continue;
+ DCHECK(exports->ValueAt(i)->IsCell());
+ names.push_back(Handle<String>::cast(key));
+ }
+ DCHECK_EQ(static_cast<int>(names.size()), exports->NumberOfElements());
+
+ // Sort them alphabetically.
+ struct {
+ bool operator()(Handle<String> a, Handle<String> b) {
+ return String::Compare(a, b) == ComparisonResult::kLessThan;
+ }
+ } StringLess;
+ std::sort(names.begin(), names.end(), StringLess);
+
+ // Create the corresponding properties in the namespace object.
+ PropertyAttributes attr = DONT_DELETE;
+ for (const auto& name : names) {
+ JSObject::SetAccessor(
+ ns, Accessors::ModuleNamespaceEntryInfo(isolate, name, attr))
+ .Check();
+ }
+ JSObject::PreventExtensions(ns, THROW_ON_ERROR).ToChecked();
+
+ return ns;
+}
+
+MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
+ Isolate* isolate, Handle<Object> getter) {
+ if (getter->IsFunctionTemplateInfo()) {
+ Handle<FunctionTemplateInfo> fti =
+ Handle<FunctionTemplateInfo>::cast(getter);
+ // Check if the accessor uses a cached property.
+ if (!fti->cached_property_name()->IsTheHole(isolate)) {
+ return handle(Name::cast(fti->cached_property_name()));
+ }
+ }
+ return MaybeHandle<Name>();
+}
+
+// static
+ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
+ DCHECK_GE(type, FIRST_ARRAY_ITERATOR_TYPE);
+ DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
+
+ if (type <= LAST_ARRAY_KEY_ITERATOR_TYPE) {
+ // Should be ignored for key iterators.
+ return FAST_ELEMENTS;
+ } else {
+ ElementsKind kind;
+ if (type < FIRST_ARRAY_VALUE_ITERATOR_TYPE) {
+ // Convert `type` to a value iterator from an entries iterator
+ type = static_cast<InstanceType>(type +
+ (FIRST_ARRAY_VALUE_ITERATOR_TYPE -
+ FIRST_ARRAY_KEY_VALUE_ITERATOR_TYPE));
+ DCHECK_GE(type, FIRST_ARRAY_VALUE_ITERATOR_TYPE);
+ DCHECK_LE(type, LAST_ARRAY_ITERATOR_TYPE);
+ }
+
+ if (type <= JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) {
+ kind =
+ static_cast<ElementsKind>(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
+ (type - FIRST_ARRAY_VALUE_ITERATOR_TYPE));
+ DCHECK_LE(kind, LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ } else if (type < JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) {
+ kind = static_cast<ElementsKind>(
+ FIRST_FAST_ELEMENTS_KIND +
+ (type - JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE));
+ DCHECK_LE(kind, LAST_FAST_ELEMENTS_KIND);
+ } else {
+ // For any slow element cases, the actual elements kind is not known.
+ // Simply
+ // return a slow elements kind in this case. Users of this function must
+ // not
+ // depend on this.
+ return DICTIONARY_ELEMENTS;
+ }
+ DCHECK_LE(kind, LAST_ELEMENTS_KIND);
+ return kind;
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index fcc1f9457b..747a4f0511 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -71,6 +71,8 @@
// - JSValue
// - JSDate
// - JSMessageObject
+// - JSModuleNamespace
+// - JSFixedArrayIterator
// - JSProxy
// - FixedArrayBase
// - ByteArray
@@ -95,7 +97,6 @@
// - TemplateList
// - TransitionArray
// - ScopeInfo
-// - ModuleInfoEntry
// - ModuleInfo
// - ScriptContextTable
// - WeakFixedArray
@@ -142,6 +143,8 @@
// - Struct
// - Box
// - AccessorInfo
+// - PromiseResolveThenableJobInfo
+// - PromiseReactionJobInfo
// - AccessorPair
// - AccessCheckInfo
// - InterceptorInfo
@@ -155,6 +158,7 @@
// - CodeCache
// - PrototypeInfo
// - Module
+// - ModuleInfoEntry
// - WeakCell
//
// Formats of Object*:
@@ -164,6 +168,8 @@
namespace v8 {
namespace internal {
+struct InliningPosition;
+
enum KeyedAccessStoreMode {
STANDARD_STORE,
STORE_TRANSITION_TO_OBJECT,
@@ -397,10 +403,13 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(BOX_TYPE) \
- V(PROMISE_CONTAINER_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
+ V(PROMISE_REACTION_JOB_INFO_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
+ V(TUPLE3_TYPE) \
V(CONTEXT_EXTENSION_TYPE) \
V(MODULE_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
@@ -416,6 +425,8 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_ARGUMENTS_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
V(JS_GENERATOR_OBJECT_TYPE) \
+ V(JS_MODULE_NAMESPACE_TYPE) \
+ V(JS_FIXED_ARRAY_ITERATOR_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_API_OBJECT_TYPE) \
@@ -436,6 +447,46 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_ERROR_TYPE) \
V(JS_STRING_ITERATOR_TYPE) \
\
+ V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE) \
+ V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE) \
+ V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE) \
+ \
+ V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) \
+ \
V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE) \
V(DEBUG_INFO_TYPE) \
@@ -503,7 +554,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// manually.
#define STRUCT_LIST(V) \
V(BOX, Box, box) \
- V(PROMISE_CONTAINER, PromiseContainer, promise_container) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
+ promise_resolve_thenable_job_info) \
+ V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
+ promise_reaction_job_info) \
V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
@@ -519,7 +573,9 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
+ V(TUPLE3, Tuple3, tuple3) \
V(MODULE, Module, module) \
+ V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
V(CONTEXT_EXTENSION, ContextExtension, context_extension)
// We use the full 8 bits of the instance_type field to encode heap object
@@ -685,7 +741,8 @@ enum InstanceType {
TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
BOX_TYPE,
- PROMISE_CONTAINER_TYPE,
+ PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
+ PROMISE_REACTION_JOB_INFO_TYPE,
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
FIXED_ARRAY_TYPE,
@@ -695,8 +752,10 @@ enum InstanceType {
TRANSITION_ARRAY_TYPE,
PROPERTY_CELL_TYPE,
PROTOTYPE_INFO_TYPE,
+ TUPLE3_TYPE,
CONTEXT_EXTENSION_TYPE,
MODULE_TYPE,
+ MODULE_INFO_ENTRY_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -717,6 +776,8 @@ enum InstanceType {
JS_ARGUMENTS_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GENERATOR_OBJECT_TYPE,
+ JS_MODULE_NAMESPACE_TYPE,
+ JS_FIXED_ARRAY_ITERATOR_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
@@ -731,6 +792,47 @@ enum InstanceType {
JS_REGEXP_TYPE,
JS_ERROR_TYPE,
JS_STRING_ITERATOR_TYPE,
+
+ JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
+ JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
+ JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE,
+
+ JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+
+ JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+
+ JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_INT8_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_INT16_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_INT32_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE,
+
+ JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
+ JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE,
+
JS_BOUND_FUNCTION_TYPE,
JS_FUNCTION_TYPE, // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
@@ -767,6 +869,18 @@ enum InstanceType {
// an empty fixed array as elements backing store. This is true for string
// wrappers.
LAST_CUSTOM_ELEMENTS_RECEIVER = JS_VALUE_TYPE,
+
+ FIRST_ARRAY_KEY_ITERATOR_TYPE = JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
+ LAST_ARRAY_KEY_ITERATOR_TYPE = JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE,
+
+ FIRST_ARRAY_KEY_VALUE_ITERATOR_TYPE = JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+ LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE = JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE,
+
+ FIRST_ARRAY_VALUE_ITERATOR_TYPE = JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE,
+ LAST_ARRAY_VALUE_ITERATOR_TYPE = JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE,
+
+ FIRST_ARRAY_ITERATOR_TYPE = FIRST_ARRAY_KEY_ITERATOR_TYPE,
+ LAST_ARRAY_ITERATOR_TYPE = LAST_ARRAY_VALUE_ITERATOR_TYPE,
};
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
@@ -775,8 +889,8 @@ STATIC_ASSERT(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
STATIC_ASSERT(ODDBALL_TYPE == Internals::kOddballType);
STATIC_ASSERT(FOREIGN_TYPE == Internals::kForeignType);
-
-std::ostream& operator<<(std::ostream& os, InstanceType instance_type);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ InstanceType instance_type);
#define FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(V) \
V(BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE) \
@@ -886,6 +1000,7 @@ class LayoutDescriptor;
class LiteralsArray;
class LookupIterator;
class FieldType;
+class Module;
class ModuleDescriptor;
class ModuleInfoEntry;
class ModuleInfo;
@@ -970,6 +1085,8 @@ template <class C> inline bool Is(Object* obj);
V(JSObject) \
V(JSContextExtensionObject) \
V(JSGeneratorObject) \
+ V(JSModuleNamespace) \
+ V(JSFixedArrayIterator) \
V(Map) \
V(DescriptorArray) \
V(FrameArray) \
@@ -985,11 +1102,11 @@ template <class C> inline bool Is(Object* obj);
V(FixedDoubleArray) \
V(WeakFixedArray) \
V(ArrayList) \
+ V(RegExpMatchInfo) \
V(Context) \
V(ScriptContextTable) \
V(NativeContext) \
V(ScopeInfo) \
- V(ModuleInfoEntry) \
V(ModuleInfo) \
V(JSBoundFunction) \
V(JSFunction) \
@@ -1008,6 +1125,7 @@ template <class C> inline bool Is(Object* obj);
V(JSArrayBufferView) \
V(JSCollection) \
V(JSTypedArray) \
+ V(JSArrayIterator) \
V(JSDataView) \
V(JSProxy) \
V(JSError) \
@@ -1383,7 +1501,7 @@ class Object {
// Checks whether this object has the same value as the given one. This
// function is implemented according to ES5, section 9.12 and can be used
// to implement the Harmony "egal" function.
- bool SameValue(Object* other);
+ V8_EXPORT_PRIVATE bool SameValue(Object* other);
// Checks whether this object has the same value as the given one.
// +0 and -0 are treated equal. Everything else is the same as SameValue.
@@ -1445,7 +1563,7 @@ class Object {
friend class StringStream;
// Return the map of the root of object's prototype chain.
- Map* GetRootMap(Isolate* isolate);
+ Map* GetPrototypeChainRootMap(Isolate* isolate);
// Helper for SetProperty and SetSuperProperty.
// Return value is only meaningful if [found] is set to true on return.
@@ -1470,9 +1588,7 @@ struct Brief {
const Object* value;
};
-
-std::ostream& operator<<(std::ostream& os, const Brief& v);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, const Brief& v);
// Smi represents integer Numbers that can be stored in 31 bits.
// Smis are immediate which means they are NOT allocated in the heap.
@@ -1507,9 +1623,10 @@ class Smi: public Object {
DECLARE_CAST(Smi)
// Dispatched behavior.
- void SmiPrint(std::ostream& os) const; // NOLINT
+ V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const; // NOLINT
DECLARE_VERIFIER(Smi)
+ V8_EXPORT_PRIVATE static Smi* const kZero;
static const int kMinValue =
(static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
static const int kMaxValue = -(kMinValue + 1);
@@ -1735,7 +1852,7 @@ class HeapNumber: public HeapObject {
// Dispatched behavior.
bool HeapNumberBooleanValue();
- void HeapNumberPrint(std::ostream& os); // NOLINT
+ V8_EXPORT_PRIVATE void HeapNumberPrint(std::ostream& os); // NOLINT
DECLARE_VERIFIER(HeapNumber)
inline int get_exponent();
@@ -2236,6 +2353,11 @@ class JSObject: public JSReceiver {
static bool UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate);
static void InvalidatePrototypeChains(Map* map);
+ // Updates prototype chain tracking information when an object changes its
+ // map from |old_map| to |new_map|.
+ static void NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
+ Isolate* isolate);
+
// Utility used by many Array builtins and runtime functions
static inline bool PrototypeHasNoElements(Isolate* isolate, JSObject* object);
@@ -2724,7 +2846,7 @@ class FixedArray: public FixedArrayBase {
// Setter that uses write barrier.
inline void set(int index, Object* value);
- inline bool is_the_hole(int index);
+ inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
inline void set(int index, Smi* value);
@@ -2814,6 +2936,7 @@ class FixedDoubleArray: public FixedArrayBase {
inline void set_the_hole(int index);
// Checking for the hole.
+ inline bool is_the_hole(Isolate* isolate, int index);
inline bool is_the_hole(int index);
// Garbage collection support.
@@ -2872,7 +2995,7 @@ class WeakFixedArray : public FixedArray {
inline int Length() const;
inline bool IsEmptySlot(int index) const;
- static Object* Empty() { return Smi::FromInt(0); }
+ static Object* Empty() { return Smi::kZero; }
class Iterator {
public:
@@ -2944,8 +3067,59 @@ class ArrayList : public FixedArray {
DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
};
+// The property RegExpMatchInfo includes the matchIndices
+// array of the last successful regexp match (an array of start/end index
+// pairs for the match and all the captured substrings), the invariant is
+// that there are at least two capture indices. The array also contains
+// the subject string for the last successful match.
+// After creation the result must be treated as a FixedArray in all regards.
+class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
+ public:
+ // Returns the number of captures, which is defined as the length of the
+ // matchIndices objects of the last match. matchIndices contains two indices
+ // for each capture (including the match itself), i.e. 2 * #captures + 2.
+ inline int NumberOfCaptureRegisters();
+ inline void SetNumberOfCaptureRegisters(int value);
+
+ // Returns the subject string of the last match.
+ inline String* LastSubject();
+ inline void SetLastSubject(String* value);
+
+ // Like LastSubject, but modifiable by the user.
+ inline Object* LastInput();
+ inline void SetLastInput(Object* value);
+
+ // Returns the i'th capture index, 0 <= i < NumberOfCaptures(). Capture(0) and
+ // Capture(1) determine the start- and endpoint of the match itself.
+ inline int Capture(int i);
+ inline void SetCapture(int i, int value);
+
+ // Reserves space for captures.
+ static Handle<RegExpMatchInfo> ReserveCaptures(
+ Handle<RegExpMatchInfo> match_info, int capture_count);
+
+ DECLARE_CAST(RegExpMatchInfo)
+
+ static const int kNumberOfCapturesIndex = 0;
+ static const int kLastSubjectIndex = 1;
+ static const int kLastInputIndex = 2;
+ static const int kFirstCaptureIndex = 3;
+ static const int kLastMatchOverhead = kFirstCaptureIndex;
+
+ static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
+ static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
+ static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
+ static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
+
+ // Every match info is guaranteed to have enough space to store two captures.
+ static const int kInitialCaptureIndices = 2;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
+};
+
#define FRAME_ARRAY_FIELD_LIST(V) \
- V(WasmObject, Object) \
+ V(WasmInstance, Object) \
V(WasmFunctionIndex, Smi) \
V(Receiver, Object) \
V(Function, JSFunction) \
@@ -2963,14 +3137,16 @@ class FrameArray : public FixedArray {
#undef DECLARE_FRAME_ARRAY_ACCESSORS
inline bool IsWasmFrame(int frame_ix) const;
+ inline bool IsAsmJsWasmFrame(int frame_ix) const;
inline int FrameCount() const;
void ShrinkToFit();
// Flags.
static const int kIsWasmFrame = 1 << 0;
- static const int kIsStrict = 1 << 1;
- static const int kForceConstructor = 1 << 2;
+ static const int kIsAsmJsWasmFrame = 1 << 1;
+ static const int kIsStrict = 1 << 2;
+ static const int kForceConstructor = 1 << 3;
static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
Handle<Object> receiver,
@@ -2978,7 +3154,7 @@ class FrameArray : public FixedArray {
Handle<AbstractCode> code, int offset,
int flags);
static Handle<FrameArray> AppendWasmFrame(Handle<FrameArray> in,
- Handle<Object> wasm_object,
+ Handle<Object> wasm_instance,
int wasm_function_index,
Handle<AbstractCode> code,
int offset, int flags);
@@ -2993,7 +3169,7 @@ class FrameArray : public FixedArray {
//
// with internal offsets as below:
- static const int kWasmObjectOffset = 0;
+ static const int kWasmInstanceOffset = 0;
static const int kWasmFunctionIndexOffset = 1;
static const int kReceiverOffset = 0;
@@ -3323,6 +3499,9 @@ class HashTableBase : public FixedArray {
// Constant used for denoting a absent entry.
static const int kNotFound = -1;
+ // Minimum capacity for newly created hash tables.
+ static const int kMinCapacity = 4;
+
protected:
// Update the number of elements in the hash table.
inline void SetNumberOfElements(int nof);
@@ -3400,8 +3579,11 @@ class HashTable : public HashTableBase {
static const int kEntryKeyIndex = 0;
static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
- static const int kCapacityOffset =
- kHeaderSize + kCapacityIndex * kPointerSize;
+ // Maximal capacity of HashTable. Based on maximal length of underlying
+ // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
+ // cannot overflow.
+ static const int kMaxCapacity =
+ (FixedArray::kMaxLength - kElementsStartIndex) / kEntrySize;
// Returns the index for an entry (of the key)
static inline int EntryToIndex(int entry) {
@@ -3438,12 +3620,6 @@ class HashTable : public HashTableBase {
set(kCapacityIndex, Smi::FromInt(capacity));
}
- // Maximal capacity of HashTable. Based on maximal length of underlying
- // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
- // cannot overflow.
- static const int kMaxCapacity =
- (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
-
private:
// Returns _expected_ if one of entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
@@ -3672,23 +3848,22 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
static Handle<FixedArray> BuildIterationIndicesArray(
Handle<Derived> dictionary);
+ static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
+ static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
+
protected:
// Generic at put operation.
MUST_USE_RESULT static Handle<Derived> AtPut(
Handle<Derived> dictionary,
Key key,
Handle<Object> value);
-
// Add entry to dictionary. Returns entry value.
static int AddEntry(Handle<Derived> dictionary, Key key, Handle<Object> value,
PropertyDetails details, uint32_t hash);
-
// Generate new enumeration indices to avoid enumeration index overflow.
// Returns iteration indices array for the |dictionary|.
static Handle<FixedArray> GenerateNewEnumerationIndices(
Handle<Derived> dictionary);
- static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
- static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
};
@@ -3760,6 +3935,7 @@ class NameDictionary
static const int kEntryValueIndex = 1;
static const int kEntryDetailsIndex = 2;
+ static const int kInitialCapacity = 2;
};
@@ -3950,6 +4126,9 @@ class ObjectHashTable: public HashTable<ObjectHashTable,
Object* Lookup(Handle<Object> key, int32_t hash);
Object* Lookup(Isolate* isolate, Handle<Object> key, int32_t hash);
+ // Returns the value at entry.
+ Object* ValueAt(int entry);
+
// Adds (or overwrites) the value associated with the given key.
static Handle<ObjectHashTable> Put(Handle<ObjectHashTable> table,
Handle<Object> key,
@@ -4377,8 +4556,9 @@ class ScopeInfo : public FixedArray {
VariableMode* mode, InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
- // Lookup metadata of a MODULE-allocated variable. Return a negative value if
- // there is no module variable with the given name.
+ // Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
+ // module variable with the given name (the index value of a MODULE variable
+ // is never 0).
int ModuleIndex(Handle<String> name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
@@ -4428,7 +4608,7 @@ class ScopeInfo : public FixedArray {
static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
// Serializes empty scope info.
- static ScopeInfo* Empty(Isolate* isolate);
+ V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
#ifdef DEBUG
void Print();
@@ -4517,6 +4697,14 @@ class ScopeInfo : public FixedArray {
VariableLocation* location, InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
+ // Get metadata of i-th MODULE-allocated variable, where 0 <= i <
+ // ModuleVariableCount. The metadata is returned via out-arguments, which may
+ // be nullptr if the corresponding information is not requested
+ void ModuleVariable(int i, String** name, int* index,
+ VariableMode* mode = nullptr,
+ InitializationFlag* init_flag = nullptr,
+ MaybeAssignedFlag* maybe_assigned_flag = nullptr);
+
// Used for the function name variable for named function expressions, and for
// the receiver.
enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
@@ -4556,58 +4744,6 @@ class ScopeInfo : public FixedArray {
friend class ScopeIterator;
};
-class ModuleInfoEntry : public FixedArray {
- public:
- DECLARE_CAST(ModuleInfoEntry)
- static Handle<ModuleInfoEntry> New(Isolate* isolate,
- Handle<Object> export_name,
- Handle<Object> local_name,
- Handle<Object> import_name,
- Handle<Object> module_request);
- inline Object* export_name() const;
- inline Object* local_name() const;
- inline Object* import_name() const;
- inline Object* module_request() const;
-
- private:
- friend class Factory;
- enum {
- kExportNameIndex,
- kLocalNameIndex,
- kImportNameIndex,
- kModuleRequestIndex,
- kLength
- };
-};
-
-// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
-class ModuleInfo : public FixedArray {
- public:
- DECLARE_CAST(ModuleInfo)
- static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
- ModuleDescriptor* descr);
- inline FixedArray* module_requests() const;
- inline FixedArray* special_exports() const;
- inline FixedArray* regular_exports() const;
- inline FixedArray* namespace_imports() const;
- inline FixedArray* regular_imports() const;
-
-#ifdef DEBUG
- inline bool Equals(ModuleInfo* other) const;
-#endif
-
- private:
- friend class Factory;
- enum {
- kModuleRequestsIndex,
- kSpecialExportsIndex,
- kRegularExportsIndex,
- kNamespaceImportsIndex,
- kRegularImportsIndex,
- kLength
- };
-};
-
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
@@ -4726,7 +4862,6 @@ class ByteArray: public FixedArrayBase {
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
- inline const byte* data() const;
// Copy in / copy out whole byte slices.
inline void copy_out(int index, byte* buffer, int length);
@@ -4774,6 +4909,32 @@ class ByteArray: public FixedArrayBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
};
+// Wrapper class for ByteArray which can store arbitrary C++ classes, as long
+// as they can be copied with memcpy.
+template <class T>
+class PodArray : public ByteArray {
+ public:
+ static Handle<PodArray<T>> New(Isolate* isolate, int length,
+ PretenureFlag pretenure = NOT_TENURED);
+ void copy_out(int index, T* result) {
+ ByteArray::copy_out(index * sizeof(T), reinterpret_cast<byte*>(result),
+ sizeof(T));
+ }
+ T get(int index) {
+ T result;
+ copy_out(index, &result);
+ return result;
+ }
+ void set(int index, const T& value) {
+ copy_in(index * sizeof(T), reinterpret_cast<const byte*>(&value),
+ sizeof(T));
+ }
+ int length() { return ByteArray::length() / sizeof(T); }
+ DECLARE_CAST(PodArray<T>)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PodArray<T>);
+};
// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray : public FixedArrayBase {
@@ -5006,7 +5167,6 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
#undef FIXED_TYPED_ARRAY_TRAITS
-
// DeoptimizationInputData is a fixed array used to hold the deoptimization
// data for code generated by the Hydrogen/Lithium compiler. It also
// contains information about functions that were inlined. If N different
@@ -5025,7 +5185,8 @@ class DeoptimizationInputData: public FixedArray {
static const int kOptimizationIdIndex = 5;
static const int kSharedFunctionInfoIndex = 6;
static const int kWeakCellCacheIndex = 7;
- static const int kFirstDeoptEntryIndex = 8;
+ static const int kInliningPositionsIndex = 8;
+ static const int kFirstDeoptEntryIndex = 9;
// Offsets of deopt entry elements relative to the start of the entry.
static const int kAstIdRawOffset = 0;
@@ -5047,6 +5208,7 @@ class DeoptimizationInputData: public FixedArray {
DECLARE_ELEMENT_ACCESSORS(OptimizationId, Smi)
DECLARE_ELEMENT_ACCESSORS(SharedFunctionInfo, Object)
DECLARE_ELEMENT_ACCESSORS(WeakCellCache, Object)
+ DECLARE_ELEMENT_ACCESSORS(InliningPositions, PodArray<InliningPosition>)
#undef DECLARE_ELEMENT_ACCESSORS
@@ -5068,6 +5230,12 @@ class DeoptimizationInputData: public FixedArray {
inline int DeoptCount();
+ static const int kNotInlinedIndex = -1;
+
+ // Returns the inlined function at the given position in LiteralArray, or the
+ // outer function if index == kNotInlinedIndex.
+ class SharedFunctionInfo* GetInlinedFunction(int index);
+
// Allocates a DeoptimizationInputData.
static Handle<DeoptimizationInputData> New(Isolate* isolate,
int deopt_entry_count,
@@ -5088,7 +5256,6 @@ class DeoptimizationInputData: public FixedArray {
static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
};
-
// DeoptimizationOutputData is a fixed array used to hold the deoptimization
// data for code generated by the full compiler.
// The format of the these objects is
@@ -5128,7 +5295,7 @@ class LiteralsArray : public FixedArray {
public:
static const int kVectorIndex = 0;
static const int kFirstLiteralIndex = 1;
- static const int kFeedbackVectorOffset;
+ V8_EXPORT_PRIVATE static const int kFeedbackVectorOffset;
static const int kOffsetToFirstLiteral;
static int OffsetOfLiteralAt(int index) {
@@ -5217,11 +5384,14 @@ class Code: public HeapObject {
static const int kPrologueOffsetNotSet = -1;
-#ifdef ENABLE_DISASSEMBLER
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
// Printing
static const char* ICState2String(InlineCacheState state);
static void PrintExtraICState(std::ostream& os, // NOLINT
Kind kind, ExtraICState extra);
+#endif // defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+
+#ifdef ENABLE_DISASSEMBLER
void Disassemble(const char* name, std::ostream& os); // NOLINT
#endif // ENABLE_DISASSEMBLER
@@ -5825,9 +5995,9 @@ class DependentCode: public FixedArray {
// Group of code that depends on global property values in property cells
// not being changed.
kPropertyCellChangedGroup,
- // Group of code that omit run-time type checks for the field(s) introduced
- // by this map.
- kFieldTypeGroup,
+ // Group of code that omit run-time checks for field(s) introduced by
+ // this map, i.e. for the field type.
+ kFieldOwnerGroup,
// Group of code that omit run-time type checks for initial maps of
// constructors.
kInitialMapChangedGroup,
@@ -6143,6 +6313,14 @@ class Map: public HeapObject {
static const int kPrototypeChainValid = 0;
static const int kPrototypeChainInvalid = 1;
+ // Return the map of the root of object's prototype chain.
+ Map* GetPrototypeChainRootMap(Isolate* isolate);
+
+ // Returns a WeakCell object containing given prototype. The cell is cached
+ // in PrototypeInfo which is created lazily.
+ static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
+ Handle<JSObject> prototype, Isolate* isolate);
+
Map* FindRootMap();
Map* FindFieldOwner(int descriptor);
@@ -6665,33 +6843,58 @@ class Struct: public HeapObject {
DECLARE_CAST(Struct)
};
-// A container struct to hold state required for
-// PromiseResolveThenableJob. {before, after}_debug_event could
-// potentially be undefined if the debugger is turned off.
-class PromiseContainer : public Struct {
+// A container struct to hold state required for PromiseResolveThenableJob.
+class PromiseResolveThenableJobInfo : public Struct {
public:
DECL_ACCESSORS(thenable, JSReceiver)
DECL_ACCESSORS(then, JSReceiver)
DECL_ACCESSORS(resolve, JSFunction)
DECL_ACCESSORS(reject, JSFunction)
- DECL_ACCESSORS(before_debug_event, Object)
- DECL_ACCESSORS(after_debug_event, Object)
+ DECL_ACCESSORS(debug_id, Object)
+ DECL_ACCESSORS(debug_name, Object)
+ DECL_ACCESSORS(context, Context)
static const int kThenableOffset = Struct::kHeaderSize;
static const int kThenOffset = kThenableOffset + kPointerSize;
static const int kResolveOffset = kThenOffset + kPointerSize;
static const int kRejectOffset = kResolveOffset + kPointerSize;
- static const int kBeforeDebugEventOffset = kRejectOffset + kPointerSize;
- static const int kAfterDebugEventOffset =
- kBeforeDebugEventOffset + kPointerSize;
- static const int kSize = kAfterDebugEventOffset + kPointerSize;
+ static const int kDebugIdOffset = kRejectOffset + kPointerSize;
+ static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
+ static const int kContextOffset = kDebugNameOffset + kPointerSize;
+ static const int kSize = kContextOffset + kPointerSize;
- DECLARE_CAST(PromiseContainer)
- DECLARE_PRINTER(PromiseContainer)
- DECLARE_VERIFIER(PromiseContainer)
+ DECLARE_CAST(PromiseResolveThenableJobInfo)
+ DECLARE_PRINTER(PromiseResolveThenableJobInfo)
+ DECLARE_VERIFIER(PromiseResolveThenableJobInfo)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseContainer);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
+};
+
+// Struct to hold state required for PromiseReactionJob.
+class PromiseReactionJobInfo : public Struct {
+ public:
+ DECL_ACCESSORS(value, Object)
+ DECL_ACCESSORS(tasks, Object)
+ DECL_ACCESSORS(deferred, Object)
+ DECL_ACCESSORS(debug_id, Object)
+ DECL_ACCESSORS(debug_name, Object)
+ DECL_ACCESSORS(context, Context)
+
+ static const int kValueOffset = Struct::kHeaderSize;
+ static const int kTasksOffset = kValueOffset + kPointerSize;
+ static const int kDeferredOffset = kTasksOffset + kPointerSize;
+ static const int kDebugIdOffset = kDeferredOffset + kPointerSize;
+ static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
+ static const int kContextOffset = kDebugNameOffset + kPointerSize;
+ static const int kSize = kContextOffset + kPointerSize;
+
+ DECLARE_CAST(PromiseReactionJobInfo)
+ DECLARE_PRINTER(PromiseReactionJobInfo)
+ DECLARE_VERIFIER(PromiseReactionJobInfo)
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
};
// A simple one-element struct, useful where smis need to be boxed.
@@ -6719,6 +6922,9 @@ class PrototypeInfo : public Struct {
public:
static const int UNREGISTERED = -1;
+ // [weak_cell]: A WeakCell containing this prototype. ICs cache the cell here.
+ DECL_ACCESSORS(weak_cell, Object)
+
// [prototype_users]: WeakFixedArray containing maps using this prototype,
// or Smi(0) if uninitialized.
DECL_ACCESSORS(prototype_users, Object)
@@ -6752,7 +6958,8 @@ class PrototypeInfo : public Struct {
DECLARE_PRINTER(PrototypeInfo)
DECLARE_VERIFIER(PrototypeInfo)
- static const int kPrototypeUsersOffset = HeapObject::kHeaderSize;
+ static const int kWeakCellOffset = HeapObject::kHeaderSize;
+ static const int kPrototypeUsersOffset = kWeakCellOffset + kPointerSize;
static const int kRegistrySlotOffset = kPrototypeUsersOffset + kPointerSize;
static const int kValidityCellOffset = kRegistrySlotOffset + kPointerSize;
static const int kObjectCreateMap = kValidityCellOffset + kPointerSize;
@@ -6768,6 +6975,26 @@ class PrototypeInfo : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
};
+class Tuple3 : public Struct {
+ public:
+ DECL_ACCESSORS(value1, Object)
+ DECL_ACCESSORS(value2, Object)
+ DECL_ACCESSORS(value3, Object)
+
+ DECLARE_CAST(Tuple3)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Tuple3)
+ DECLARE_VERIFIER(Tuple3)
+
+ static const int kValue1Offset = HeapObject::kHeaderSize;
+ static const int kValue2Offset = kValue1Offset + kPointerSize;
+ static const int kValue3Offset = kValue2Offset + kPointerSize;
+ static const int kSize = kValue3Offset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple3);
+};
// Pair used to store both a ScopeInfo and an extension object in the extension
// slot of a block, catch, or with context. Needed in the rare case where a
@@ -6869,13 +7096,9 @@ class Script: public Struct {
// [source_mapping_url]: sourceMappingURL magic comment
DECL_ACCESSORS(source_mapping_url, Object)
- // [wasm_object]: the wasm object this script belongs to.
- // This must only be called if the type of this script is TYPE_WASM.
- DECL_ACCESSORS(wasm_object, JSObject)
-
- // [wasm_function_index]: the wasm function index this script belongs to.
+ // [wasm_compiled_module]: the compiled wasm module this script belongs to.
// This must only be called if the type of this script is TYPE_WASM.
- DECL_INT_ACCESSORS(wasm_function_index)
+ DECL_ACCESSORS(wasm_compiled_module, Object)
// [compilation_type]: how the the script was compiled. Encoded in the
// 'flags' field.
@@ -6916,14 +7139,6 @@ class Script: public Struct {
// Init line_ends array with source code positions of line ends.
static void InitLineEnds(Handle<Script> script);
- // Convert code offset into column number.
- static int GetColumnNumber(Handle<Script> script, int code_offset);
-
- // Convert code offset into (zero-based) line number.
- // The non-handlified version does not allocate, but may be much slower.
- static int GetLineNumber(Handle<Script> script, int code_offset);
- int GetLineNumber(int code_pos);
-
// Carries information about a source position.
struct PositionInfo {
PositionInfo() : line(-1), column(-1), line_start(-1), line_end(-1) {}
@@ -6931,7 +7146,7 @@ class Script: public Struct {
int line; // Zero-based line number.
int column; // Zero-based column number.
int line_start; // Position of first character in line.
- int line_end; // Position of last (non-linebreak) character in line.
+ int line_end; // Position of final linebreak character in line.
};
// Specifies whether to add offsets to position infos.
@@ -6940,8 +7155,20 @@ class Script: public Struct {
// Retrieves information about the given position, optionally with an offset.
// Returns false on failure, and otherwise writes into the given info object
// on success.
+ // The static method should is preferable for handlified callsites because it
+ // initializes the line ends array, avoiding expensive recomputations.
+ // The non-static version is not allocating and safe for unhandlified
+ // callsites.
+ static bool GetPositionInfo(Handle<Script> script, int position,
+ PositionInfo* info, OffsetFlag offset_flag);
bool GetPositionInfo(int position, PositionInfo* info,
- OffsetFlag offset_flag);
+ OffsetFlag offset_flag) const;
+
+ // Wrappers for GetPositionInfo
+ static int GetColumnNumber(Handle<Script> script, int code_offset);
+ int GetColumnNumber(int code_pos) const;
+ static int GetLineNumber(Handle<Script> script, int code_offset);
+ int GetLineNumber(int code_pos) const;
// Get the JS object wrapping the given script; create it if none exists.
static Handle<JSObject> GetWrapper(Handle<Script> script);
@@ -6985,8 +7212,6 @@ class Script: public Struct {
static const int kSize = kSourceMappingUrlOffset + kPointerSize;
private:
- int GetLineNumberWithArray(int code_pos);
-
// Bit positions in the flags field.
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
@@ -7072,6 +7297,7 @@ class Script: public Struct {
V(Number, isInteger, NumberIsInteger) \
V(Number, isNaN, NumberIsNaN) \
V(Number, isSafeInteger, NumberIsSafeInteger) \
+ V(Number, parseFloat, NumberParseFloat) \
V(Number, parseInt, NumberParseInt) \
V(Number.prototype, toString, NumberToString)
@@ -7091,6 +7317,10 @@ enum BuiltinFunctionId {
kMathPowHalf,
// These are manually assigned to special getters during bootstrapping.
kArrayBufferByteLength,
+ kArrayEntries,
+ kArrayKeys,
+ kArrayValues,
+ kArrayIteratorNext,
kDataViewBuffer,
kDataViewByteLength,
kDataViewByteOffset,
@@ -7105,8 +7335,12 @@ enum BuiltinFunctionId {
kGlobalIsNaN,
kTypedArrayByteLength,
kTypedArrayByteOffset,
+ kTypedArrayEntries,
+ kTypedArrayKeys,
kTypedArrayLength,
+ kTypedArrayValues,
kSharedArrayBufferByteLength,
+ kStringIterator,
kStringIteratorNext,
};
@@ -7133,6 +7367,14 @@ class SharedFunctionInfo: public HeapObject {
// a Code object or a BytecodeArray.
inline AbstractCode* abstract_code();
+ // Tells whether or not this shared function info is interpreted.
+ //
+ // Note: function->IsInterpreted() does not necessarily return the same value
+ // as function->shared()->IsInterpreted() because the shared function info
+ // could tier up to baseline via a different function closure. The interpreter
+ // entry stub will "self-heal" this divergence when the function is executed.
+ inline bool IsInterpreted() const;
+
inline void ReplaceCode(Code* code);
inline bool HasBaselineCode() const;
@@ -7169,10 +7411,6 @@ class SharedFunctionInfo: public HeapObject {
static Handle<LiteralsArray> FindOrCreateLiterals(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
- // Add or update entry in the optimized code map for context-independent code.
- static void AddSharedCodeToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
- Handle<Code> code);
-
// Add or update entry in the optimized code map for context-dependent code.
// If {code} is not given, then an existing entry's code won't be overwritten.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
@@ -7183,12 +7421,11 @@ class SharedFunctionInfo: public HeapObject {
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
- static void SetScript(Handle<SharedFunctionInfo> shared,
- Handle<Object> script_object);
+ V8_EXPORT_PRIVATE static void SetScript(Handle<SharedFunctionInfo> shared,
+ Handle<Object> script_object);
// Layout description of the optimized code map.
- static const int kSharedCodeIndex = 0;
- static const int kEntriesStart = 1;
+ static const int kEntriesStart = 0;
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
@@ -7298,7 +7535,7 @@ class SharedFunctionInfo: public HeapObject {
inline String* inferred_name();
inline void set_inferred_name(String* inferred_name);
- // [script info]: Script from which the function originates.
+ // [script]: Script from which the function originates.
DECL_ACCESSORS(script, Object)
// [num_literals]: Number of literals used by this function.
@@ -7368,12 +7605,6 @@ class SharedFunctionInfo: public HeapObject {
// when doing GC if we expect that the function will no longer be used.
DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
- // Indicates if this function can be lazy compiled without a context.
- // This is used to determine if we can force compilation without reaching
- // the function through program execution but through other means (e.g. heap
- // iteration by the debugger).
- DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
-
// Indicates whether optimizations have been disabled for this
// shared function info. If a function is repeatedly optimized or if
// we cannot optimize the function we disable optimization to avoid
@@ -7442,6 +7673,9 @@ class SharedFunctionInfo: public HeapObject {
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
+ // Whether this function was marked to be tiered up.
+ DECL_BOOLEAN_ACCESSORS(marked_for_tier_up)
+
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
@@ -7691,7 +7925,7 @@ class SharedFunctionInfo: public HeapObject {
enum CompilerHints {
// byte 0
kAllowLazyCompilation,
- kAllowLazyCompilationWithoutContext,
+ kMarkedForTierUp,
kOptimizationDisabled,
kNeverCompiled,
kNative,
@@ -7741,7 +7975,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kCompilerHintsSize = kIntSize;
#endif
- STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount <=
+ STATIC_ASSERT(SharedFunctionInfo::kCompilerHintsCount +
+ SharedFunctionInfo::kCompilerHintsSmiTagSize <=
SharedFunctionInfo::kCompilerHintsSize * kBitsPerByte);
public:
@@ -7758,6 +7993,9 @@ class SharedFunctionInfo: public HeapObject {
static const int kAllFunctionKindBitsMask = FunctionKindBits::kMask
<< kCompilerHintsSmiTagSize;
+ static const int kMarkedForTierUpBit =
+ kMarkedForTierUp + kCompilerHintsSmiTagSize;
+
// Constants for optimizing codegen for strict mode function and
// native tests.
// Allows to use byte-width instructions.
@@ -7770,6 +8008,9 @@ class SharedFunctionInfo: public HeapObject {
FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
STATIC_ASSERT(kClassConstructorBitsWithinByte < (1 << kBitsPerByte));
+ static const int kMarkedForTierUpBitWithinByte =
+ kMarkedForTierUpBit % kBitsPerByte;
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define BYTE_OFFSET(compiler_hint) \
kCompilerHintsOffset + \
@@ -7786,12 +8027,13 @@ class SharedFunctionInfo: public HeapObject {
static const int kFunctionKindByteOffset = BYTE_OFFSET(kFunctionKind);
static const int kHasDuplicateParametersByteOffset =
BYTE_OFFSET(kHasDuplicateParameters);
+ static const int kMarkedForTierUpByteOffset = BYTE_OFFSET(kMarkedForTierUp);
#undef BYTE_OFFSET
private:
// Returns entry from optimized code map for specified context and OSR entry.
- // The result is either kNotFound, kSharedCodeIndex for context-independent
- // entry or a start index of the context-dependent entry.
+ // The result is either kNotFound, or a start index of the context-dependent
+ // entry.
int SearchOptimizedCodeMapEntry(Context* native_context,
BailoutId osr_ast_id);
@@ -7875,6 +8117,110 @@ class JSGeneratorObject: public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
+class ModuleInfoEntry : public Struct {
+ public:
+ DECLARE_CAST(ModuleInfoEntry)
+ DECLARE_PRINTER(ModuleInfoEntry)
+ DECLARE_VERIFIER(ModuleInfoEntry)
+
+ DECL_ACCESSORS(export_name, Object)
+ DECL_ACCESSORS(local_name, Object)
+ DECL_ACCESSORS(import_name, Object)
+ DECL_INT_ACCESSORS(module_request)
+ DECL_INT_ACCESSORS(cell_index)
+ DECL_INT_ACCESSORS(beg_pos)
+ DECL_INT_ACCESSORS(end_pos)
+
+ static Handle<ModuleInfoEntry> New(Isolate* isolate,
+ Handle<Object> export_name,
+ Handle<Object> local_name,
+ Handle<Object> import_name,
+ int module_request, int cell_index,
+ int beg_pos, int end_pos);
+
+ static const int kExportNameOffset = HeapObject::kHeaderSize;
+ static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
+ static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
+ static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
+ static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
+ static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
+ static const int kEndPosOffset = kBegPosOffset + kPointerSize;
+ static const int kSize = kEndPosOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
+};
+
+// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
+class ModuleInfo : public FixedArray {
+ public:
+ DECLARE_CAST(ModuleInfo)
+
+ static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
+ ModuleDescriptor* descr);
+
+ inline FixedArray* module_requests() const;
+ inline FixedArray* special_exports() const;
+ inline FixedArray* regular_exports() const;
+ inline FixedArray* namespace_imports() const;
+ inline FixedArray* regular_imports() const;
+
+ // Accessors for [regular_exports].
+ int RegularExportCount() const;
+ String* RegularExportLocalName(int i) const;
+ int RegularExportCellIndex(int i) const;
+ FixedArray* RegularExportExportNames(int i) const;
+
+ static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
+ Handle<String> local_name);
+
+#ifdef DEBUG
+ inline bool Equals(ModuleInfo* other) const;
+#endif
+
+ private:
+ friend class Factory;
+ friend class ModuleDescriptor;
+ enum {
+ kModuleRequestsIndex,
+ kSpecialExportsIndex,
+ kRegularExportsIndex,
+ kNamespaceImportsIndex,
+ kRegularImportsIndex,
+ kLength
+ };
+ enum {
+ kRegularExportLocalNameOffset,
+ kRegularExportCellIndexOffset,
+ kRegularExportExportNamesOffset,
+ kRegularExportLength
+ };
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
+};
+// When importing a module namespace (import * as foo from "bar"), a
+// JSModuleNamespace object (representing module "bar") is created and bound to
+// the declared variable (foo). A module can have at most one namespace object.
+class JSModuleNamespace : public JSObject {
+ public:
+ DECLARE_CAST(JSModuleNamespace)
+ DECLARE_PRINTER(JSModuleNamespace)
+ DECLARE_VERIFIER(JSModuleNamespace)
+
+ // The actual module whose namespace is being represented.
+ DECL_ACCESSORS(module, Module)
+
+ // Retrieve the value exported by [module] under the given [name]. If there is
+ // no such export, return Just(undefined). If the export is uninitialized,
+ // schedule an exception and return Nothing.
+ MUST_USE_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
+
+ static const int kModuleOffset = JSObject::kHeaderSize;
+ static const int kSize = kModuleOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSModuleNamespace);
+};
+
// A Module object is a mapping from export names to cells
// This is still very much in flux.
class Module : public Struct {
@@ -7883,69 +8229,82 @@ class Module : public Struct {
DECLARE_VERIFIER(Module)
DECLARE_PRINTER(Module)
- // The code representing this Module, either a
- // SharedFunctionInfo or a JSFunction depending
- // on whether it's been instantiated.
+ // The code representing this Module, or an abstraction thereof.
+ // This is either a SharedFunctionInfo or a JSFunction or a ModuleInfo
+ // depending on whether the module has been instantiated and evaluated. See
+ // Module::ModuleVerify() for the precise invariant.
DECL_ACCESSORS(code, Object)
+ // Arrays of cells corresponding to regular exports and regular imports.
+ // A cell's position in the array is determined by the cell index of the
+ // associated module entry (which coincides with the variable index of the
+ // associated variable).
+ DECL_ACCESSORS(regular_exports, FixedArray)
+ DECL_ACCESSORS(regular_imports, FixedArray)
+
+ // The complete export table, mapping an export name to its cell.
+ // TODO(neis): We may want to remove the regular exports from the table.
DECL_ACCESSORS(exports, ObjectHashTable)
- // [[RequestedModules]]: Modules imported or re-exported by this module.
+ // Hash for this object (a random non-zero Smi).
+ DECL_INT_ACCESSORS(hash)
+
+ // The namespace object (or undefined).
+ DECL_ACCESSORS(module_namespace, HeapObject)
+
+ // Modules imported or re-exported by this module.
// Corresponds 1-to-1 to the module specifier strings in
// ModuleInfo::module_requests.
DECL_ACCESSORS(requested_modules, FixedArray)
- // [[Evaluated]]: Whether this module has been evaluated. Modules
- // are only evaluated a single time.
- DECL_BOOLEAN_ACCESSORS(evaluated)
-
- // Storage for [[Evaluated]]
- DECL_INT_ACCESSORS(flags)
-
- // Embedder-specified data
- DECL_ACCESSORS(embedder_data, Object)
-
- // Get the SharedFunctionInfo associated with the code.
- inline SharedFunctionInfo* shared() const;
-
// Get the ModuleInfo associated with the code.
inline ModuleInfo* info() const;
- // Compute a hash for this object.
- inline uint32_t Hash() const;
+ inline bool instantiated() const;
+ inline bool evaluated() const;
+ inline void set_evaluated();
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
// otherwise.
static MUST_USE_RESULT bool Instantiate(Handle<Module> module,
v8::Local<v8::Context> context,
- v8::Module::ResolveCallback callback,
- v8::Local<v8::Value> callback_data);
+ v8::Module::ResolveCallback callback);
// Implementation of spec operation ModuleEvaluation.
static MUST_USE_RESULT MaybeHandle<Object> Evaluate(Handle<Module> module);
- static Handle<Object> LoadExport(Handle<Module> module, Handle<String> name);
- static void StoreExport(Handle<Module> module, Handle<String> name,
- Handle<Object> value);
+ static Handle<Object> LoadVariable(Handle<Module> module, int cell_index);
+ static void StoreVariable(Handle<Module> module, int cell_index,
+ Handle<Object> value);
- static Handle<Object> LoadImport(Handle<Module> module, Handle<String> name,
- int module_request);
+ // Get the namespace object for [module_request] of [module]. If it doesn't
+ // exist yet, it is created.
+ static Handle<JSModuleNamespace> GetModuleNamespace(Handle<Module> module,
+ int module_request);
static const int kCodeOffset = HeapObject::kHeaderSize;
static const int kExportsOffset = kCodeOffset + kPointerSize;
- static const int kRequestedModulesOffset = kExportsOffset + kPointerSize;
- static const int kFlagsOffset = kRequestedModulesOffset + kPointerSize;
- static const int kEmbedderDataOffset = kFlagsOffset + kPointerSize;
- static const int kSize = kEmbedderDataOffset + kPointerSize;
+ static const int kRegularExportsOffset = kExportsOffset + kPointerSize;
+ static const int kRegularImportsOffset = kRegularExportsOffset + kPointerSize;
+ static const int kHashOffset = kRegularImportsOffset + kPointerSize;
+ static const int kModuleNamespaceOffset = kHashOffset + kPointerSize;
+ static const int kRequestedModulesOffset =
+ kModuleNamespaceOffset + kPointerSize;
+ static const int kSize = kRequestedModulesOffset + kPointerSize;
private:
enum { kEvaluatedBit };
- static void CreateExport(Handle<Module> module, Handle<FixedArray> names);
+ static void CreateExport(Handle<Module> module, int cell_index,
+ Handle<FixedArray> names);
static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
Handle<ModuleInfoEntry> entry);
+ // Get the namespace object for [module]. If it doesn't exist yet, it is
+ // created.
+ static Handle<JSModuleNamespace> GetModuleNamespace(Handle<Module> module);
+
// The [must_resolve] argument indicates whether or not an exception should be
// thrown in case the module does not provide an export named [name]
// (including when a cycle is detected). An exception is always thrown in the
@@ -7956,16 +8315,16 @@ class Module : public Struct {
// exception (so check manually!).
class ResolveSet;
static MUST_USE_RESULT MaybeHandle<Cell> ResolveExport(
- Handle<Module> module, Handle<String> name, bool must_resolve,
- ResolveSet* resolve_set);
+ Handle<Module> module, Handle<String> name, MessageLocation loc,
+ bool must_resolve, ResolveSet* resolve_set);
static MUST_USE_RESULT MaybeHandle<Cell> ResolveImport(
Handle<Module> module, Handle<String> name, int module_request,
- bool must_resolve, ResolveSet* resolve_set);
+ MessageLocation loc, bool must_resolve, ResolveSet* resolve_set);
// Helper for ResolveExport.
static MUST_USE_RESULT MaybeHandle<Cell> ResolveExportUsingStarExports(
- Handle<Module> module, Handle<String> name, bool must_resolve,
- ResolveSet* resolve_set);
+ Handle<Module> module, Handle<String> name, MessageLocation loc,
+ bool must_resolve, ResolveSet* resolve_set);
DISALLOW_IMPLICIT_CONSTRUCTORS(Module);
};
@@ -8050,6 +8409,14 @@ class JSFunction: public JSObject {
// Tells whether this function inlines the given shared function info.
bool Inlines(SharedFunctionInfo* candidate);
+ // Tells whether or not this function is interpreted.
+ //
+ // Note: function->IsInterpreted() does not necessarily return the same value
+ // as function->shared()->IsInterpreted() because the shared function info
+ // could tier up to baseline via a different function closure. The interpreter
+ // entry stub will "self-heal" this divergence when the function is executed.
+ inline bool IsInterpreted();
+
// Tells whether or not this function has been optimized.
inline bool IsOptimized();
@@ -8230,6 +8597,8 @@ class JSGlobalProxy : public JSObject {
inline bool IsDetachedFrom(JSGlobalObject* global) const;
+ static int SizeWithInternalFields(int internal_field_count);
+
// Dispatched behavior.
DECLARE_PRINTER(JSGlobalProxy)
DECLARE_VERIFIER(JSGlobalProxy)
@@ -8519,6 +8888,9 @@ class JSRegExp: public JSObject {
// Set implementation data after the object has been prepared.
inline void SetDataAt(int index, Object* value);
+ inline void SetLastIndex(int index);
+ inline Object* LastIndex();
+
static int code_index(bool is_latin1) {
if (is_latin1) {
return kIrregexpLatin1CodeIndex;
@@ -8879,7 +9251,7 @@ class AllocationSite: public Struct {
DECLARE_CAST(AllocationSite)
static inline AllocationSiteMode GetMode(
ElementsKind boilerplate_elements_kind);
- static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
+ static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
static inline bool CanTrack(InstanceType type);
static const int kTransitionInfoOffset = HeapObject::kHeaderSize;
@@ -9406,14 +9778,37 @@ class String: public Name {
MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
Handle<String> y);
+ // Perform ES6 21.1.3.8, including checking arguments.
+ static Object* IndexOf(Isolate* isolate, Handle<Object> receiver,
+ Handle<Object> search, Handle<Object> position);
// Perform string match of pattern on subject, starting at start index.
- // Caller must ensure that 0 <= start_index <= sub->length().
- static int IndexOf(Isolate* isolate, Handle<String> sub, Handle<String> pat,
- int start_index);
+ // Caller must ensure that 0 <= start_index <= sub->length(), as this does not
+ // check any arguments.
+ static int IndexOf(Isolate* isolate, Handle<String> receiver,
+ Handle<String> search, int start_index);
static Object* LastIndexOf(Isolate* isolate, Handle<Object> receiver,
Handle<Object> search, Handle<Object> position);
+ // Encapsulates logic related to a match and its capture groups as required
+ // by GetSubstitution.
+ class Match {
+ public:
+ virtual Handle<String> GetMatch() = 0;
+ virtual MaybeHandle<String> GetCapture(int i, bool* capture_exists) = 0;
+ virtual Handle<String> GetPrefix() = 0;
+ virtual Handle<String> GetSuffix() = 0;
+ virtual int CaptureCount() = 0;
+ virtual ~Match() {}
+ };
+
+ // ES#sec-getsubstitution
+ // GetSubstitution(matched, str, position, captures, replacement)
+ // Expand the $-expressions in the string and return a new string with
+ // the result.
+ MUST_USE_RESULT static MaybeHandle<String> GetSubstitution(
+ Isolate* isolate, Match* match, Handle<String> replacement);
+
// String equality operations.
inline bool Equals(String* other);
inline static bool Equals(Handle<String> one, Handle<String> two);
@@ -9716,7 +10111,7 @@ class ConsString: public String {
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// Dispatched behavior.
- uint16_t ConsStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t ConsStringGet(int index);
DECLARE_CAST(ConsString)
@@ -9759,7 +10154,7 @@ class SlicedString: public String {
inline void set_offset(int offset);
// Dispatched behavior.
- uint16_t SlicedStringGet(int index);
+ V8_EXPORT_PRIVATE uint16_t SlicedStringGet(int index);
DECLARE_CAST(SlicedString)
@@ -10343,6 +10738,36 @@ class JSMap : public JSCollection {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
};
+class JSArrayIterator : public JSObject {
+ public:
+ DECLARE_PRINTER(JSArrayIterator)
+ DECLARE_VERIFIER(JSArrayIterator)
+
+ DECLARE_CAST(JSArrayIterator)
+
+ // [object]: the [[IteratedObject]] internal field.
+ DECL_ACCESSORS(object, Object)
+
+ // [index]: The [[ArrayIteratorNextIndex]] internal field.
+ DECL_ACCESSORS(index, Object)
+
+ // [map]: The Map of the [[IteratedObject]] field at the time the iterator is
+ // allocated.
+ DECL_ACCESSORS(object_map, Object)
+
+ // Return the ElementsKind that a JSArrayIterator's [[IteratedObject]] is
+ // expected to have, based on its instance type.
+ static ElementsKind ElementsKindForInstanceType(InstanceType instance_type);
+
+ static const int kIteratedObjectOffset = JSObject::kHeaderSize;
+ static const int kNextIndexOffset = kIteratedObjectOffset + kPointerSize;
+ static const int kIteratedObjectMapOffset = kNextIndexOffset + kPointerSize;
+ static const int kSize = kIteratedObjectMapOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
+};
+
class JSStringIterator : public JSObject {
public:
// Dispatched behavior.
@@ -10366,6 +10791,37 @@ class JSStringIterator : public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
};
+// A JS iterator over the elements of a FixedArray.
+// This corresponds to ListIterator in ecma262/#sec-createlistiterator.
+class JSFixedArrayIterator : public JSObject {
+ public:
+ DECLARE_CAST(JSFixedArrayIterator)
+ DECLARE_PRINTER(JSFixedArrayIterator)
+ DECLARE_VERIFIER(JSFixedArrayIterator)
+
+ // The array over which the iterator iterates.
+ DECL_ACCESSORS(array, FixedArray)
+
+ // The index of the array element that will be returned next.
+ DECL_INT_ACCESSORS(index)
+
+ // The initial value of the object's "next" property.
+ DECL_ACCESSORS(initial_next, JSFunction)
+
+ static const int kArrayOffset = JSObject::kHeaderSize;
+ static const int kIndexOffset = kArrayOffset + kPointerSize;
+ static const int kNextOffset = kIndexOffset + kPointerSize;
+ static const int kHeaderSize = kNextOffset + kPointerSize;
+
+ enum InObjectPropertyIndex {
+ kNextIndex,
+ kInObjectPropertyCount // Dummy.
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSFixedArrayIterator);
+};
+
// OrderedHashTableIterator is an iterator that iterates over the keys and
// values of an OrderedHashTable.
//
@@ -10571,9 +11027,10 @@ class JSArrayBuffer: public JSObject {
void Neuter();
- static void Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
- bool is_external, void* data, size_t allocated_length,
- SharedFlag shared = SharedFlag::kNotShared);
+ V8_EXPORT_PRIVATE static void Setup(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
+ void* data, size_t allocated_length,
+ SharedFlag shared = SharedFlag::kNotShared);
static bool SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
Isolate* isolate, size_t allocated_length,
@@ -10652,7 +11109,7 @@ class JSTypedArray: public JSArrayBufferView {
DECLARE_CAST(JSTypedArray)
ExternalArrayType type();
- size_t element_size();
+ V8_EXPORT_PRIVATE size_t element_size();
Handle<JSArrayBuffer> GetBuffer();
@@ -10862,6 +11319,9 @@ class AccessorInfo: public Struct {
inline bool is_special_data_property();
inline void set_is_special_data_property(bool value);
+ inline bool replace_on_access();
+ inline void set_replace_on_access(bool value);
+
inline bool is_sloppy();
inline void set_is_sloppy(bool value);
@@ -10903,7 +11363,8 @@ class AccessorInfo: public Struct {
static const int kAllCanWriteBit = 1;
static const int kSpecialDataProperty = 2;
static const int kIsSloppy = 3;
- class AttributesField : public BitField<PropertyAttributes, 4, 3> {};
+ static const int kReplaceOnAccess = 4;
+ class AttributesField : public BitField<PropertyAttributes, 5, 3> {};
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorInfo);
};
@@ -11069,9 +11530,7 @@ class TemplateInfo: public Struct {
static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
static const int kPropertyAccessorsOffset =
kPropertyListOffset + kPointerSize;
- static const int kPropertyIntrinsicsOffset =
- kPropertyAccessorsOffset + kPointerSize;
- static const int kHeaderSize = kPropertyIntrinsicsOffset + kPointerSize;
+ static const int kHeaderSize = kPropertyAccessorsOffset + kPointerSize;
static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
@@ -11110,6 +11569,8 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_BOOLEAN_ACCESSORS(do_not_cache)
DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
+ DECL_ACCESSORS(cached_property_name, Object)
+
DECLARE_CAST(FunctionTemplateInfo)
// Dispatched behavior.
@@ -11136,7 +11597,8 @@ class FunctionTemplateInfo: public TemplateInfo {
kAccessCheckInfoOffset + kPointerSize;
static const int kFlagOffset = kSharedFunctionInfoOffset + kPointerSize;
static const int kLengthOffset = kFlagOffset + kPointerSize;
- static const int kSize = kLengthOffset + kPointerSize;
+ static const int kCachedPropertyNameOffset = kLengthOffset + kPointerSize;
+ static const int kSize = kCachedPropertyNameOffset + kPointerSize;
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info);
@@ -11147,6 +11609,10 @@ class FunctionTemplateInfo: public TemplateInfo {
bool IsTemplateFor(Map* map);
inline bool instantiated();
+ // Helper function for cached accessors.
+ static MaybeHandle<Name> TryGetCachedPropertyName(Isolate* isolate,
+ Handle<Object> getter);
+
private:
// Bit position in the flag, from least significant bit position.
static const int kHiddenPrototypeBit = 0;
@@ -11217,7 +11683,7 @@ class DebugInfo: public Struct {
// Get the number of break points for this function.
int GetBreakPointCount();
- static Smi* uninitialized() { return Smi::FromInt(0); }
+ static Smi* uninitialized() { return Smi::kZero; }
inline bool HasDebugBytecodeArray();
inline bool HasDebugCode();
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index dea751413e..e72c8eec30 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -80,7 +80,8 @@ struct AsHex {
std::ostream& operator<<(std::ostream& os, const AsReversiblyEscapedUC16& c);
// Same as AsReversiblyEscapedUC16 with additional escaping of \n, \r, " and '.
-std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const AsEscapedUC16ForJSON& c);
// Writes the given character to the output escaping everything outside
// of printable ASCII range.
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 6a1fbac35a..d2dc6fa170 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -22,8 +22,7 @@ class DuplicateFinder;
T(StrictModeFormalParametersProduction, 5) \
T(ArrowFormalParametersProduction, 6) \
T(LetPatternProduction, 7) \
- T(TailCallExpressionProduction, 8) \
- T(AsyncArrowFormalParametersProduction, 9)
+ T(AsyncArrowFormalParametersProduction, 8)
// Expression classifiers serve two purposes:
//
@@ -191,13 +190,6 @@ class ExpressionClassifier {
return reported_error(kLetPatternProduction);
}
- V8_INLINE bool has_tail_call_expression() const {
- return !is_valid(TailCallExpressionProduction);
- }
- V8_INLINE const Error& tail_call_expression_error() const {
- return reported_error(kTailCallExpressionProduction);
- }
-
V8_INLINE const Error& async_arrow_formal_parameters_error() const {
return reported_error(kAsyncArrowFormalParametersProduction);
}
@@ -299,14 +291,6 @@ class ExpressionClassifier {
Add(Error(loc, message, kLetPatternProduction, arg));
}
- void RecordTailCallExpressionError(const Scanner::Location& loc,
- MessageTemplate::Template message,
- const char* arg = nullptr) {
- if (has_tail_call_expression()) return;
- invalid_productions_ |= TailCallExpressionProduction;
- Add(Error(loc, message, kTailCallExpressionProduction, arg));
- }
-
void Accumulate(ExpressionClassifier* inner, unsigned productions,
bool merge_non_patterns = true) {
DCHECK_EQ(inner->reported_errors_, reported_errors_);
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 5b9b5e4ef0..4fbfb1948d 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -31,18 +31,12 @@ ParseInfo::ParseInfo(Zone* zone)
function_name_(nullptr),
literal_(nullptr) {}
-ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
- : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
- if (!function->context()->IsNativeContext()) {
- set_outer_scope_info(handle(function->context()->scope_info()));
- }
-}
-
ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
: ParseInfo(zone) {
isolate_ = shared->GetIsolate();
- set_lazy();
+ set_toplevel(shared->is_toplevel());
+ set_allow_lazy_parsing(FLAG_lazy_inner_functions);
set_hash_seed(isolate_->heap()->HashSeed());
set_is_named_expression(shared->is_named_expression());
set_calls_eval(shared->scope_info()->CallsEval());
@@ -56,22 +50,29 @@ ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
- if (!script.is_null() && script->type() == Script::TYPE_NATIVE) {
- set_native();
+ set_native(script->type() == Script::TYPE_NATIVE);
+ set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
+
+ Handle<HeapObject> scope_info(shared->outer_scope_info());
+ if (!scope_info->IsTheHole(isolate()) &&
+ Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
+ set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
}
}
ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
isolate_ = script->GetIsolate();
+ set_allow_lazy_parsing(String::cast(script->source())->length() >
+ FLAG_min_preparse_length);
+ set_toplevel();
set_hash_seed(isolate_->heap()->HashSeed());
set_stack_limit(isolate_->stack_guard()->real_climit());
set_unicode_cache(isolate_->unicode_cache());
set_script(script);
- if (script->type() == Script::TYPE_NATIVE) {
- set_native();
- }
+ set_native(script->type() == Script::TYPE_NATIVE);
+ set_eval(script->compilation_type() == Script::COMPILATION_TYPE_EVAL);
}
ParseInfo::~ParseInfo() {
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 4aedae4978..24188d95e2 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -26,12 +26,10 @@ class Utf16CharacterStream;
class Zone;
// A container for the inputs, configuration options, and outputs of parsing.
-class ParseInfo {
+class V8_EXPORT_PRIVATE ParseInfo {
public:
explicit ParseInfo(Zone* zone);
- ParseInfo(Zone* zone, Handle<JSFunction> function);
ParseInfo(Zone* zone, Handle<Script> script);
- // TODO(all) Only used via Debug::FindSharedFunctionInfoInScript, remove?
ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
~ParseInfo();
@@ -45,9 +43,7 @@ class ParseInfo {
void setter(bool val) { SetFlag(flag, val); }
FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
- FLAG_ACCESSOR(kLazy, is_lazy, set_lazy)
FLAG_ACCESSOR(kEval, is_eval, set_eval)
- FLAG_ACCESSOR(kGlobal, is_global, set_global)
FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
FLAG_ACCESSOR(kNative, is_native, set_native)
FLAG_ACCESSOR(kModule, is_module, set_module)
@@ -57,6 +53,8 @@ class ParseInfo {
FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
set_is_named_expression)
FLAG_ACCESSOR(kCallsEval, calls_eval, set_calls_eval)
+ FLAG_ACCESSOR(kDebug, is_debug, set_is_debug)
+ FLAG_ACCESSOR(kSerializing, will_serialize, set_will_serialize)
#undef FLAG_ACCESSOR
@@ -99,6 +97,9 @@ class ParseInfo {
return compile_options_;
}
void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
+ if (compile_options == ScriptCompiler::kConsumeParserCache) {
+ set_allow_lazy_parsing();
+ }
compile_options_ = compile_options;
}
@@ -198,16 +199,17 @@ class ParseInfo {
kToplevel = 1 << 0,
kLazy = 1 << 1,
kEval = 1 << 2,
- kGlobal = 1 << 3,
- kStrictMode = 1 << 4,
- kNative = 1 << 5,
- kParseRestriction = 1 << 6,
- kModule = 1 << 7,
- kAllowLazyParsing = 1 << 8,
- kIsNamedExpression = 1 << 9,
- kCallsEval = 1 << 10,
+ kStrictMode = 1 << 3,
+ kNative = 1 << 4,
+ kParseRestriction = 1 << 5,
+ kModule = 1 << 6,
+ kAllowLazyParsing = 1 << 7,
+ kIsNamedExpression = 1 << 8,
+ kCallsEval = 1 << 9,
+ kDebug = 1 << 10,
+ kSerializing = 1 << 11,
// ---------- Output flags --------------------------
- kAstValueFactoryOwned = 1 << 11
+ kAstValueFactoryOwned = 1 << 12
};
//------------- Inputs to parsing and scope analysis -----------------------
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 1ebbee4959..bb62f86e3b 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -59,10 +59,27 @@ static inline bool operator&(ParseFunctionFlags bitfield,
struct FormalParametersBase {
explicit FormalParametersBase(DeclarationScope* scope) : scope(scope) {}
+
+ int num_parameters() const {
+ // Don't include the rest parameter into the function's formal parameter
+ // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
+ // which says whether we need to create an arguments adaptor frame).
+ return arity - has_rest;
+ }
+
+ void UpdateArityAndFunctionLength(bool is_optional, bool is_rest) {
+ if (!is_optional && !is_rest && function_length == arity) {
+ ++function_length;
+ }
+ ++arity;
+ }
+
DeclarationScope* scope;
bool has_rest = false;
bool is_simple = true;
int materialized_literals_count = 0;
+ int function_length = 0;
+ int arity = 0;
};
@@ -175,27 +192,25 @@ class ParserBase {
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
- ParserRecorder* log)
+ RuntimeCallStats* runtime_call_stats)
: scope_state_(nullptr),
function_state_(nullptr),
extension_(extension),
fni_(nullptr),
ast_value_factory_(ast_value_factory),
ast_node_factory_(ast_value_factory),
- log_(log),
- mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ runtime_call_stats_(runtime_call_stats),
parsing_module_(false),
stack_limit_(stack_limit),
zone_(zone),
classifier_(nullptr),
scanner_(scanner),
stack_overflow_(false),
+ default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
allow_lazy_(false),
allow_natives_(false),
allow_tailcalls_(false),
- allow_harmony_restrictive_declarations_(false),
allow_harmony_do_expressions_(false),
- allow_harmony_for_in_(false),
allow_harmony_function_sent_(false),
allow_harmony_async_await_(false),
allow_harmony_restrictive_generators_(false),
@@ -209,9 +224,7 @@ class ParserBase {
ALLOW_ACCESSORS(lazy);
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(tailcalls);
- ALLOW_ACCESSORS(harmony_restrictive_declarations);
ALLOW_ACCESSORS(harmony_do_expressions);
- ALLOW_ACCESSORS(harmony_for_in);
ALLOW_ACCESSORS(harmony_function_sent);
ALLOW_ACCESSORS(harmony_async_await);
ALLOW_ACCESSORS(harmony_restrictive_generators);
@@ -224,26 +237,26 @@ class ParserBase {
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+ void set_default_eager_compile_hint(
+ FunctionLiteral::EagerCompileHint eager_compile_hint) {
+ default_eager_compile_hint_ = eager_compile_hint;
+ }
+
+ FunctionLiteral::EagerCompileHint default_eager_compile_hint() const {
+ return default_eager_compile_hint_;
+ }
+
Zone* zone() const { return zone_; }
protected:
friend class v8::internal::ExpressionClassifier<ParserTypes<Impl>>;
- // clang-format off
enum AllowRestrictedIdentifiers {
kAllowRestrictedIdentifiers,
kDontAllowRestrictedIdentifiers
};
- enum Mode {
- PARSE_LAZILY,
- PARSE_EAGERLY
- };
-
- enum LazyParsingResult {
- kLazyParsingComplete,
- kLazyParsingAborted
- };
+ enum LazyParsingResult { kLazyParsingComplete, kLazyParsingAborted };
enum VariableDeclarationContext {
kStatementListItem,
@@ -251,11 +264,7 @@ class ParserBase {
kForStatement
};
- enum class FunctionBodyType {
- kNormal,
- kSingleExpression
- };
- // clang-format on
+ enum class FunctionBodyType { kNormal, kSingleExpression };
class Checkpoint;
class ClassLiteralChecker;
@@ -581,22 +590,6 @@ class ParserBase {
int expected_property_count_;
};
- class ParsingModeScope BASE_EMBEDDED {
- public:
- ParsingModeScope(ParserBase* parser, Mode mode)
- : parser_(parser),
- old_mode_(parser->mode()) {
- parser_->mode_ = mode;
- }
- ~ParsingModeScope() {
- parser_->mode_ = old_mode_;
- }
-
- private:
- ParserBase* parser_;
- Mode old_mode_;
- };
-
struct DeclarationDescriptor {
enum Kind { NORMAL, PARAMETER };
Scope* scope;
@@ -659,11 +652,11 @@ class ParserBase {
explicit ForInfo(ParserBase* parser)
: bound_names(1, parser->zone()),
mode(ForEachStatement::ENUMERATE),
- each_loc(),
+ position(kNoSourcePosition),
parsing_result() {}
ZoneList<const AstRawString*> bound_names;
ForEachStatement::VisitMode mode;
- Scanner::Location each_loc;
+ int position;
DeclarationParsingResult parsing_result;
};
@@ -743,7 +736,6 @@ class ParserBase {
int peek_position() const { return scanner_->peek_location().beg_pos; }
bool stack_overflow() const { return stack_overflow_; }
void set_stack_overflow() { stack_overflow_ = true; }
- Mode mode() const { return mode_; }
INLINE(Token::Value peek()) {
if (stack_overflow_) return Token::ILLEGAL;
@@ -1430,8 +1422,7 @@ class ParserBase {
FuncNameInferrer* fni_;
AstValueFactory* ast_value_factory_; // Not owned.
typename Types::Factory ast_node_factory_;
- ParserRecorder* log_;
- Mode mode_;
+ RuntimeCallStats* runtime_call_stats_;
bool parsing_module_;
uintptr_t stack_limit_;
@@ -1444,12 +1435,12 @@ class ParserBase {
Scanner* scanner_;
bool stack_overflow_;
+ FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
+
bool allow_lazy_;
bool allow_natives_;
bool allow_tailcalls_;
- bool allow_harmony_restrictive_declarations_;
bool allow_harmony_do_expressions_;
- bool allow_harmony_for_in_;
bool allow_harmony_function_sent_;
bool allow_harmony_async_await_;
bool allow_harmony_restrictive_generators_;
@@ -1755,8 +1746,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
- return impl()->ExpressionFromIdentifier(name, beg_pos,
- scanner()->location().end_pos);
+ return impl()->ExpressionFromIdentifier(name, beg_pos);
}
case Token::STRING: {
@@ -2286,10 +2276,10 @@ ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
impl()->EmptyIdentifierString(), initializer_scope, body,
initializer_state.materialized_literal_count(),
- initializer_state.expected_property_count(), 0,
+ initializer_state.expected_property_count(), 0, 0,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+ FunctionLiteral::kAnonymousExpression, default_eager_compile_hint_,
+ initializer_scope->start_position(), true);
function_literal->set_is_class_field_initializer(true);
return function_literal;
}
@@ -2377,8 +2367,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
Scanner::Location(next_beg_pos, next_end_pos),
MessageTemplate::kAwaitBindingIdentifier);
}
- ExpressionT lhs =
- impl()->ExpressionFromIdentifier(name, next_beg_pos, next_end_pos);
+ ExpressionT lhs = impl()->ExpressionFromIdentifier(name, next_beg_pos);
CheckDestructuringElement(lhs, next_beg_pos, next_end_pos);
ExpressionT value;
@@ -2645,8 +2634,8 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
PeekAhead() == Token::ARROW) {
// async Identifier => AsyncConciseBody
IdentifierT name = ParseAndClassifyIdentifier(CHECK_OK);
- expression = impl()->ExpressionFromIdentifier(
- name, position(), scanner()->location().end_pos, InferName::kNo);
+ expression =
+ impl()->ExpressionFromIdentifier(name, position(), InferName::kNo);
if (fni_) {
// Remove `async` keyword from inferred name stack.
fni_->RemoveAsyncKeywordFromEnd();
@@ -2722,8 +2711,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
if (is_destructuring_assignment) {
// This is definitely not an expression so don't accumulate
// expression-related errors.
- productions &= ~(ExpressionClassifier::ExpressionProduction |
- ExpressionClassifier::TailCallExpressionProduction);
+ productions &= ~ExpressionClassifier::ExpressionProduction;
}
if (!Token::IsAssignmentOp(peek())) {
@@ -3083,8 +3071,8 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
// Also the trailing parenthesis are a hint that the function will
// be called immediately. If we happen to have parsed a preceding
// function literal eagerly, we can also compile it eagerly.
- if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
- result->AsFunctionLiteral()->set_should_eager_compile();
+ if (result->IsFunctionLiteral()) {
+ result->AsFunctionLiteral()->SetShouldEagerCompile();
}
}
Scanner::Location spread_pos;
@@ -3413,10 +3401,10 @@ ParserBase<Impl>::ParseMemberExpressionContinuation(ExpressionT expression,
pos = position();
} else {
pos = peek_position();
- if (expression->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ if (expression->IsFunctionLiteral()) {
// If the tag function looks like an IIFE, set_parenthesized() to
// force eager compilation.
- expression->AsFunctionLiteral()->set_should_eager_compile();
+ expression->AsFunctionLiteral()->SetShouldEagerCompile();
}
}
expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
@@ -3482,11 +3470,11 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
// FormalParameter[?Yield]
// FormalParameterList[?Yield] , FormalParameter[?Yield]
- DCHECK_EQ(0, parameters->Arity());
+ DCHECK_EQ(0, parameters->arity);
if (peek() != Token::RPAREN) {
while (true) {
- if (parameters->Arity() > Code::kMaxArguments) {
+ if (parameters->arity > Code::kMaxArguments) {
ReportMessage(MessageTemplate::kTooManyParameters);
*ok = false;
return;
@@ -3513,7 +3501,7 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
}
}
- for (int i = 0; i < parameters->Arity(); ++i) {
+ for (int i = 0; i < parameters->arity; ++i) {
auto parameter = parameters->at(i);
impl()->DeclareFormalParameter(parameters->scope, parameter);
}
@@ -3671,13 +3659,10 @@ ParserBase<Impl>::ParseFunctionDeclaration(bool* ok) {
int pos = position();
ParseFunctionFlags flags = ParseFunctionFlags::kIsNormal;
if (Check(Token::MUL)) {
- flags |= ParseFunctionFlags::kIsGenerator;
- if (allow_harmony_restrictive_declarations()) {
- impl()->ReportMessageAt(scanner()->location(),
- MessageTemplate::kGeneratorInLegacyContext);
- *ok = false;
- return impl()->NullStatement();
- }
+ impl()->ReportMessageAt(scanner()->location(),
+ MessageTemplate::kGeneratorInLegacyContext);
+ *ok = false;
+ return impl()->NullStatement();
}
return ParseHoistableDeclaration(pos, flags, nullptr, false, ok);
}
@@ -3905,6 +3890,11 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
+ RuntimeCallTimerScope runtime_timer(
+ runtime_call_stats_,
+ Impl::IsPreParser() ? &RuntimeCallStats::ParseArrowFunctionLiteral
+ : &RuntimeCallStats::PreParseArrowFunctionLiteral);
+
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
// `=> ...` is never a valid expression, so report as syntax error.
@@ -3915,14 +3905,20 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
StatementListT body = impl()->NullStatementList();
- int num_parameters = formal_parameters.scope->num_parameters();
int materialized_literal_count = -1;
int expected_property_count = -1;
FunctionKind kind = formal_parameters.scope->function_kind();
FunctionLiteral::EagerCompileHint eager_compile_hint =
- FunctionLiteral::kShouldLazyCompile;
+ default_eager_compile_hint_;
+ bool can_preparse = impl()->parse_lazily() &&
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
+ // TODO(marja): consider lazy-parsing inner arrow functions too. is_this
+ // handling in Scope::ResolveVariable needs to change.
+ bool is_lazy_top_level_function =
+ can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
bool should_be_used_once_hint = false;
+ bool has_braces = true;
{
FunctionState function_state(&function_state_, &scope_state_,
formal_parameters.scope);
@@ -3936,18 +3932,22 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
if (peek() == Token::LBRACE) {
// Multiple statement body
- Consume(Token::LBRACE);
DCHECK_EQ(scope(), formal_parameters.scope);
- bool is_lazily_parsed =
- (mode() == PARSE_LAZILY &&
- formal_parameters.scope
- ->AllowsLazyParsingWithoutUnresolvedVariables());
- // TODO(marja): consider lazy-parsing inner arrow functions too. is_this
- // handling in Scope::ResolveVariable needs to change.
- if (is_lazily_parsed) {
+ if (is_lazy_top_level_function) {
+ // FIXME(marja): Arrow function parameters will be parsed even if the
+ // body is preparsed; move relevant parts of parameter handling to
+ // simulate consistent parameter handling.
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
- LazyParsingResult result = impl()->SkipLazyFunctionBody(
+ // For arrow functions, we don't need to retrieve data about function
+ // parameters.
+ int dummy_num_parameters = -1;
+ int dummy_function_length = -1;
+ bool dummy_has_duplicate_parameters = false;
+ DCHECK((kind & FunctionKind::kArrowFunction) != 0);
+ LazyParsingResult result = impl()->SkipFunction(
+ kind, formal_parameters.scope, &dummy_num_parameters,
+ &dummy_function_length, &dummy_has_duplicate_parameters,
&materialized_literal_count, &expected_property_count, false, true,
CHECK_OK);
formal_parameters.scope->ResetAfterPreparsing(
@@ -3961,7 +3961,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
if (result == kLazyParsingAborted) {
bookmark.Apply();
// Trigger eager (re-)parsing, just below this block.
- is_lazily_parsed = false;
+ is_lazy_top_level_function = false;
// This is probably an initialization function. Inform the compiler it
// should also eager-compile this function, and that we expect it to
@@ -3970,7 +3970,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
should_be_used_once_hint = true;
}
}
- if (!is_lazily_parsed) {
+ if (!is_lazy_top_level_function) {
+ Consume(Token::LBRACE);
body = impl()->ParseEagerFunctionBody(
impl()->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
@@ -3980,6 +3981,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
}
} else {
// Single-expression body
+ has_braces = false;
int pos = position();
DCHECK(ReturnExprContext::kInsideValidBlock ==
function_state_->return_expr_context());
@@ -3997,7 +3999,9 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
} else {
ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
impl()->RewriteNonPattern(CHECK_OK);
- body->Add(factory()->NewReturnStatement(expression, pos), zone());
+ body->Add(
+ factory()->NewReturnStatement(expression, expression->position()),
+ zone());
if (allow_tailcalls() && !is_sloppy(language_mode())) {
// ES6 14.6.1 Static Semantics: IsInTailPosition
impl()->MarkTailPosition(expression);
@@ -4028,12 +4032,19 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
impl()->RewriteDestructuringAssignments();
}
+ if (FLAG_trace_preparse) {
+ Scope* scope = formal_parameters.scope;
+ PrintF(" [%s]: %i-%i (arrow function)\n",
+ is_lazy_top_level_function ? "Preparse no-resolution" : "Full parse",
+ scope->start_position(), scope->end_position());
+ }
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
impl()->EmptyIdentifierString(), formal_parameters.scope, body,
- materialized_literal_count, expected_property_count, num_parameters,
+ materialized_literal_count, expected_property_count,
+ formal_parameters.num_parameters(), formal_parameters.function_length,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, eager_compile_hint,
- formal_parameters.scope->start_position());
+ formal_parameters.scope->start_position(), has_braces);
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
@@ -4391,11 +4402,6 @@ ParserBase<Impl>::ParseStatementList(StatementListT body, int end_token,
*ok = false;
return kLazyParsingComplete;
}
- // Because declarations in strict eval code don't leak into the scope
- // of the eval call, it is likely that functions declared in strict
- // eval code will be used within the eval code, so lazy parsing is
- // probably not a win.
- if (scope()->is_eval_scope()) mode_ = PARSE_EAGERLY;
} else if (impl()->IsUseAsmDirective(stat) &&
token_loc.end_pos - token_loc.beg_pos ==
sizeof("use asm") + 1) {
@@ -4622,8 +4628,7 @@ typename ParserBase<Impl>::BlockT ParserBase<Impl>::ParseBlock(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
ZoneList<const AstRawString*>* labels, bool legacy, bool* ok) {
- if (is_strict(language_mode()) || peek() != Token::FUNCTION ||
- (legacy && allow_harmony_restrictive_declarations())) {
+ if (is_strict(language_mode()) || peek() != Token::FUNCTION || legacy) {
return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
} else {
if (legacy) {
@@ -4693,7 +4698,7 @@ ParserBase<Impl>::ParseExpressionOrLabelledStatement(
// Identifier ':' Statement
//
// ExpressionStatement[Yield] :
- // [lookahead ∉ {{, function, class, let [}] Expression[In, ?Yield] ;
+ // [lookahead notin {{, function, class, let [}] Expression[In, ?Yield] ;
int pos = peek_position();
@@ -5164,7 +5169,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
nullptr, CHECK_OK);
bound_names_are_lexical =
IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
- for_info.each_loc = scanner()->location();
+ for_info.position = scanner()->location().beg_pos;
if (CheckInOrOf(&for_info.mode)) {
// Just one declaration followed by in/of.
@@ -5181,13 +5186,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
for_info.mode == ForEachStatement::ITERATE ||
bound_names_are_lexical ||
!impl()->IsIdentifier(
- for_info.parsing_result.declarations[0].pattern) ||
- allow_harmony_for_in())) {
- // Only increment the use count if we would have let this through
- // without the flag.
- if (allow_harmony_for_in()) {
- impl()->CountUsage(v8::Isolate::kForInInitializer);
- }
+ for_info.parsing_result.declarations[0].pattern))) {
impl()->ReportMessageAt(
for_info.parsing_result.first_initializer_loc,
MessageTemplate::kForInOfLoopInitializer,
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 7b88695e77..8d8890129f 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -68,7 +68,6 @@ bool ParseData::IsSane() {
if (data_length < PreparseDataConstants::kHeaderSize) return false;
if (Magic() != PreparseDataConstants::kMagicNumber) return false;
if (Version() != PreparseDataConstants::kCurrentVersion) return false;
- if (HasError()) return false;
// Check that the space allocated for function entries is sane.
int functions_size = FunctionsSize();
if (functions_size < 0) return false;
@@ -90,11 +89,6 @@ void ParseData::Initialize() {
}
-bool ParseData::HasError() {
- return Data()[PreparseDataConstants::kHasErrorOffset];
-}
-
-
unsigned ParseData::Magic() {
return Data()[PreparseDataConstants::kMagicOffset];
}
@@ -124,6 +118,7 @@ class DiscardableZoneScope {
parser_->zone_ = temp_zone;
if (parser_->reusable_preparser_ != nullptr) {
parser_->reusable_preparser_->zone_ = temp_zone;
+ parser_->reusable_preparser_->factory()->set_zone(temp_zone);
}
}
}
@@ -132,6 +127,7 @@ class DiscardableZoneScope {
parser_->zone_ = prev_zone_;
if (parser_->reusable_preparser_ != nullptr) {
parser_->reusable_preparser_->zone_ = prev_zone_;
+ parser_->reusable_preparser_->factory()->set_zone(prev_zone_);
}
ast_node_factory_scope_.Reset();
}
@@ -148,12 +144,11 @@ class DiscardableZoneScope {
};
void Parser::SetCachedData(ParseInfo* info) {
- if (compile_options_ == ScriptCompiler::kNoCompileOptions) {
- cached_parse_data_ = NULL;
- } else {
- DCHECK(info->cached_data() != NULL);
- if (compile_options_ == ScriptCompiler::kConsumeParserCache) {
- cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
+ DCHECK_NULL(cached_parse_data_);
+ if (consume_cached_parse_data()) {
+ cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
+ if (cached_parse_data_ == nullptr) {
+ compile_options_ = ScriptCompiler::kNoCompileOptions;
}
}
}
@@ -219,7 +214,7 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
LanguageMode language_mode) {
int materialized_literal_count = -1;
int expected_property_count = -1;
- int parameter_count = 0;
+ const int parameter_count = 0;
if (name == nullptr) name = ast_value_factory()->empty_string();
FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
@@ -282,59 +277,16 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
name, function_scope, body, materialized_literal_count,
- expected_property_count, parameter_count,
+ expected_property_count, parameter_count, parameter_count,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, pos);
+ FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
+ true);
function_literal->set_requires_class_field_init(requires_class_field_init);
return function_literal;
}
-
-// ----------------------------------------------------------------------------
-// Target is a support class to facilitate manipulation of the
-// Parser's target_stack_ (the stack of potential 'break' and
-// 'continue' statement targets). Upon construction, a new target is
-// added; it is removed upon destruction.
-
-class ParserTarget BASE_EMBEDDED {
- public:
- ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
- : variable_(&parser->impl()->target_stack_),
- statement_(statement),
- previous_(parser->impl()->target_stack_) {
- parser->impl()->target_stack_ = this;
- }
-
- ~ParserTarget() { *variable_ = previous_; }
-
- ParserTarget* previous() { return previous_; }
- BreakableStatement* statement() { return statement_; }
-
- private:
- ParserTarget** variable_;
- BreakableStatement* statement_;
- ParserTarget* previous_;
-};
-
-class ParserTargetScope BASE_EMBEDDED {
- public:
- explicit ParserTargetScope(ParserBase<Parser>* parser)
- : variable_(&parser->impl()->target_stack_),
- previous_(parser->impl()->target_stack_) {
- parser->impl()->target_stack_ = nullptr;
- }
-
- ~ParserTargetScope() { *variable_ = previous_; }
-
- private:
- ParserTarget** variable_;
- ParserTarget* previous_;
-};
-
-
// ----------------------------------------------------------------------------
// The CHECK_OK macro is a convenient macro to enforce error
// handling for functions that may fail (by returning !*ok).
@@ -521,9 +473,7 @@ Expression* Parser::NewSuperCallReference(int pos) {
}
Expression* Parser::NewTargetExpression(int pos) {
- static const int kNewTargetStringLength = 10;
- auto proxy = NewUnresolved(ast_value_factory()->new_target_string(), pos,
- pos + kNewTargetStringLength);
+ auto proxy = NewUnresolved(ast_value_factory()->new_target_string(), pos);
proxy->set_is_new_target();
return proxy;
}
@@ -547,7 +497,7 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
case Token::FALSE_LITERAL:
return factory()->NewBooleanLiteral(false, pos);
case Token::SMI: {
- int value = scanner()->smi_value();
+ uint32_t value = scanner()->smi_value();
return factory()->NewSmiLiteral(value, pos);
}
case Token::NUMBER: {
@@ -631,31 +581,48 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
- info->extension(), info->ast_value_factory(), NULL),
+ info->extension(), info->ast_value_factory(),
+ info->isolate()->counters()->runtime_call_stats()),
scanner_(info->unicode_cache()),
- reusable_preparser_(NULL),
- original_scope_(NULL),
- target_stack_(NULL),
+ reusable_preparser_(nullptr),
+ original_scope_(nullptr),
+ mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
+ target_stack_(nullptr),
compile_options_(info->compile_options()),
- cached_parse_data_(NULL),
+ cached_parse_data_(nullptr),
total_preparse_skipped_(0),
- pre_parse_timer_(NULL),
- parsing_on_main_thread_(true) {
+ parsing_on_main_thread_(true),
+ log_(nullptr) {
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
// ParseInfo during background parsing.
DCHECK(!info->script().is_null() || info->source_stream() != nullptr ||
info->character_stream() != nullptr);
+ // Determine if functions can be lazily compiled. This is necessary to
+ // allow some of our builtin JS files to be lazily compiled. These
+ // builtins cannot be handled lazily by the parser, since we have to know
+ // if a function uses the special natives syntax, which is something the
+ // parser records.
+ // If the debugger requests compilation for break points, we cannot be
+ // aggressive about lazy compilation, because it might trigger compilation
+ // of functions without an outer context when setting a breakpoint through
+ // Debug::FindSharedFunctionInfoInScript
+ bool can_compile_lazily = FLAG_lazy && !info->is_debug();
+
+ // Consider compiling eagerly when targeting the code cache.
+ can_compile_lazily &= !(FLAG_serialize_eager && info->will_serialize());
+
+ set_default_eager_compile_hint(can_compile_lazily
+ ? FunctionLiteral::kShouldLazyCompile
+ : FunctionLiteral::kShouldEagerCompile);
set_allow_lazy(FLAG_lazy && info->allow_lazy_parsing() &&
- !info->is_native() && info->extension() == nullptr);
+ !info->is_native() && info->extension() == nullptr &&
+ can_compile_lazily);
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
info->isolate()->is_tail_call_elimination_enabled());
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
- set_allow_harmony_for_in(FLAG_harmony_for_in);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
- set_allow_harmony_restrictive_declarations(
- FLAG_harmony_restrictive_declarations);
set_allow_harmony_async_await(FLAG_harmony_async_await);
set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
@@ -699,9 +666,10 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// called in the main thread.
DCHECK(parsing_on_main_thread_);
- HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
- RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::Parse);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.Parse");
+ RuntimeCallTimerScope runtime_timer(
+ runtime_call_stats_, info->is_eval() ? &RuntimeCallStats::ParseEval
+ : &RuntimeCallStats::ParseProgram);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -711,10 +679,10 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
// Initialize parser state.
- CompleteParserRecorder recorder;
+ ParserLogger logger;
if (produce_cached_parse_data()) {
- log_ = &recorder;
+ log_ = &logger;
} else if (consume_cached_parse_data()) {
cached_parse_data_->Initialize();
}
@@ -734,7 +702,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
}
HandleSourceURLComments(isolate, info->script());
- if (FLAG_trace_parse && result != NULL) {
+ if (FLAG_trace_parse && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
if (info->is_eval()) {
PrintF("[parsing eval");
@@ -747,10 +715,10 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
}
PrintF(" - took %0.3f ms]\n", ms);
}
- if (produce_cached_parse_data()) {
- if (result != NULL) *info->cached_data() = recorder.GetScriptData();
- log_ = NULL;
+ if (produce_cached_parse_data() && result != nullptr) {
+ *info->cached_data() = logger.GetScriptData();
}
+ log_ = nullptr;
return result;
}
@@ -762,7 +730,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
DCHECK_NULL(scope_state_);
DCHECK_NULL(target_stack_);
- Mode parsing_mode = allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY;
+ ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
FunctionLiteral* result = NULL;
{
@@ -770,25 +738,16 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
DCHECK_NOT_NULL(outer);
parsing_module_ = info->is_module();
if (info->is_eval()) {
- if (!outer->is_script_scope() || is_strict(info->language_mode())) {
- parsing_mode = PARSE_EAGERLY;
- }
outer = NewEvalScope(outer);
} else if (parsing_module_) {
DCHECK_EQ(outer, info->script_scope());
outer = NewModuleScope(info->script_scope());
- // Never do lazy parsing in modules. If we want to support this in the
- // future, we must force context-allocation for all variables that are
- // declared at the module level but not MODULE-allocated.
- parsing_mode = PARSE_EAGERLY;
}
DeclarationScope* scope = outer->AsDeclarationScope();
scope->set_start_position(0);
- // Enter 'scope' with the given parsing mode.
- ParsingModeScope parsing_mode_scope(this, parsing_mode);
FunctionState function_state(&function_state_, &scope_state_, scope);
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
@@ -868,14 +827,13 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
return result;
}
-
-FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
+FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
- RuntimeCallTimerScope runtime_timer(isolate, &RuntimeCallStats::ParseLazy);
- HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseLazy");
+ RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
+ &RuntimeCallStats::ParseFunction);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseFunction");
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -892,8 +850,8 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
source, shared_info->start_position(), shared_info->end_position()));
Handle<String> name(String::cast(shared_info->name()));
- result =
- DoParseLazy(info, ast_value_factory()->GetString(name), stream.get());
+ result = DoParseFunction(info, ast_value_factory()->GetString(name),
+ stream.get());
if (result != nullptr) {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -922,9 +880,9 @@ static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
return FunctionLiteral::kAnonymousExpression;
}
-FunctionLiteral* Parser::DoParseLazy(ParseInfo* info,
- const AstRawString* raw_name,
- Utf16CharacterStream* source) {
+FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
+ const AstRawString* raw_name,
+ Utf16CharacterStream* source) {
scanner_.Initialize(source);
DCHECK_NULL(scope_state_);
DCHECK_NULL(target_stack_);
@@ -1130,15 +1088,19 @@ void Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
}
const AstRawString* local_name = ParseIdentifierName(CHECK_OK_VOID);
const AstRawString* export_name = NULL;
+ Scanner::Location location = scanner()->location();
if (CheckContextualKeyword(CStrVector("as"))) {
export_name = ParseIdentifierName(CHECK_OK_VOID);
+ // Set the location to the whole "a as b" string, so that it makes sense
+ // both for errors due to "a" and for errors due to "b".
+ location.end_pos = scanner()->location().end_pos;
}
if (export_name == NULL) {
export_name = local_name;
}
export_names->Add(export_name, zone());
local_names->Add(local_name, zone());
- export_locations->Add(scanner()->location(), zone());
+ export_locations->Add(location, zone());
if (peek() == Token::RBRACE) break;
Expect(Token::COMMA, CHECK_OK_VOID);
}
@@ -1168,6 +1130,7 @@ ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
while (peek() != Token::RBRACE) {
const AstRawString* import_name = ParseIdentifierName(CHECK_OK);
const AstRawString* local_name = import_name;
+ Scanner::Location location = scanner()->location();
// In the presence of 'as', the left-side of the 'as' can
// be any IdentifierName. But without 'as', it must be a valid
// BindingIdentifier.
@@ -1188,8 +1151,8 @@ ZoneList<const Parser::NamedImport*>* Parser::ParseNamedImports(
DeclareVariable(local_name, CONST, kNeedsInitialization, position(),
CHECK_OK);
- NamedImport* import = new (zone()) NamedImport(
- import_name, local_name, scanner()->location());
+ NamedImport* import =
+ new (zone()) NamedImport(import_name, local_name, location);
result->Add(import, zone());
if (peek() == Token::RBRACE) break;
@@ -1377,21 +1340,23 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// 'export' Declaration
// 'export' 'default' ... (handled in ParseExportDefault)
- int pos = peek_position();
Expect(Token::EXPORT, CHECK_OK);
+ int pos = position();
Statement* result = nullptr;
ZoneList<const AstRawString*> names(1, zone());
+ Scanner::Location loc = scanner()->peek_location();
switch (peek()) {
case Token::DEFAULT:
return ParseExportDefault(ok);
case Token::MUL: {
Consume(Token::MUL);
+ loc = scanner()->location();
ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
- module()->AddStarExport(module_specifier, scanner()->location(), zone());
+ module()->AddStarExport(module_specifier, loc, zone());
return factory()->NewEmptyStatement(pos);
}
@@ -1472,11 +1437,11 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
ReportUnexpectedToken(scanner()->current_token());
return nullptr;
}
+ loc.end_pos = scanner()->location().end_pos;
ModuleDescriptor* descriptor = module();
for (int i = 0; i < names.length(); ++i) {
- // TODO(neis): Provide better location.
- descriptor->AddExport(names[i], names[i], scanner()->location(), zone());
+ descriptor->AddExport(names[i], names[i], loc, zone());
}
DCHECK_NOT_NULL(result);
@@ -1484,13 +1449,12 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
}
VariableProxy* Parser::NewUnresolved(const AstRawString* name, int begin_pos,
- int end_pos, VariableKind kind) {
- return scope()->NewUnresolved(factory(), name, begin_pos, end_pos, kind);
+ VariableKind kind) {
+ return scope()->NewUnresolved(factory(), name, begin_pos, kind);
}
VariableProxy* Parser::NewUnresolved(const AstRawString* name) {
- return scope()->NewUnresolved(factory(), name, scanner()->location().beg_pos,
- scanner()->location().end_pos);
+ return scope()->NewUnresolved(factory(), name, scanner()->location().beg_pos);
}
Declaration* Parser::DeclareVariable(const AstRawString* name,
@@ -1504,18 +1468,19 @@ Declaration* Parser::DeclareVariable(const AstRawString* name,
int pos, bool* ok) {
DCHECK_NOT_NULL(name);
VariableProxy* proxy = factory()->NewVariableProxy(
- name, NORMAL_VARIABLE, scanner()->location().beg_pos,
- scanner()->location().end_pos);
+ name, NORMAL_VARIABLE, scanner()->location().beg_pos);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, this->scope(), pos);
- Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, CHECK_OK);
+ Declare(declaration, DeclarationDescriptor::NORMAL, mode, init, ok, nullptr,
+ scanner()->location().end_pos);
+ if (!*ok) return nullptr;
return declaration;
}
Variable* Parser::Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind,
VariableMode mode, InitializationFlag init, bool* ok,
- Scope* scope) {
+ Scope* scope, int var_end_pos) {
if (scope == nullptr) {
scope = this->scope();
}
@@ -1524,11 +1489,18 @@ Variable* Parser::Declare(Declaration* declaration,
declaration, mode, init, allow_harmony_restrictive_generators(),
&sloppy_mode_block_scope_function_redefinition, ok);
if (!*ok) {
+ // If we only have the start position of a proxy, we can't highlight the
+ // whole variable name. Pretend its length is 1 so that we highlight at
+ // least the first character.
+ Scanner::Location loc(declaration->proxy()->position(),
+ var_end_pos != kNoSourcePosition
+ ? var_end_pos
+ : declaration->proxy()->position() + 1);
if (declaration_kind == DeclarationDescriptor::NORMAL) {
- ReportMessage(MessageTemplate::kVarRedeclaration,
- declaration->proxy()->raw_name());
+ ReportMessageAt(loc, MessageTemplate::kVarRedeclaration,
+ declaration->proxy()->raw_name());
} else {
- ReportMessage(MessageTemplate::kParamDupe);
+ ReportMessageAt(loc, MessageTemplate::kParamDupe);
}
return nullptr;
}
@@ -1947,7 +1919,6 @@ Block* Parser::RewriteForVarInLegacy(const ForInfo& for_info) {
for_info.parsing_result.declarations[0];
if (!IsLexicalVariableMode(for_info.parsing_result.descriptor.mode) &&
decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
- DCHECK(!allow_harmony_for_in());
++use_counts_[v8::Isolate::kForInInitializer];
const AstRawString* name = decl.pattern->AsVariableProxy()->raw_name();
VariableProxy* single_var = NewUnresolved(name);
@@ -2034,8 +2005,7 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
*body_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
(*body_block)->statements()->Add(each_initialization_block, zone());
- *each_variable = factory()->NewVariableProxy(temp, for_info->each_loc.beg_pos,
- for_info->each_loc.end_pos);
+ *each_variable = factory()->NewVariableProxy(temp, for_info->position);
}
// Create a TDZ for any lexically-bound names in for in/of statements.
@@ -2483,7 +2453,7 @@ void Parser::DeclareArrowFunctionFormalParameters(
AddArrowFunctionFormalParameters(parameters, expr, params_loc.end_pos,
CHECK_OK_VOID);
- if (parameters->Arity() > Code::kMaxArguments) {
+ if (parameters->arity > Code::kMaxArguments) {
ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
*ok = false;
return;
@@ -2493,7 +2463,7 @@ void Parser::DeclareArrowFunctionFormalParameters(
if (!parameters->is_simple) {
this->classifier()->RecordNonSimpleParameter();
}
- for (int i = 0; i < parameters->Arity(); ++i) {
+ for (int i = 0; i < parameters->arity; ++i) {
auto parameter = parameters->at(i);
DeclareFormalParameter(parameters->scope, parameter);
if (!this->classifier()
@@ -2551,8 +2521,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int pos = function_token_pos == kNoSourcePosition ? peek_position()
: function_token_pos;
- bool is_generator = IsGeneratorFunction(kind);
-
// Anonymous functions were passed either the empty symbol or a null
// handle as the function name. Remember if we were passed a non-empty
// handle to decide whether to invoke function name inference.
@@ -2566,7 +2534,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral::EagerCompileHint eager_compile_hint =
function_state_->next_function_is_parenthesized()
? FunctionLiteral::kShouldEagerCompile
- : FunctionLiteral::kShouldLazyCompile;
+ : default_eager_compile_hint();
// Determine if the function can be parsed lazily. Lazy parsing is
// different from lazy compilation; we need to parse more eagerly than we
@@ -2600,14 +2568,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// parenthesis before the function means that it will be called
// immediately). bar can be parsed lazily, but we need to parse it in a mode
// that tracks unresolved variables.
- DCHECK_IMPLIES(mode() == PARSE_LAZILY, FLAG_lazy);
- DCHECK_IMPLIES(mode() == PARSE_LAZILY, allow_lazy());
- DCHECK_IMPLIES(mode() == PARSE_LAZILY, extension_ == nullptr);
+ DCHECK_IMPLIES(parse_lazily(), FLAG_lazy);
+ DCHECK_IMPLIES(parse_lazily(), allow_lazy());
+ DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
+
+ bool can_preparse = parse_lazily() &&
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
bool is_lazy_top_level_function =
- mode() == PARSE_LAZILY &&
- eager_compile_hint == FunctionLiteral::kShouldLazyCompile &&
- scope()->AllowsLazyParsingWithoutUnresolvedVariables();
+ can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
+
+ RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
+ &RuntimeCallStats::ParseFunctionLiteral);
// Determine whether we can still lazy parse the inner function.
// The preconditions are:
@@ -2629,8 +2601,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// will migrate unresolved variable into a Scope in the main Zone.
// TODO(marja): Refactor parsing modes: simplify this.
bool use_temp_zone =
- allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
- eager_compile_hint != FunctionLiteral::kShouldEagerCompile &&
+ (FLAG_lazy_inner_functions
+ ? can_preparse
+ : (is_lazy_top_level_function ||
+ (allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile))) &&
!(FLAG_validate_asm && scope()->IsAsmModule());
bool is_lazy_inner_function =
use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
@@ -2638,40 +2613,20 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// This Scope lives in the main zone. We'll migrate data into that zone later.
DeclarationScope* scope = NewFunctionScope(kind);
SetLanguageMode(scope, language_mode);
+#ifdef DEBUG
+ scope->SetScopeName(function_name);
+#endif
ZoneList<Statement*>* body = nullptr;
- int arity = -1;
int materialized_literal_count = -1;
int expected_property_count = -1;
- DuplicateFinder duplicate_finder(scanner()->unicode_cache());
bool should_be_used_once_hint = false;
- bool has_duplicate_parameters;
-
- FunctionState function_state(&function_state_, &scope_state_, scope);
-#ifdef DEBUG
- scope->SetScopeName(function_name);
-#endif
-
- ExpressionClassifier formals_classifier(this, &duplicate_finder);
-
- if (is_generator) PrepareGeneratorVariables(&function_state);
+ int num_parameters = -1;
+ int function_length = -1;
+ bool has_duplicate_parameters = false;
Expect(Token::LPAREN, CHECK_OK);
- int start_position = scanner()->location().beg_pos;
- this->scope()->set_start_position(start_position);
- ParserFormalParameters formals(scope);
- ParseFormalParameterList(&formals, CHECK_OK);
- arity = formals.Arity();
- Expect(Token::RPAREN, CHECK_OK);
- int formals_end_position = scanner()->location().end_pos;
-
- CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
- formals_end_position, CHECK_OK);
- Expect(Token::LBRACE, CHECK_OK);
- // Don't include the rest parameter into the function's formal parameter
- // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
- // which says whether we need to create an arguments adaptor frame).
- if (formals.has_rest) arity--;
+ scope->set_start_position(scanner()->location().beg_pos);
{
// Temporary zones can nest. When we migrate free variables (see below), we
@@ -2684,26 +2639,25 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// the previous zone is always restored after parsing the body. To be able
// to do scope analysis correctly after full parsing, we migrate needed
// information when the function is parsed.
- Zone temp_zone(zone()->allocator());
+ Zone temp_zone(zone()->allocator(), ZONE_NAME);
DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
#ifdef DEBUG
if (use_temp_zone) scope->set_needs_migration();
#endif
// Eager or lazy parse? If is_lazy_top_level_function, we'll parse
- // lazily. We'll call SkipLazyFunctionBody, which may decide to abort lazy
- // parsing if it suspects that wasn't a good idea. If so (in which case the
- // parser is expected to have backtracked), or if we didn't try to lazy
- // parse in the first place, we'll have to parse eagerly.
+ // lazily. We'll call SkipFunction, which may decide to
+ // abort lazy parsing if it suspects that wasn't a good idea. If so (in
+ // which case the parser is expected to have backtracked), or if we didn't
+ // try to lazy parse in the first place, we'll have to parse eagerly.
if (is_lazy_top_level_function || is_lazy_inner_function) {
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
- LazyParsingResult result = SkipLazyFunctionBody(
- &materialized_literal_count, &expected_property_count,
- is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
-
- materialized_literal_count += formals.materialized_literals_count +
- function_state.materialized_literal_count();
+ LazyParsingResult result =
+ SkipFunction(kind, scope, &num_parameters, &function_length,
+ &has_duplicate_parameters, &materialized_literal_count,
+ &expected_property_count, is_lazy_inner_function,
+ is_lazy_top_level_function, CHECK_OK);
if (result == kLazyParsingAborted) {
DCHECK(is_lazy_top_level_function);
@@ -2723,31 +2677,41 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
if (!is_lazy_top_level_function && !is_lazy_inner_function) {
- body = ParseEagerFunctionBody(function_name, pos, formals, kind,
- function_type, CHECK_OK);
-
- materialized_literal_count = function_state.materialized_literal_count();
- expected_property_count = function_state.expected_property_count();
+ body = ParseFunction(
+ function_name, pos, kind, function_type, scope, &num_parameters,
+ &function_length, &has_duplicate_parameters,
+ &materialized_literal_count, &expected_property_count, CHECK_OK);
}
- if (use_temp_zone || is_lazy_top_level_function) {
+ DCHECK(use_temp_zone || !is_lazy_top_level_function);
+ if (use_temp_zone) {
// If the preconditions are correct the function body should never be
// accessed, but do this anyway for better behaviour if they're wrong.
body = nullptr;
scope->AnalyzePartially(&previous_zone_ast_node_factory);
}
- // Parsing the body may change the language mode in our scope.
- language_mode = scope->language_mode();
+ if (FLAG_trace_preparse) {
+ PrintF(" [%s]: %i-%i %.*s\n",
+ is_lazy_top_level_function
+ ? "Preparse no-resolution"
+ : (use_temp_zone ? "Preparse resolution" : "Full parse"),
+ scope->start_position(), scope->end_position(),
+ function_name->byte_length(), function_name->raw_data());
+ if (is_lazy_top_level_function) {
+ CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
+ PreParseNoVariableResolution);
+ } else if (use_temp_zone) {
+ CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
+ PreParseWithVariableResolution);
+ }
+ }
- // Validate name and parameter names. We can do this only after parsing the
- // function, since the function can declare itself strict.
+ // Validate function name. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ language_mode = scope->language_mode();
CheckFunctionName(language_mode, function_name, function_name_validity,
function_name_location, CHECK_OK);
- const bool allow_duplicate_parameters =
- is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
- ValidateFormalParameters(language_mode, allow_duplicate_parameters,
- CHECK_OK);
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
@@ -2756,13 +2720,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->end_position());
}
CheckConflictingVarDeclarations(scope, CHECK_OK);
-
- if (body) {
- // If body can be inspected, rewrite queued destructuring assignments
- RewriteDestructuringAssignments();
- }
- has_duplicate_parameters =
- !classifier()->is_valid_formal_parameter_list_without_duplicates();
} // DiscardableZoneScope goes out of scope.
FunctionLiteral::ParameterFlag duplicate_parameters =
@@ -2772,8 +2729,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Note that the FunctionLiteral needs to be created in the main Zone again.
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
function_name, scope, body, materialized_literal_count,
- expected_property_count, arity, duplicate_parameters, function_type,
- eager_compile_hint, pos);
+ expected_property_count, num_parameters, function_length,
+ duplicate_parameters, function_type, eager_compile_hint, pos, true);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
@@ -2785,44 +2742,72 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
return function_literal;
}
-Parser::LazyParsingResult Parser::SkipLazyFunctionBody(
+Parser::LazyParsingResult Parser::SkipFunction(
+ FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
+ int* function_length, bool* has_duplicate_parameters,
int* materialized_literal_count, int* expected_property_count,
bool is_inner_function, bool may_abort, bool* ok) {
+ DCHECK_NE(kNoSourcePosition, function_scope->start_position());
if (produce_cached_parse_data()) CHECK(log_);
- int function_block_pos = position();
- DeclarationScope* scope = function_state_->scope();
- DCHECK(scope->is_function_scope());
+ DCHECK_IMPLIES(IsArrowFunction(kind),
+ scanner()->current_token() == Token::ARROW);
+
// Inner functions are not part of the cached data.
if (!is_inner_function && consume_cached_parse_data() &&
!cached_parse_data_->rejected()) {
- // If we have cached data, we use it to skip parsing the function body. The
- // data contains the information we need to construct the lazy function.
+ // If we have cached data, we use it to skip parsing the function. The data
+ // contains the information we need to construct the lazy function.
FunctionEntry entry =
- cached_parse_data_->GetFunctionEntry(function_block_pos);
+ cached_parse_data_->GetFunctionEntry(function_scope->start_position());
// Check that cached data is valid. If not, mark it as invalid (the embedder
// handles it). Note that end position greater than end of stream is safe,
// and hard to check.
- if (entry.is_valid() && entry.end_pos() > function_block_pos) {
+ if (entry.is_valid() &&
+ entry.end_pos() > function_scope->start_position()) {
+ total_preparse_skipped_ += entry.end_pos() - position();
+ function_scope->set_end_position(entry.end_pos());
scanner()->SeekForward(entry.end_pos() - 1);
-
- scope->set_end_position(entry.end_pos());
Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- total_preparse_skipped_ += scope->end_position() - function_block_pos;
+ *num_parameters = entry.num_parameters();
+ *function_length = entry.function_length();
+ *has_duplicate_parameters = entry.has_duplicate_parameters();
*materialized_literal_count = entry.literal_count();
*expected_property_count = entry.property_count();
- SetLanguageMode(scope, entry.language_mode());
- if (entry.uses_super_property()) scope->RecordSuperPropertyUsage();
- if (entry.calls_eval()) scope->RecordEvalCall();
+ SetLanguageMode(function_scope, entry.language_mode());
+ if (entry.uses_super_property())
+ function_scope->RecordSuperPropertyUsage();
+ if (entry.calls_eval()) function_scope->RecordEvalCall();
return kLazyParsingComplete;
}
cached_parse_data_->Reject();
}
+
// With no cached data, we partially parse the function, without building an
// AST. This gathers the data needed to build a lazy function.
- SingletonLogger logger;
- PreParser::PreParseResult result =
- ParseLazyFunctionBodyWithPreParser(&logger, is_inner_function, may_abort);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
+
+ if (reusable_preparser_ == NULL) {
+ reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
+ &pending_error_handler_,
+ runtime_call_stats_, stack_limit_);
+ reusable_preparser_->set_allow_lazy(true);
+#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
+ SET_ALLOW(natives);
+ SET_ALLOW(harmony_do_expressions);
+ SET_ALLOW(harmony_function_sent);
+ SET_ALLOW(harmony_async_await);
+ SET_ALLOW(harmony_trailing_commas);
+ SET_ALLOW(harmony_class_fields);
+#undef SET_ALLOW
+ }
+ // Aborting inner function preparsing would leave scopes in an inconsistent
+ // state; we don't parse inner functions in the abortable mode anyway.
+ DCHECK(!is_inner_function || !may_abort);
+
+ PreParser::PreParseResult result = reusable_preparser_->PreParseFunction(
+ kind, function_scope, parsing_module_, is_inner_function, may_abort,
+ use_counts_);
// Return immediately if pre-parser decided to abort parsing.
if (result == PreParser::kPreParseAbort) return kLazyParsingAborted;
@@ -2832,28 +2817,27 @@ Parser::LazyParsingResult Parser::SkipLazyFunctionBody(
*ok = false;
return kLazyParsingComplete;
}
- if (logger.has_error()) {
- ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
- logger.message(), logger.argument_opt(),
- logger.error_type());
+ if (pending_error_handler_.has_pending_error()) {
*ok = false;
return kLazyParsingComplete;
}
- scope->set_end_position(logger.end());
+ PreParserLogger* logger = reusable_preparser_->logger();
+ function_scope->set_end_position(logger->end());
Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- total_preparse_skipped_ += scope->end_position() - function_block_pos;
- *materialized_literal_count = logger.literals();
- *expected_property_count = logger.properties();
- SetLanguageMode(scope, logger.language_mode());
- if (logger.uses_super_property()) scope->RecordSuperPropertyUsage();
- if (logger.calls_eval()) scope->RecordEvalCall();
+ total_preparse_skipped_ +=
+ function_scope->end_position() - function_scope->start_position();
+ *num_parameters = logger->num_parameters();
+ *function_length = logger->function_length();
+ *has_duplicate_parameters = logger->has_duplicate_parameters();
+ *materialized_literal_count = logger->literals();
+ *expected_property_count = logger->properties();
if (!is_inner_function && produce_cached_parse_data()) {
DCHECK(log_);
- // Position right after terminal '}'.
- int body_end = scanner()->location().end_pos;
- log_->LogFunction(function_block_pos, body_end, *materialized_literal_count,
- *expected_property_count, language_mode(),
- scope->uses_super_property(), scope->calls_eval());
+ log_->LogFunction(
+ function_scope->start_position(), function_scope->end_position(),
+ *num_parameters, *function_length, *has_duplicate_parameters,
+ *materialized_literal_count, *expected_property_count, language_mode(),
+ function_scope->uses_super_property(), function_scope->calls_eval());
}
return kLazyParsingComplete;
}
@@ -3126,15 +3110,57 @@ Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
Yield::kOnExceptionThrow);
}
+ZoneList<Statement*>* Parser::ParseFunction(
+ const AstRawString* function_name, int pos, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters, int* function_length,
+ bool* has_duplicate_parameters, int* materialized_literal_count,
+ int* expected_property_count, bool* ok) {
+ FunctionState function_state(&function_state_, &scope_state_, function_scope);
+
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ ExpressionClassifier formals_classifier(this, &duplicate_finder);
+
+ if (IsGeneratorFunction(kind)) PrepareGeneratorVariables(&function_state);
+
+ ParserFormalParameters formals(function_scope);
+ ParseFormalParameterList(&formals, CHECK_OK);
+ Expect(Token::RPAREN, CHECK_OK);
+ int formals_end_position = scanner()->location().end_pos;
+ *num_parameters = formals.num_parameters();
+ *function_length = formals.function_length;
+
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest,
+ function_scope->start_position(), formals_end_position,
+ CHECK_OK);
+ Expect(Token::LBRACE, CHECK_OK);
+
+ ZoneList<Statement*>* body = ParseEagerFunctionBody(
+ function_name, pos, formals, kind, function_type, ok);
+
+ // Validate parameter names. We can do this only after parsing the function,
+ // since the function can declare itself strict.
+ const bool allow_duplicate_parameters =
+ is_sloppy(function_scope->language_mode()) && formals.is_simple &&
+ !IsConciseMethod(kind);
+ ValidateFormalParameters(function_scope->language_mode(),
+ allow_duplicate_parameters, CHECK_OK);
+
+ RewriteDestructuringAssignments();
+
+ *has_duplicate_parameters =
+ !classifier()->is_valid_formal_parameter_list_without_duplicates();
+
+ *materialized_literal_count = function_state.materialized_literal_count();
+ *expected_property_count = function_state.expected_property_count();
+ return body;
+}
+
ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const AstRawString* function_name, int pos,
const ParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok) {
- // Everything inside an eagerly parsed function will be parsed eagerly (see
- // comment above). Lazy inner functions are handled separately and they won't
- // require the mode to be PARSE_LAZILY (see ParseFunctionLiteral).
- // TODO(marja): Refactor parsing modes: remove this.
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+ ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
static const int kFunctionNameAssignmentIndex = 0;
@@ -3286,46 +3312,6 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
return result;
}
-PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
- SingletonLogger* logger, bool is_inner_function, bool may_abort) {
- // This function may be called on a background thread too; record only the
- // main thread preparse times.
- if (pre_parse_timer_ != NULL) {
- pre_parse_timer_->Start();
- }
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
-
- DCHECK_EQ(Token::LBRACE, scanner()->current_token());
-
- if (reusable_preparser_ == NULL) {
- reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
- NULL, stack_limit_);
- reusable_preparser_->set_allow_lazy(true);
-#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
- SET_ALLOW(natives);
- SET_ALLOW(harmony_do_expressions);
- SET_ALLOW(harmony_for_in);
- SET_ALLOW(harmony_function_sent);
- SET_ALLOW(harmony_restrictive_declarations);
- SET_ALLOW(harmony_async_await);
- SET_ALLOW(harmony_trailing_commas);
- SET_ALLOW(harmony_class_fields);
-#undef SET_ALLOW
- }
- // Aborting inner function preparsing would leave scopes in an inconsistent
- // state; we don't parse inner functions in the abortable mode anyway.
- DCHECK(!is_inner_function || !may_abort);
-
- DeclarationScope* function_scope = function_state_->scope();
- PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
- function_scope, parsing_module_, logger, is_inner_function, may_abort,
- use_counts_);
- if (pre_parse_timer_ != NULL) {
- pre_parse_timer_->Stop();
- }
- return result;
-}
-
Expression* Parser::InstallHomeObject(Expression* function_literal,
Expression* home_object) {
Block* do_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
@@ -3426,12 +3412,12 @@ FunctionLiteral* Parser::SynthesizeClassFieldInitializer(int count) {
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
ast_value_factory()->empty_string(), initializer_scope, body,
initializer_state.materialized_literal_count(),
- initializer_state.expected_property_count(), 0,
+ initializer_state.expected_property_count(), 0, count,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position());
+ FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position(),
+ true);
function_literal->set_is_class_field_initializer(true);
- function_literal->scope()->set_arity(count);
return function_literal;
}
@@ -3442,7 +3428,7 @@ FunctionLiteral* Parser::InsertClassFieldInitializer(
constructor->scope(),
constructor->scope()->NewUnresolved(
factory(), ast_value_factory()->this_string(), kNoSourcePosition,
- kNoSourcePosition + 4, THIS_VARIABLE)),
+ THIS_VARIABLE)),
kNoSourcePosition);
constructor->body()->InsertAt(0, call_initializer, zone());
return constructor;
@@ -3636,6 +3622,7 @@ Expression* Parser::RewriteClassLiteral(const AstRawString* name,
}
do_block->set_scope(scope()->FinalizeBlockScope());
do_expr->set_represented_function(class_info->constructor);
+ AddFunctionForNameInference(class_info->constructor);
return do_expr;
}
@@ -3668,10 +3655,8 @@ void Parser::InsertShadowingVarBindingInitializers(Block* inner_block) {
DCHECK(inner_scope->is_declaration_scope());
Scope* function_scope = inner_scope->outer_scope();
DCHECK(function_scope->is_function_scope());
- ZoneList<Declaration*>* decls = inner_scope->declarations();
BlockState block_state(&scope_state_, inner_scope);
- for (int i = 0; i < decls->length(); ++i) {
- Declaration* decl = decls->at(i);
+ for (Declaration* decl : *inner_scope->declarations()) {
if (decl->proxy()->var()->mode() != VAR || !decl->IsVariableDeclaration()) {
continue;
}
@@ -3781,6 +3766,15 @@ void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
}
isolate->counters()->total_preparse_skipped()->Increment(
total_preparse_skipped_);
+ if (!parsing_on_main_thread_ &&
+ FLAG_runtime_stats ==
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
+ // Copy over the counters from the background thread to the main counters on
+ // the isolate.
+ // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
+ // background parsing.
+ isolate->counters()->runtime_call_stats()->Add(runtime_call_stats_);
+ }
}
@@ -3804,18 +3798,12 @@ bool Parser::Parse(ParseInfo* info) {
// Ok to use Isolate here; this function is only called in the main thread.
DCHECK(parsing_on_main_thread_);
Isolate* isolate = info->isolate();
- pre_parse_timer_ = isolate->counters()->pre_parse();
- if (info->is_lazy()) {
- DCHECK(!info->is_eval());
- if (info->shared_info()->is_function()) {
- result = ParseLazy(isolate, info);
- } else {
- result = ParseProgram(isolate, info);
- }
- } else {
+ if (info->is_toplevel()) {
SetCachedData(info);
result = ParseProgram(isolate, info);
+ } else {
+ result = ParseFunction(isolate, info);
}
info->set_literal(result);
@@ -3830,8 +3818,12 @@ void Parser::ParseOnBackground(ParseInfo* info) {
DCHECK(info->literal() == NULL);
FunctionLiteral* result = NULL;
- CompleteParserRecorder recorder;
- if (produce_cached_parse_data()) log_ = &recorder;
+ ParserLogger logger;
+ if (produce_cached_parse_data()) log_ = &logger;
+ if (FLAG_runtime_stats) {
+ // Create separate runtime stats for background parsing.
+ runtime_call_stats_ = new (zone()) RuntimeCallStats();
+ }
std::unique_ptr<Utf16CharacterStream> stream;
Utf16CharacterStream* stream_ptr;
@@ -3854,12 +3846,12 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// don't). We work around this by storing all the scopes which need their end
// position set at the end of the script (the top scope and possible eval
// scopes) and set their end position after we know the script length.
- if (info->is_lazy()) {
- result = DoParseLazy(info, info->function_name(), stream_ptr);
- } else {
+ if (info->is_toplevel()) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
scanner_.Initialize(stream_ptr);
result = DoParseProgram(info);
+ } else {
+ result = DoParseFunction(info, info->function_name(), stream_ptr);
}
info->set_literal(result);
@@ -3868,9 +3860,13 @@ void Parser::ParseOnBackground(ParseInfo* info) {
// care of calling Parser::Internalize just before compilation.
if (produce_cached_parse_data()) {
- if (result != NULL) *info->cached_data() = recorder.GetScriptData();
+ if (result != NULL) *info->cached_data() = logger.GetScriptData();
log_ = NULL;
}
+ if (FLAG_runtime_stats) {
+ // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
+ // background parsing.
+ }
}
Parser::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
@@ -3943,9 +3939,9 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
zone());
- // Ensure hash is suitable as a Smi value
+ // Truncate hash to Smi-range.
Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
- args->Add(factory()->NewSmiLiteral(hash_obj->value(), pos), zone());
+ args->Add(factory()->NewNumberLiteral(hash_obj->value(), pos), zone());
Expression* call_site = factory()->NewCallRuntime(
Context::GET_TEMPLATE_CALL_SITE_INDEX, args, start);
@@ -4002,7 +3998,7 @@ ZoneList<Expression*>* Parser::PrepareSpreadArguments(
ZoneList<Expression*>* spread_list =
new (zone()) ZoneList<Expression*>(0, zone());
spread_list->Add(list->at(0)->AsSpread()->expression(), zone());
- args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
+ args->Add(factory()->NewCallRuntime(Runtime::kSpreadIterablePrepare,
spread_list, kNoSourcePosition),
zone());
return args;
@@ -4338,8 +4334,7 @@ Expression* Parser::RewriteAssignExponentiation(Expression* left,
Expression* result;
DCHECK_NOT_NULL(lhs->raw_name());
- result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position(),
- lhs->end_position());
+ result = ExpressionFromIdentifier(lhs->raw_name(), lhs->position());
args->Add(left, zone());
args->Add(right, zone());
Expression* call =
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 418bedf81b..736419daf2 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -7,9 +7,11 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/parsing/parser-base.h"
-#include "src/parsing/preparse-data.h"
#include "src/parsing/preparse-data-format.h"
+#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/pending-compilation-error-handler.h"
@@ -29,11 +31,11 @@ class FunctionEntry BASE_EMBEDDED {
enum {
kStartPositionIndex,
kEndPositionIndex,
+ kNumParametersIndex,
+ kFunctionLengthIndex,
kLiteralCountIndex,
kPropertyCountIndex,
- kLanguageModeIndex,
- kUsesSuperPropertyIndex,
- kCallsEvalIndex,
+ kFlagsIndex,
kSize
};
@@ -42,18 +44,43 @@ class FunctionEntry BASE_EMBEDDED {
FunctionEntry() : backing_() { }
- int start_pos() { return backing_[kStartPositionIndex]; }
- int end_pos() { return backing_[kEndPositionIndex]; }
- int literal_count() { return backing_[kLiteralCountIndex]; }
- int property_count() { return backing_[kPropertyCountIndex]; }
- LanguageMode language_mode() {
- DCHECK(is_valid_language_mode(backing_[kLanguageModeIndex]));
- return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
+ class LanguageModeField : public BitField<LanguageMode, 0, 1> {};
+ class UsesSuperPropertyField
+ : public BitField<bool, LanguageModeField::kNext, 1> {};
+ class CallsEvalField
+ : public BitField<bool, UsesSuperPropertyField::kNext, 1> {};
+ class HasDuplicateParametersField
+ : public BitField<bool, CallsEvalField::kNext, 1> {};
+
+ static uint32_t EncodeFlags(LanguageMode language_mode,
+ bool uses_super_property, bool calls_eval,
+ bool has_duplicate_parameters) {
+ return LanguageModeField::encode(language_mode) |
+ UsesSuperPropertyField::encode(uses_super_property) |
+ CallsEvalField::encode(calls_eval) |
+ HasDuplicateParametersField::encode(has_duplicate_parameters);
}
- bool uses_super_property() { return backing_[kUsesSuperPropertyIndex]; }
- bool calls_eval() { return backing_[kCallsEvalIndex]; }
- bool is_valid() { return !backing_.is_empty(); }
+ int start_pos() const { return backing_[kStartPositionIndex]; }
+ int end_pos() const { return backing_[kEndPositionIndex]; }
+ int num_parameters() const { return backing_[kNumParametersIndex]; }
+ int function_length() const { return backing_[kFunctionLengthIndex]; }
+ int literal_count() const { return backing_[kLiteralCountIndex]; }
+ int property_count() const { return backing_[kPropertyCountIndex]; }
+ LanguageMode language_mode() const {
+ return LanguageModeField::decode(backing_[kFlagsIndex]);
+ }
+ bool uses_super_property() const {
+ return UsesSuperPropertyField::decode(backing_[kFlagsIndex]);
+ }
+ bool calls_eval() const {
+ return CallsEvalField::decode(backing_[kFlagsIndex]);
+ }
+ bool has_duplicate_parameters() const {
+ return HasDuplicateParametersField::decode(backing_[kFlagsIndex]);
+ }
+
+ bool is_valid() const { return !backing_.is_empty(); }
private:
Vector<unsigned> backing_;
@@ -75,8 +102,6 @@ class ParseData {
FunctionEntry GetFunctionEntry(int start);
int FunctionCount();
- bool HasError();
-
unsigned* Data() { // Writable data as unsigned int array.
return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data()));
}
@@ -107,7 +132,6 @@ class ParseData {
// JAVASCRIPT PARSING
class Parser;
-class SingletonLogger;
struct ParserFormalParameters : FormalParametersBase {
@@ -134,7 +158,6 @@ struct ParserFormalParameters : FormalParametersBase {
: FormalParametersBase(scope), params(4, scope->zone()) {}
ZoneList<Parameter> params;
- int Arity() const { return params.length(); }
const Parameter& at(int i) const { return params[i]; }
};
@@ -168,7 +191,7 @@ struct ParserTypes<Parser> {
typedef ParserTargetScope TargetScope;
};
-class Parser : public ParserBase<Parser> {
+class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
public:
explicit Parser(ParseInfo* info);
~Parser() {
@@ -178,6 +201,8 @@ class Parser : public ParserBase<Parser> {
cached_parse_data_ = NULL;
}
+ static bool const IsPreParser() { return false; }
+
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
@@ -205,6 +230,27 @@ class Parser : public ParserBase<Parser> {
friend class ParserBase<Parser>;
friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
+ bool AllowsLazyParsingWithoutUnresolvedVariables() const {
+ return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
+ original_scope_);
+ }
+
+ bool parse_lazily() const { return mode_ == PARSE_LAZILY; }
+ enum Mode { PARSE_LAZILY, PARSE_EAGERLY };
+
+ class ParsingModeScope BASE_EMBEDDED {
+ public:
+ ParsingModeScope(Parser* parser, Mode mode)
+ : parser_(parser), old_mode_(parser->mode_) {
+ parser_->mode_ = mode;
+ }
+ ~ParsingModeScope() { parser_->mode_ = old_mode_; }
+
+ private:
+ Parser* parser_;
+ Mode old_mode_;
+ };
+
// Runtime encoding of different completion modes.
enum CompletionKind {
kNormalCompletion,
@@ -230,9 +276,10 @@ class Parser : public ParserBase<Parser> {
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram(Isolate* isolate, ParseInfo* info);
- FunctionLiteral* ParseLazy(Isolate* isolate, ParseInfo* info);
- FunctionLiteral* DoParseLazy(ParseInfo* info, const AstRawString* raw_name,
- Utf16CharacterStream* source);
+ FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info);
+ FunctionLiteral* DoParseFunction(ParseInfo* info,
+ const AstRawString* raw_name,
+ Utf16CharacterStream* source);
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(ParseInfo* info);
@@ -243,11 +290,12 @@ class Parser : public ParserBase<Parser> {
return compile_options_;
}
bool consume_cached_parse_data() const {
- return compile_options_ == ScriptCompiler::kConsumeParserCache &&
- cached_parse_data_ != NULL;
+ return allow_lazy() &&
+ compile_options_ == ScriptCompiler::kConsumeParserCache;
}
bool produce_cached_parse_data() const {
- return compile_options_ == ScriptCompiler::kProduceParserCache;
+ return allow_lazy() &&
+ compile_options_ == ScriptCompiler::kProduceParserCache;
}
void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
@@ -358,11 +406,13 @@ class Parser : public ParserBase<Parser> {
void VisitObjectLiteral(ObjectLiteral* node, Variable** temp_var);
void VisitArrayLiteral(ArrayLiteral* node, Variable** temp_var);
- bool IsBindingContext() const { return IsBindingContext(context_); }
+ bool IsBindingContext() const {
+ return context_ == BINDING || context_ == INITIALIZER;
+ }
bool IsInitializerContext() const { return context_ != ASSIGNMENT; }
- bool IsAssignmentContext() const { return IsAssignmentContext(context_); }
- bool IsAssignmentContext(PatternContext c) const;
- bool IsBindingContext(PatternContext c) const;
+ bool IsAssignmentContext() const {
+ return context_ == ASSIGNMENT || context_ == ASSIGNMENT_INITIALIZER;
+ }
bool IsSubPattern() const { return recursion_level_ > 1; }
PatternContext SetAssignmentContextIfNeeded(Expression* node);
PatternContext SetInitializerContextIfNeeded(Expression* node);
@@ -453,13 +503,13 @@ class Parser : public ParserBase<Parser> {
void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope);
VariableProxy* NewUnresolved(const AstRawString* name, int begin_pos,
- int end_pos = kNoSourcePosition,
VariableKind kind = NORMAL_VARIABLE);
VariableProxy* NewUnresolved(const AstRawString* name);
Variable* Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind,
VariableMode mode, InitializationFlag init, bool* ok,
- Scope* declaration_scope = nullptr);
+ Scope* declaration_scope = nullptr,
+ int var_end_pos = kNoSourcePosition);
Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
int pos, bool* ok);
Declaration* DeclareVariable(const AstRawString* name, VariableMode mode,
@@ -480,13 +530,11 @@ class Parser : public ParserBase<Parser> {
// by parsing the function with PreParser. Consumes the ending }.
// If may_abort == true, the (pre-)parser may decide to abort skipping
// in order to force the function to be eagerly parsed, after all.
- LazyParsingResult SkipLazyFunctionBody(int* materialized_literal_count,
- int* expected_property_count,
- bool is_inner_function, bool may_abort,
- bool* ok);
-
- PreParser::PreParseResult ParseLazyFunctionBodyWithPreParser(
- SingletonLogger* logger, bool is_inner_function, bool may_abort);
+ LazyParsingResult SkipFunction(
+ FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
+ int* function_length, bool* has_duplicate_parameters,
+ int* materialized_literal_count, int* expected_property_count,
+ bool is_inner_function, bool may_abort, bool* ok);
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
@@ -498,6 +546,13 @@ class Parser : public ParserBase<Parser> {
const ParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok);
+ ZoneList<Statement*>* ParseFunction(
+ const AstRawString* function_name, int pos, FunctionKind kind,
+ FunctionLiteral::FunctionType function_type,
+ DeclarationScope* function_scope, int* num_parameters,
+ int* function_length, bool* has_duplicate_parameters,
+ int* materialized_literal_count, int* expected_property_count, bool* ok);
+
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
class TemplateLiteral : public ZoneObject {
@@ -923,7 +978,7 @@ class Parser : public ParserBase<Parser> {
}
V8_INLINE Expression* ThisExpression(int pos = kNoSourcePosition) {
- return NewUnresolved(ast_value_factory()->this_string(), pos, pos + 4,
+ return NewUnresolved(ast_value_factory()->this_string(), pos,
THIS_VARIABLE);
}
@@ -935,12 +990,12 @@ class Parser : public ParserBase<Parser> {
Literal* ExpressionFromLiteral(Token::Value token, int pos);
V8_INLINE Expression* ExpressionFromIdentifier(
- const AstRawString* name, int start_position, int end_position,
+ const AstRawString* name, int start_position,
InferName infer = InferName::kYes) {
if (infer == InferName::kYes) {
fni_->PushVariableName(name);
}
- return NewUnresolved(name, start_position, end_position);
+ return NewUnresolved(name, start_position);
}
V8_INLINE Expression* ExpressionFromString(int pos) {
@@ -994,6 +1049,7 @@ class Parser : public ParserBase<Parser> {
Expression* initializer,
int initializer_end_position,
bool is_rest) {
+ parameters->UpdateArityAndFunctionLength(initializer != nullptr, is_rest);
bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
const AstRawString* name = is_simple
? pattern->AsVariableProxy()->raw_name()
@@ -1076,6 +1132,7 @@ class Parser : public ParserBase<Parser> {
Scanner scanner_;
PreParser* reusable_preparser_;
Scope* original_scope_; // for ES5 function declarations in sloppy eval
+ Mode mode_;
friend class ParserTarget;
friend class ParserTargetScope;
@@ -1090,9 +1147,49 @@ class Parser : public ParserBase<Parser> {
// parsing.
int use_counts_[v8::Isolate::kUseCounterFeatureCount];
int total_preparse_skipped_;
- HistogramTimer* pre_parse_timer_;
-
bool parsing_on_main_thread_;
+ ParserLogger* log_;
+};
+
+// ----------------------------------------------------------------------------
+// Target is a support class to facilitate manipulation of the
+// Parser's target_stack_ (the stack of potential 'break' and
+// 'continue' statement targets). Upon construction, a new target is
+// added; it is removed upon destruction.
+
+class ParserTarget BASE_EMBEDDED {
+ public:
+ ParserTarget(ParserBase<Parser>* parser, BreakableStatement* statement)
+ : variable_(&parser->impl()->target_stack_),
+ statement_(statement),
+ previous_(parser->impl()->target_stack_) {
+ parser->impl()->target_stack_ = this;
+ }
+
+ ~ParserTarget() { *variable_ = previous_; }
+
+ ParserTarget* previous() { return previous_; }
+ BreakableStatement* statement() { return statement_; }
+
+ private:
+ ParserTarget** variable_;
+ BreakableStatement* statement_;
+ ParserTarget* previous_;
+};
+
+class ParserTargetScope BASE_EMBEDDED {
+ public:
+ explicit ParserTargetScope(ParserBase<Parser>* parser)
+ : variable_(&parser->impl()->target_stack_),
+ previous_(parser->impl()->target_stack_) {
+ parser->impl()->target_stack_ = nullptr;
+ }
+
+ ~ParserTargetScope() { *variable_ = previous_; }
+
+ private:
+ ParserTarget** variable_;
+ ParserTarget* previous_;
};
} // namespace internal
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 7898f87244..f3d9bb02a3 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -68,16 +68,6 @@ Expression* Parser::PatternRewriter::RewriteDestructuringAssignment(
}
-bool Parser::PatternRewriter::IsAssignmentContext(PatternContext c) const {
- return c == ASSIGNMENT || c == ASSIGNMENT_INITIALIZER;
-}
-
-
-bool Parser::PatternRewriter::IsBindingContext(PatternContext c) const {
- return c == BINDING || c == INITIALIZER;
-}
-
-
Parser::PatternRewriter::PatternContext
Parser::PatternRewriter::SetAssignmentContextIfNeeded(Expression* node) {
PatternContext old_context = context();
@@ -142,9 +132,8 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// an initial value in the declaration (because they are initialized upon
// entering the function).
const AstRawString* name = pattern->raw_name();
- VariableProxy* proxy = factory()->NewVariableProxy(
- name, NORMAL_VARIABLE, parser_->scanner()->location().beg_pos,
- parser_->scanner()->location().end_pos);
+ VariableProxy* proxy =
+ factory()->NewVariableProxy(name, NORMAL_VARIABLE, pattern->position());
Declaration* declaration = factory()->NewVariableDeclaration(
proxy, descriptor_->scope, descriptor_->declaration_pos);
Variable* var = parser_->Declare(
diff --git a/deps/v8/src/parsing/preparse-data-format.h b/deps/v8/src/parsing/preparse-data-format.h
index f7d9f68cce..30d1d75a4f 100644
--- a/deps/v8/src/parsing/preparse-data-format.h
+++ b/deps/v8/src/parsing/preparse-data-format.h
@@ -14,22 +14,13 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 11;
+ static const unsigned kCurrentVersion = 13;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
- static const int kHasErrorOffset = 2;
- static const int kFunctionsSizeOffset = 3;
- static const int kSizeOffset = 4;
- static const int kHeaderSize = 5;
-
- // If encoding a message, the following positions are fixed.
- static const int kMessageStartPos = 0;
- static const int kMessageEndPos = 1;
- static const int kMessageArgCountPos = 2;
- static const int kParseErrorTypePos = 3;
- static const int kMessageTemplatePos = 4;
- static const int kMessageArgPos = 5;
+ static const int kFunctionsSizeOffset = 2;
+ static const int kSizeOffset = 3;
+ static const int kHeaderSize = 4;
static const unsigned char kNumberTerminator = 0x80u;
};
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index e1ef74c33c..e9a4e8f4b5 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -12,53 +12,36 @@
namespace v8 {
namespace internal {
+void ParserLogger::LogFunction(int start, int end, int num_parameters,
+ int function_length,
+ bool has_duplicate_parameters, int literals,
+ int properties, LanguageMode language_mode,
+ bool uses_super_property, bool calls_eval) {
+ function_store_.Add(start);
+ function_store_.Add(end);
+ function_store_.Add(num_parameters);
+ function_store_.Add(function_length);
+ function_store_.Add(literals);
+ function_store_.Add(properties);
+ function_store_.Add(
+ FunctionEntry::EncodeFlags(language_mode, uses_super_property, calls_eval,
+ has_duplicate_parameters));
+}
-CompleteParserRecorder::CompleteParserRecorder() {
+ParserLogger::ParserLogger() {
preamble_[PreparseDataConstants::kMagicOffset] =
PreparseDataConstants::kMagicNumber;
preamble_[PreparseDataConstants::kVersionOffset] =
PreparseDataConstants::kCurrentVersion;
- preamble_[PreparseDataConstants::kHasErrorOffset] = false;
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
preamble_[PreparseDataConstants::kSizeOffset] = 0;
- DCHECK_EQ(5, PreparseDataConstants::kHeaderSize);
+ DCHECK_EQ(4, PreparseDataConstants::kHeaderSize);
#ifdef DEBUG
prev_start_ = -1;
#endif
}
-
-void CompleteParserRecorder::LogMessage(int start_pos, int end_pos,
- MessageTemplate::Template message,
- const char* arg_opt,
- ParseErrorType error_type) {
- if (HasError()) return;
- preamble_[PreparseDataConstants::kHasErrorOffset] = true;
- function_store_.Reset();
- STATIC_ASSERT(PreparseDataConstants::kMessageStartPos == 0);
- function_store_.Add(start_pos);
- STATIC_ASSERT(PreparseDataConstants::kMessageEndPos == 1);
- function_store_.Add(end_pos);
- STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2);
- function_store_.Add((arg_opt == NULL) ? 0 : 1);
- STATIC_ASSERT(PreparseDataConstants::kParseErrorTypePos == 3);
- function_store_.Add(error_type);
- STATIC_ASSERT(PreparseDataConstants::kMessageTemplatePos == 4);
- function_store_.Add(static_cast<unsigned>(message));
- STATIC_ASSERT(PreparseDataConstants::kMessageArgPos == 5);
- if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
-}
-
-
-void CompleteParserRecorder::WriteString(Vector<const char> str) {
- function_store_.Add(str.length());
- for (int i = 0; i < str.length(); i++) {
- function_store_.Add(str[i]);
- }
-}
-
-
-ScriptData* CompleteParserRecorder::GetScriptData() {
+ScriptData* ParserLogger::GetScriptData() {
int function_size = function_store_.size();
int total_size = PreparseDataConstants::kHeaderSize + function_size;
unsigned* data = NewArray<unsigned>(total_size);
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index ddc4d03321..767484ad7f 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -46,158 +46,64 @@ class ScriptData {
DISALLOW_COPY_AND_ASSIGN(ScriptData);
};
-// Abstract interface for preparse data recorder.
-class ParserRecorder {
+class PreParserLogger final {
public:
- ParserRecorder() { }
- virtual ~ParserRecorder() { }
-
- // Logs the scope and some details of a function literal in the source.
- virtual void LogFunction(int start, int end, int literals, int properties,
- LanguageMode language_mode, bool uses_super_property,
- bool calls_eval) = 0;
-
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start, int end, MessageTemplate::Template message,
- const char* argument_opt,
- ParseErrorType error_type) = 0;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ParserRecorder);
-};
-
-
-class SingletonLogger : public ParserRecorder {
- public:
- SingletonLogger()
- : has_error_(false), start_(-1), end_(-1), error_type_(kSyntaxError) {}
- virtual ~SingletonLogger() {}
-
- void Reset() { has_error_ = false; }
-
- virtual void LogFunction(int start, int end, int literals, int properties,
- LanguageMode language_mode, bool uses_super_property,
- bool calls_eval) {
- DCHECK(!has_error_);
- start_ = start;
+ PreParserLogger()
+ : end_(-1),
+ num_parameters_(-1),
+ function_length_(-1),
+ has_duplicate_parameters_(false) {}
+
+ void LogFunction(int end, int num_parameters, int function_length,
+ bool has_duplicate_parameters, int literals,
+ int properties) {
end_ = end;
+ num_parameters_ = num_parameters;
+ function_length_ = function_length;
+ has_duplicate_parameters_ = has_duplicate_parameters;
literals_ = literals;
properties_ = properties;
- language_mode_ = language_mode;
- uses_super_property_ = uses_super_property;
- calls_eval_ = calls_eval;
}
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start, int end, MessageTemplate::Template message,
- const char* argument_opt, ParseErrorType error_type) {
- if (has_error_) return;
- has_error_ = true;
- start_ = start;
- end_ = end;
- message_ = message;
- argument_opt_ = argument_opt;
- error_type_ = error_type;
- }
-
- bool has_error() const { return has_error_; }
-
- int start() const { return start_; }
int end() const { return end_; }
+ int num_parameters() const {
+ return num_parameters_;
+ }
+ int function_length() const {
+ return function_length_;
+ }
+ bool has_duplicate_parameters() const {
+ return has_duplicate_parameters_;
+ }
int literals() const {
- DCHECK(!has_error_);
return literals_;
}
int properties() const {
- DCHECK(!has_error_);
return properties_;
}
- LanguageMode language_mode() const {
- DCHECK(!has_error_);
- return language_mode_;
- }
- bool uses_super_property() const {
- DCHECK(!has_error_);
- return uses_super_property_;
- }
- bool calls_eval() const {
- DCHECK(!has_error_);
- return calls_eval_;
- }
- ParseErrorType error_type() const {
- DCHECK(has_error_);
- return error_type_;
- }
- MessageTemplate::Template message() {
- DCHECK(has_error_);
- return message_;
- }
- const char* argument_opt() const {
- DCHECK(has_error_);
- return argument_opt_;
- }
private:
- bool has_error_;
- int start_;
int end_;
// For function entries.
+ int num_parameters_;
+ int function_length_;
+ bool has_duplicate_parameters_;
int literals_;
int properties_;
- LanguageMode language_mode_;
- bool uses_super_property_;
- bool calls_eval_;
- // For error messages.
- MessageTemplate::Template message_;
- const char* argument_opt_;
- ParseErrorType error_type_;
};
-
-class CompleteParserRecorder : public ParserRecorder {
+class ParserLogger final {
public:
- struct Key {
- bool is_one_byte;
- Vector<const byte> literal_bytes;
- };
-
- CompleteParserRecorder();
- virtual ~CompleteParserRecorder() {}
-
- virtual void LogFunction(int start, int end, int literals, int properties,
- LanguageMode language_mode, bool uses_super_property,
- bool calls_eval) {
- function_store_.Add(start);
- function_store_.Add(end);
- function_store_.Add(literals);
- function_store_.Add(properties);
- function_store_.Add(language_mode);
- function_store_.Add(uses_super_property);
- function_store_.Add(calls_eval);
- }
+ ParserLogger();
- // Logs an error message and marks the log as containing an error.
- // Further logging will be ignored, and ExtractData will return a vector
- // representing the error only.
- virtual void LogMessage(int start, int end, MessageTemplate::Template message,
- const char* argument_opt, ParseErrorType error_type);
- ScriptData* GetScriptData();
+ void LogFunction(int start, int end, int num_parameters, int function_length,
+ bool has_duplicate_parameters, int literals, int properties,
+ LanguageMode language_mode, bool uses_super_property,
+ bool calls_eval);
- bool HasError() {
- return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
- }
- Vector<unsigned> ErrorMessageData() {
- DCHECK(HasError());
- return function_store_.ToVector();
- }
+ ScriptData* GetScriptData();
private:
- void WriteString(Vector<const char> str);
-
Collector<unsigned> function_store_;
unsigned preamble_[PreparseDataConstants::kHeaderSize];
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 88470f7fa9..1b21c3dc1e 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -83,12 +83,16 @@ PreParserIdentifier PreParser::GetSymbol() const {
return symbol;
}
-PreParser::PreParseResult PreParser::PreParseLazyFunction(
- DeclarationScope* function_scope, bool parsing_module, ParserRecorder* log,
+PreParser::PreParseResult PreParser::PreParseFunction(
+ FunctionKind kind, DeclarationScope* function_scope, bool parsing_module,
bool is_inner_function, bool may_abort, int* use_counts) {
+ RuntimeCallTimerScope runtime_timer(
+ runtime_call_stats_,
+ track_unresolved_variables_
+ ? &RuntimeCallStats::PreParseWithVariableResolution
+ : &RuntimeCallStats::PreParseNoVariableResolution);
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
parsing_module_ = parsing_module;
- log_ = log;
use_counts_ = use_counts;
DCHECK(!track_unresolved_variables_);
track_unresolved_variables_ = is_inner_function;
@@ -98,24 +102,62 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
// PreParser.
DCHECK_NULL(scope_state_);
FunctionState function_state(&function_state_, &scope_state_, function_scope);
- DCHECK_EQ(Token::LBRACE, scanner()->current_token());
- bool ok = true;
- int start_position = peek_position();
- LazyParsingResult result = ParseLazyFunctionLiteralBody(may_abort, &ok);
+ // This indirection is needed so that we can use the CHECK_OK macros.
+ bool ok_holder = true;
+ bool* ok = &ok_holder;
+
+ PreParserFormalParameters formals(function_scope);
+ bool has_duplicate_parameters = false;
+ DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ std::unique_ptr<ExpressionClassifier> formals_classifier;
+
+ // Parse non-arrow function parameters. For arrow functions, the parameters
+ // have already been parsed.
+ if (!IsArrowFunction(kind)) {
+ formals_classifier.reset(new ExpressionClassifier(this, &duplicate_finder));
+ // We return kPreParseSuccess in failure cases too - errors are retrieved
+ // separately by Parser::SkipLazyFunctionBody.
+ ParseFormalParameterList(&formals, CHECK_OK_VALUE(kPreParseSuccess));
+ Expect(Token::RPAREN, CHECK_OK_VALUE(kPreParseSuccess));
+ int formals_end_position = scanner()->location().end_pos;
+
+ CheckArityRestrictions(
+ formals.arity, kind, formals.has_rest, function_scope->start_position(),
+ formals_end_position, CHECK_OK_VALUE(kPreParseSuccess));
+ has_duplicate_parameters =
+ !classifier()->is_valid_formal_parameter_list_without_duplicates();
+ }
+
+ Expect(Token::LBRACE, CHECK_OK_VALUE(kPreParseSuccess));
+ LazyParsingResult result = ParseStatementListAndLogFunction(
+ &formals, has_duplicate_parameters, may_abort, ok);
use_counts_ = nullptr;
track_unresolved_variables_ = false;
if (result == kLazyParsingAborted) {
return kPreParseAbort;
} else if (stack_overflow()) {
return kPreParseStackOverflow;
- } else if (!ok) {
- ReportUnexpectedToken(scanner()->current_token());
+ } else if (!*ok) {
+ DCHECK(pending_error_handler_->has_pending_error());
} else {
DCHECK_EQ(Token::RBRACE, scanner()->peek());
+
+ if (!IsArrowFunction(kind)) {
+ // Validate parameter names. We can do this only after parsing the
+ // function, since the function can declare itself strict.
+ const bool allow_duplicate_parameters =
+ is_sloppy(function_scope->language_mode()) && formals.is_simple &&
+ !IsConciseMethod(kind);
+ ValidateFormalParameters(function_scope->language_mode(),
+ allow_duplicate_parameters,
+ CHECK_OK_VALUE(kPreParseSuccess));
+ }
+
if (is_strict(function_scope->language_mode())) {
int end_pos = scanner()->location().end_pos;
- CheckStrictOctalLiteral(start_position, end_pos, &ok);
- CheckDecimalLiteralWithLeadingZero(start_position, end_pos);
+ CheckStrictOctalLiteral(function_scope->start_position(), end_pos, ok);
+ CheckDecimalLiteralWithLeadingZero(function_scope->start_position(),
+ end_pos);
}
}
return kPreParseSuccess;
@@ -142,10 +184,14 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
LanguageMode language_mode, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
+ RuntimeCallTimerScope runtime_timer(
+ runtime_call_stats_,
+ track_unresolved_variables_
+ ? &RuntimeCallStats::PreParseWithVariableResolution
+ : &RuntimeCallStats::PreParseNoVariableResolution);
// Parse function body.
PreParserStatementList body;
- bool outer_is_script_scope = scope()->is_script_scope();
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
FunctionState function_state(&function_state_, &scope_state_, function_scope);
@@ -163,17 +209,8 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
CheckArityRestrictions(formals.arity, kind, formals.has_rest, start_position,
formals_end_position, CHECK_OK);
- // See Parser::ParseFunctionLiteral for more information about lazy parsing
- // and lazy compilation.
- bool is_lazily_parsed = (outer_is_script_scope && allow_lazy() &&
- !function_state_->this_function_is_parenthesized());
-
Expect(Token::LBRACE, CHECK_OK);
- if (is_lazily_parsed) {
- ParseLazyFunctionLiteralBody(false, CHECK_OK);
- } else {
- ParseStatementList(body, Token::RBRACE, CHECK_OK);
- }
+ ParseStatementList(body, Token::RBRACE, CHECK_OK);
Expect(Token::RBRACE, CHECK_OK);
// Parsing the body may change the language mode in our scope.
@@ -187,18 +224,26 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
is_sloppy(language_mode) && formals.is_simple && !IsConciseMethod(kind);
ValidateFormalParameters(language_mode, allow_duplicate_parameters, CHECK_OK);
+ int end_position = scanner()->location().end_pos;
if (is_strict(language_mode)) {
- int end_position = scanner()->location().end_pos;
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
CheckDecimalLiteralWithLeadingZero(start_position, end_position);
}
+ function_scope->set_end_position(end_position);
+
+ if (FLAG_trace_preparse) {
+ PrintF(" [%s]: %i-%i\n",
+ track_unresolved_variables_ ? "Preparse resolution"
+ : "Preparse no-resolution",
+ function_scope->start_position(), function_scope->end_position());
+ }
return Expression::Default();
}
-PreParser::LazyParsingResult PreParser::ParseLazyFunctionLiteralBody(
+PreParser::LazyParsingResult PreParser::ParseStatementListAndLogFunction(
+ PreParserFormalParameters* formals, bool has_duplicate_parameters,
bool may_abort, bool* ok) {
- int body_start = position();
PreParserStatementList body;
LazyParsingResult result = ParseStatementList(
body, Token::RBRACE, may_abort, CHECK_OK_VALUE(kLazyParsingComplete));
@@ -207,28 +252,26 @@ PreParser::LazyParsingResult PreParser::ParseLazyFunctionLiteralBody(
// Position right after terminal '}'.
DCHECK_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
- DeclarationScope* scope = this->scope()->AsDeclarationScope();
- DCHECK(scope->is_function_scope());
- log_->LogFunction(body_start, body_end,
- function_state_->materialized_literal_count(),
- function_state_->expected_property_count(), language_mode(),
- scope->uses_super_property(), scope->calls_eval());
+ DCHECK(this->scope()->is_function_scope());
+ log_.LogFunction(body_end, formals->num_parameters(),
+ formals->function_length, has_duplicate_parameters,
+ function_state_->materialized_literal_count(),
+ function_state_->expected_property_count());
return kLazyParsingComplete;
}
PreParserExpression PreParser::ExpressionFromIdentifier(
- PreParserIdentifier name, int start_position, int end_position,
- InferName infer) {
+ PreParserIdentifier name, int start_position, InferName infer) {
if (track_unresolved_variables_) {
AstNodeFactory factory(ast_value_factory());
// Setting the Zone is necessary because zone_ might be the temp Zone, and
// AstValueFactory doesn't know about it.
factory.set_zone(zone());
DCHECK_NOT_NULL(name.string_);
- scope()->NewUnresolved(&factory, name.string_, start_position, end_position,
+ scope()->NewUnresolved(&factory, name.string_, start_position,
NORMAL_VARIABLE);
}
- return PreParserExpression::FromIdentifier(name);
+ return PreParserExpression::FromIdentifier(name, zone());
}
void PreParser::DeclareAndInitializeVariables(
@@ -236,20 +279,23 @@ void PreParser::DeclareAndInitializeVariables(
const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok) {
- if (declaration->pattern.string_) {
+ if (declaration->pattern.identifiers_ != nullptr) {
+ DCHECK(FLAG_lazy_inner_functions);
/* Mimic what Parser does when declaring variables (see
Parser::PatternRewriter::VisitVariableProxy).
var + no initializer -> RemoveUnresolved
- let + no initializer -> RemoveUnresolved
+ let / const + no initializer -> RemoveUnresolved
var + initializer -> RemoveUnresolved followed by NewUnresolved
- let + initializer -> RemoveUnresolved
+ let / const + initializer -> RemoveUnresolved
*/
if (declaration->initializer.IsEmpty() ||
- declaration_descriptor->mode == VariableMode::LET) {
- declaration_descriptor->scope->RemoveUnresolved(
- declaration->pattern.string_);
+ (declaration_descriptor->mode == VariableMode::LET ||
+ declaration_descriptor->mode == VariableMode::CONST)) {
+ for (auto identifier : *(declaration->pattern.identifiers_)) {
+ declaration_descriptor->scope->RemoveUnresolved(identifier);
+ }
}
}
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 4b5474854c..f4687eb3f7 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -118,27 +118,33 @@ class PreParserIdentifier {
const AstRawString* string_;
friend class PreParserExpression;
friend class PreParser;
+ friend class PreParserFactory;
};
class PreParserExpression {
public:
- PreParserExpression() : code_(TypeField::encode(kEmpty)) {}
+ PreParserExpression()
+ : code_(TypeField::encode(kEmpty)), identifiers_(nullptr) {}
static PreParserExpression Empty() { return PreParserExpression(); }
- static PreParserExpression Default() {
- return PreParserExpression(TypeField::encode(kExpression));
+ static PreParserExpression Default(
+ ZoneList<const AstRawString*>* identifiers = nullptr) {
+ return PreParserExpression(TypeField::encode(kExpression), identifiers);
}
static PreParserExpression Spread(PreParserExpression expression) {
- return PreParserExpression(TypeField::encode(kSpreadExpression));
+ return PreParserExpression(TypeField::encode(kSpreadExpression),
+ expression.identifiers_);
}
- static PreParserExpression FromIdentifier(PreParserIdentifier id) {
- return PreParserExpression(TypeField::encode(kIdentifierExpression) |
- IdentifierTypeField::encode(id.type_),
- id.string_);
+ static PreParserExpression FromIdentifier(PreParserIdentifier id,
+ Zone* zone) {
+ PreParserExpression expression(TypeField::encode(kIdentifierExpression) |
+ IdentifierTypeField::encode(id.type_));
+ expression.AddIdentifier(id.string_, zone);
+ return expression;
}
static PreParserExpression BinaryOperation(PreParserExpression left,
@@ -152,12 +158,16 @@ class PreParserExpression {
ExpressionTypeField::encode(kAssignment));
}
- static PreParserExpression ObjectLiteral() {
- return PreParserExpression(TypeField::encode(kObjectLiteralExpression));
+ static PreParserExpression ObjectLiteral(
+ ZoneList<const AstRawString*>* identifiers = nullptr) {
+ return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
+ identifiers);
}
- static PreParserExpression ArrayLiteral() {
- return PreParserExpression(TypeField::encode(kArrayLiteralExpression));
+ static PreParserExpression ArrayLiteral(
+ ZoneList<const AstRawString*>* identifiers = nullptr) {
+ return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
+ identifiers);
}
static PreParserExpression StringLiteral() {
@@ -313,7 +323,7 @@ class PreParserExpression {
// More dummy implementations of things PreParser doesn't need to track:
void set_index(int index) {} // For YieldExpressions
- void set_should_eager_compile() {}
+ void SetShouldEagerCompile() {}
void set_should_be_used_once_hint() {}
int position() const { return kNoSourcePosition; }
@@ -344,9 +354,20 @@ class PreParserExpression {
kAssignment
};
- explicit PreParserExpression(uint32_t expression_code,
- const AstRawString* string = nullptr)
- : code_(expression_code), string_(string) {}
+ explicit PreParserExpression(
+ uint32_t expression_code,
+ ZoneList<const AstRawString*>* identifiers = nullptr)
+ : code_(expression_code), identifiers_(identifiers) {}
+
+ void AddIdentifier(const AstRawString* identifier, Zone* zone) {
+ if (identifier == nullptr) {
+ return;
+ }
+ if (identifiers_ == nullptr) {
+ identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+ }
+ identifiers_->Add(identifier, zone);
+ }
// The first three bits are for the Type.
typedef BitField<Type, 0, 3> TypeField;
@@ -368,31 +389,61 @@ class PreParserExpression {
typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
uint32_t code_;
- // Non-nullptr if the expression is one identifier.
- const AstRawString* string_;
+ // If the PreParser is used in the identifier tracking mode,
+ // PreParserExpression accumulates identifiers in that expression.
+ ZoneList<const AstRawString*>* identifiers_;
friend class PreParser;
+ friend class PreParserFactory;
+ template <typename T>
+ friend class PreParserList;
};
// The pre-parser doesn't need to build lists of expressions, identifiers, or
-// the like.
+// the like. If the PreParser is used in identifier tracking mode, it needs to
+// build lists of identifiers though.
template <typename T>
class PreParserList {
public:
// These functions make list->Add(some_expression) work (and do nothing).
- PreParserList() : length_(0) {}
+ PreParserList() : length_(0), identifiers_(nullptr) {}
PreParserList* operator->() { return this; }
- void Add(T, void*) { ++length_; }
+ void Add(T, Zone* zone);
int length() const { return length_; }
static PreParserList Null() { return PreParserList(-1); }
bool IsNull() const { return length_ == -1; }
private:
- explicit PreParserList(int n) : length_(n) {}
+ explicit PreParserList(int n) : length_(n), identifiers_(nullptr) {}
int length_;
+ ZoneList<const AstRawString*>* identifiers_;
+
+ friend class PreParser;
+ friend class PreParserFactory;
};
+template <>
+inline void PreParserList<PreParserExpression>::Add(
+ PreParserExpression expression, Zone* zone) {
+ if (expression.identifiers_ != nullptr) {
+ DCHECK(FLAG_lazy_inner_functions);
+ DCHECK(zone != nullptr);
+ if (identifiers_ == nullptr) {
+ identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+ }
+ for (auto identifier : (*expression.identifiers_)) {
+ identifiers_->Add(identifier, zone);
+ }
+ }
+ ++length_;
+}
+
+template <typename T>
+void PreParserList<T>::Add(T, Zone* zone) {
+ ++length_;
+}
+
typedef PreParserList<PreParserExpression> PreParserExpressionList;
class PreParserStatement;
@@ -480,10 +531,18 @@ class PreParserStatement {
class PreParserFactory {
public:
- explicit PreParserFactory(void* unused_value_factory) {}
+ explicit PreParserFactory(AstValueFactory* ast_value_factory)
+ : zone_(ast_value_factory->zone()) {}
+
+ void set_zone(Zone* zone) { zone_ = zone; }
+
PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
int pos) {
- return PreParserExpression::Default();
+ // This is needed for object literal property names. Property names are
+ // normalized to string literals during object literal parsing.
+ PreParserExpression expression = PreParserExpression::Default();
+ expression.AddIdentifier(identifier.string_, zone_);
+ return expression;
}
PreParserExpression NewNumberLiteral(double number,
int pos) {
@@ -500,7 +559,7 @@ class PreParserFactory {
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int first_spread_index, int literal_index,
int pos) {
- return PreParserExpression::ArrayLiteral();
+ return PreParserExpression::ArrayLiteral(values.identifiers_);
}
PreParserExpression NewClassLiteralProperty(PreParserExpression key,
PreParserExpression value,
@@ -513,18 +572,18 @@ class PreParserFactory {
PreParserExpression value,
ObjectLiteralProperty::Kind kind,
bool is_computed_name) {
- return PreParserExpression::Default();
+ return PreParserExpression::Default(value.identifiers_);
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
bool is_computed_name) {
- return PreParserExpression::Default();
+ return PreParserExpression::Default(value.identifiers_);
}
PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
int literal_index,
int boilerplate_properties,
int pos) {
- return PreParserExpression::ObjectLiteral();
+ return PreParserExpression::ObjectLiteral(properties.identifiers_);
}
PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
@@ -599,10 +658,11 @@ class PreParserFactory {
PreParserExpression NewFunctionLiteral(
PreParserIdentifier name, Scope* scope, PreParserStatementList body,
int materialized_literal_count, int expected_property_count,
- int parameter_count,
+ int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
- FunctionLiteral::EagerCompileHint eager_compile_hint, int position) {
+ FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
+ bool has_braces) {
return PreParserExpression::Default();
}
@@ -693,15 +753,15 @@ class PreParserFactory {
static int dummy = 42;
return &dummy;
}
+
+ private:
+ Zone* zone_;
};
struct PreParserFormalParameters : FormalParametersBase {
explicit PreParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
- int arity = 0;
-
- int Arity() const { return arity; }
PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
};
@@ -779,11 +839,17 @@ class PreParser : public ParserBase<PreParser> {
};
PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
- ParserRecorder* log, uintptr_t stack_limit)
- : ParserBase<PreParser>(zone, scanner, stack_limit, NULL,
- ast_value_factory, log),
+ PendingCompilationErrorHandler* pending_error_handler,
+ RuntimeCallStats* runtime_call_stats, uintptr_t stack_limit)
+ : ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
+ ast_value_factory, runtime_call_stats),
use_counts_(nullptr),
- track_unresolved_variables_(false) {}
+ track_unresolved_variables_(false),
+ pending_error_handler_(pending_error_handler) {}
+
+ static bool const IsPreParser() { return true; }
+
+ PreParserLogger* logger() { return &log_; }
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
@@ -828,10 +894,11 @@ class PreParser : public ParserBase<PreParser> {
// keyword and parameters, and have consumed the initial '{'.
// At return, unless an error occurred, the scanner is positioned before the
// the final '}'.
- PreParseResult PreParseLazyFunction(DeclarationScope* function_scope,
- bool parsing_module, ParserRecorder* log,
- bool track_unresolved_variables,
- bool may_abort, int* use_counts);
+ PreParseResult PreParseFunction(FunctionKind kind,
+ DeclarationScope* function_scope,
+ bool parsing_module,
+ bool track_unresolved_variables,
+ bool may_abort, int* use_counts);
private:
// These types form an algebra over syntactic categories that is just
@@ -849,9 +916,16 @@ class PreParser : public ParserBase<PreParser> {
const PreParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok);
- V8_INLINE LazyParsingResult SkipLazyFunctionBody(
+ // Indicates that we won't switch from the preparser to the preparser; we'll
+ // just stay where we are.
+ bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
+ bool parse_lazily() const { return false; }
+
+ V8_INLINE LazyParsingResult SkipFunction(
+ FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
+ int* function_length, bool* has_duplicate_parameters,
int* materialized_literal_count, int* expected_property_count,
- bool track_unresolved_variables, bool may_abort, bool* ok) {
+ bool is_inner_function, bool may_abort, bool* ok) {
UNREACHABLE();
return kLazyParsingComplete;
}
@@ -860,7 +934,9 @@ class PreParser : public ParserBase<PreParser> {
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
LanguageMode language_mode, bool* ok);
- LazyParsingResult ParseLazyFunctionLiteralBody(bool may_abort, bool* ok);
+ LazyParsingResult ParseStatementListAndLogFunction(
+ PreParserFormalParameters* formals, bool has_duplicate_parameters,
+ bool maybe_abort, bool* ok);
struct TemplateLiteralState {};
@@ -1202,8 +1278,9 @@ class PreParser : public ParserBase<PreParser> {
MessageTemplate::Template message,
const char* arg = NULL,
ParseErrorType error_type = kSyntaxError) {
- log_->LogMessage(source_location.beg_pos, source_location.end_pos, message,
- arg, error_type);
+ pending_error_handler_->ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos, message,
+ arg, error_type);
}
V8_INLINE void ReportMessageAt(Scanner::Location source_location,
@@ -1322,7 +1399,7 @@ class PreParser : public ParserBase<PreParser> {
}
PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int start_position, int end_position,
+ PreParserIdentifier name, int start_position,
InferName infer = InferName::kYes);
V8_INLINE PreParserExpression ExpressionFromString(int pos) {
@@ -1372,7 +1449,7 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpression initializer,
int initializer_end_position,
bool is_rest) {
- ++parameters->arity;
+ parameters->UpdateArityAndFunctionLength(!initializer.IsEmpty(), is_rest);
}
V8_INLINE void DeclareFormalParameter(DeclarationScope* scope,
@@ -1408,7 +1485,7 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserExpression
ExpressionListToExpression(PreParserExpressionList args) {
- return PreParserExpression::Default();
+ return PreParserExpression::Default(args.identifiers_);
}
V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
@@ -1436,6 +1513,8 @@ class PreParser : public ParserBase<PreParser> {
int* use_counts_;
bool track_unresolved_variables_;
+ PreParserLogger log_;
+ PendingCompilationErrorHandler* pending_error_handler_;
};
PreParserExpression PreParser::SpreadCall(PreParserExpression function,
@@ -1454,7 +1533,6 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
const PreParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok) {
- ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
PreParserStatementList result;
Scope* inner_scope = scope();
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 57009bd207..69ac4171c2 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -20,6 +20,7 @@ class Processor final : public AstVisitor<Processor> {
result_assigned_(false),
replacement_(nullptr),
is_set_(false),
+ breakable_(false),
zone_(ast_value_factory->zone()),
closure_scope_(closure_scope),
factory_(ast_value_factory) {
@@ -33,6 +34,7 @@ class Processor final : public AstVisitor<Processor> {
result_assigned_(false),
replacement_(nullptr),
is_set_(false),
+ breakable_(false),
zone_(ast_value_factory->zone()),
closure_scope_(closure_scope),
factory_(ast_value_factory) {
@@ -77,6 +79,22 @@ class Processor final : public AstVisitor<Processor> {
// was hoping for.
bool is_set_;
+ bool breakable_;
+
+ class BreakableScope final {
+ public:
+ explicit BreakableScope(Processor* processor, bool breakable = true)
+ : processor_(processor), previous_(processor->breakable_) {
+ processor->breakable_ = processor->breakable_ || breakable;
+ }
+
+ ~BreakableScope() { processor_->breakable_ = previous_; }
+
+ private:
+ Processor* processor_;
+ bool previous_;
+ };
+
Zone* zone_;
DeclarationScope* closure_scope_;
AstNodeFactory factory_;
@@ -106,7 +124,13 @@ Statement* Processor::AssignUndefinedBefore(Statement* s) {
void Processor::Process(ZoneList<Statement*>* statements) {
- for (int i = statements->length() - 1; i >= 0; --i) {
+ // If we're in a breakable scope (named block, iteration, or switch), we walk
+ // all statements. The last value producing statement before the break needs
+ // to assign to .result. If we're not in a breakable scope, only the last
+ // value producing statement in the block assigns to .result, so we can stop
+ // early.
+ for (int i = statements->length() - 1; i >= 0 && (breakable_ || !is_set_);
+ --i) {
Visit(statements->at(i));
statements->Set(i, replacement_);
}
@@ -122,7 +146,10 @@ void Processor::VisitBlock(Block* node) {
// with some JS VMs: For instance, using smjs, print(eval('var x = 7'))
// returns 'undefined'. To obtain the same behavior with v8, we need
// to prevent rewriting in that case.
- if (!node->ignore_completion_value()) Process(node->statements());
+ if (!node->ignore_completion_value()) {
+ BreakableScope scope(this, node->labels() != nullptr);
+ Process(node->statements());
+ }
replacement_ = node;
}
@@ -140,35 +167,33 @@ void Processor::VisitExpressionStatement(ExpressionStatement* node) {
void Processor::VisitIfStatement(IfStatement* node) {
// Rewrite both branches.
bool set_after = is_set_;
+
Visit(node->then_statement());
node->set_then_statement(replacement_);
bool set_in_then = is_set_;
+
is_set_ = set_after;
Visit(node->else_statement());
node->set_else_statement(replacement_);
- is_set_ = is_set_ && set_in_then;
- replacement_ = node;
- if (!is_set_) {
- is_set_ = true;
- replacement_ = AssignUndefinedBefore(node);
- }
+ replacement_ = set_in_then && is_set_ ? node : AssignUndefinedBefore(node);
+ is_set_ = true;
}
void Processor::VisitIterationStatement(IterationStatement* node) {
- // Rewrite the body.
- bool set_after = is_set_;
- is_set_ = false; // We are in a loop, so we can't rely on [set_after].
+ // The statement may have to produce a value, so always assign undefined
+ // before.
+ // TODO(verwaest): Omit it if we know that there's no break/continue leaving
+ // it early.
+ DCHECK(breakable_ || !is_set_);
+ BreakableScope scope(this);
+
Visit(node->body());
node->set_body(replacement_);
- is_set_ = is_set_ && set_after;
- replacement_ = node;
- if (!is_set_) {
- is_set_ = true;
- replacement_ = AssignUndefinedBefore(node);
- }
+ replacement_ = AssignUndefinedBefore(node);
+ is_set_ = true;
}
@@ -200,73 +225,72 @@ void Processor::VisitForOfStatement(ForOfStatement* node) {
void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
// Rewrite both try and catch block.
bool set_after = is_set_;
+
Visit(node->try_block());
node->set_try_block(static_cast<Block*>(replacement_));
bool set_in_try = is_set_;
+
is_set_ = set_after;
Visit(node->catch_block());
node->set_catch_block(static_cast<Block*>(replacement_));
- is_set_ = is_set_ && set_in_try;
- replacement_ = node;
- if (!is_set_) {
- is_set_ = true;
- replacement_ = AssignUndefinedBefore(node);
- }
+ replacement_ = is_set_ && set_in_try ? node : AssignUndefinedBefore(node);
+ is_set_ = true;
}
void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
- // Rewrite both try and finally block (in reverse order).
- bool set_after = is_set_;
- is_set_ = true; // Don't normally need to assign in finally block.
- Visit(node->finally_block());
- node->set_finally_block(replacement_->AsBlock());
- { // Save .result value at the beginning of the finally block and restore it
- // at the end again: ".backup = .result; ...; .result = .backup"
- // This is necessary because the finally block does not normally contribute
- // to the completion value.
- CHECK_NOT_NULL(closure_scope());
- Variable* backup = closure_scope()->NewTemporary(
- factory()->ast_value_factory()->dot_result_string());
- Expression* backup_proxy = factory()->NewVariableProxy(backup);
- Expression* result_proxy = factory()->NewVariableProxy(result_);
- Expression* save = factory()->NewAssignment(
- Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
- Expression* restore = factory()->NewAssignment(
- Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
- node->finally_block()->statements()->InsertAt(
- 0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
- node->finally_block()->statements()->Add(
- factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+ // Only rewrite finally if it could contain 'break' or 'continue'. Always
+ // rewrite try.
+ if (breakable_) {
+ bool set_after = is_set_;
+ // Only set result before a 'break' or 'continue'.
+ is_set_ = true;
+ Visit(node->finally_block());
+ node->set_finally_block(replacement_->AsBlock());
+ // Save .result value at the beginning of the finally block and restore it
+ // at the end again: ".backup = .result; ...; .result = .backup"
+ // This is necessary because the finally block does not normally contribute
+ // to the completion value.
+ CHECK_NOT_NULL(closure_scope());
+ Variable* backup = closure_scope()->NewTemporary(
+ factory()->ast_value_factory()->dot_result_string());
+ Expression* backup_proxy = factory()->NewVariableProxy(backup);
+ Expression* result_proxy = factory()->NewVariableProxy(result_);
+ Expression* save = factory()->NewAssignment(
+ Token::ASSIGN, backup_proxy, result_proxy, kNoSourcePosition);
+ Expression* restore = factory()->NewAssignment(
+ Token::ASSIGN, result_proxy, backup_proxy, kNoSourcePosition);
+ node->finally_block()->statements()->InsertAt(
+ 0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
+ node->finally_block()->statements()->Add(
+ factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
+ is_set_ = set_after;
}
- is_set_ = set_after;
Visit(node->try_block());
node->set_try_block(replacement_->AsBlock());
- replacement_ = node;
- if (!is_set_) {
- is_set_ = true;
- replacement_ = AssignUndefinedBefore(node);
- }
+ replacement_ = is_set_ ? node : AssignUndefinedBefore(node);
+ is_set_ = true;
}
void Processor::VisitSwitchStatement(SwitchStatement* node) {
- // Rewrite statements in all case clauses (in reverse order).
+ // The statement may have to produce a value, so always assign undefined
+ // before.
+ // TODO(verwaest): Omit it if we know that there's no break/continue leaving
+ // it early.
+ DCHECK(breakable_ || !is_set_);
+ BreakableScope scope(this);
+ // Rewrite statements in all case clauses.
ZoneList<CaseClause*>* clauses = node->cases();
- bool set_after = is_set_;
for (int i = clauses->length() - 1; i >= 0; --i) {
CaseClause* clause = clauses->at(i);
Process(clause->statements());
}
- is_set_ = is_set_ && set_after;
- replacement_ = node;
- if (!is_set_) {
- is_set_ = true;
- replacement_ = AssignUndefinedBefore(node);
- }
+ replacement_ = AssignUndefinedBefore(node);
+ is_set_ = true;
}
@@ -285,12 +309,9 @@ void Processor::VisitBreakStatement(BreakStatement* node) {
void Processor::VisitWithStatement(WithStatement* node) {
Visit(node->statement());
node->set_statement(replacement_);
- replacement_ = node;
- if (!is_set_) {
- is_set_ = true;
- replacement_ = AssignUndefinedBefore(node);
- }
+ replacement_ = is_set_ ? node : AssignUndefinedBefore(node);
+ is_set_ = true;
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index 3f10cfa4c1..f7c7fd526f 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -14,6 +14,10 @@
namespace v8 {
namespace internal {
+namespace {
+const unibrow::uchar kUtf8Bom = 0xfeff;
+} // namespace
+
// ----------------------------------------------------------------------------
// BufferedUtf16CharacterStreams
//
@@ -259,7 +263,9 @@ bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
while (it < chunk.length && chars < position) {
unibrow::uchar t =
unibrow::Utf8::ValueOfIncremental(chunk.data[it], &incomplete_char);
- if (t != unibrow::Utf8::kIncomplete) {
+ if (t == kUtf8Bom && current_.pos.chars == 0) {
+ // BOM detected at beginning of the stream. Don't copy it.
+ } else if (t != unibrow::Utf8::kIncomplete) {
chars++;
if (t > unibrow::Utf16::kMaxNonSurrogateCharCode) chars++;
}
@@ -300,8 +306,6 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
return;
}
- static const unibrow::uchar kUtf8Bom = 0xfeff;
-
unibrow::Utf8::Utf8IncrementalBuffer incomplete_char =
current_.pos.incomplete_char;
size_t it;
@@ -349,9 +353,9 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// No chunks. Fetch at least one, so we can assume !chunks_.empty() below.
if (chunks_.empty()) {
- DCHECK_EQ(current_.chunk_no, 0);
- DCHECK_EQ(current_.pos.bytes, 0);
- DCHECK_EQ(current_.pos.chars, 0);
+ DCHECK_EQ(current_.chunk_no, 0u);
+ DCHECK_EQ(current_.pos.bytes, 0u);
+ DCHECK_EQ(current_.pos.chars, 0u);
FetchChunk();
}
@@ -438,7 +442,8 @@ size_t Utf8ExternalStreamingStream::FillBuffer(size_t position) {
FillBufferFromCurrentChunk();
}
- DCHECK_EQ(current_.pos.chars - position, buffer_end_ - buffer_cursor_);
+ DCHECK_EQ(current_.pos.chars - position,
+ static_cast<size_t>(buffer_end_ - buffer_cursor_));
return buffer_end_ - buffer_cursor_;
}
@@ -497,7 +502,7 @@ size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source_,
// let's look at chunks back-to-front.
size_t chunk_no = chunks.size() - 1;
while (chunks[chunk_no].byte_pos > position) {
- DCHECK_NE(chunk_no, 0);
+ DCHECK_NE(chunk_no, 0u);
chunk_no--;
}
DCHECK_LE(chunks[chunk_no].byte_pos, position);
@@ -537,6 +542,7 @@ size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
return len;
}
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
// ----------------------------------------------------------------------------
// TwoByteExternalStreamingStream
//
@@ -592,7 +598,7 @@ bool TwoByteExternalStreamingStream::ReadBlock() {
// one_char_buffer_ to hold the full character.
bool lonely_byte = (chunks_[chunk_no].byte_pos == (2 * position + 1));
if (lonely_byte) {
- DCHECK_NE(chunk_no, 0);
+ DCHECK_NE(chunk_no, 0u);
Chunk& previous_chunk = chunks_[chunk_no - 1];
#ifdef V8_TARGET_BIG_ENDIAN
uc16 character = current.data[0] |
@@ -630,6 +636,162 @@ bool TwoByteExternalStreamingStream::ReadBlock() {
return true;
}
+#else
+
+// ----------------------------------------------------------------------------
+// TwoByteExternalBufferedStream
+//
+// This class is made specifically to address unaligned access to 16-bit data
+// in MIPS and ARM architectures. It replaces class
+// TwoByteExternalStreamingStream which in some cases does have unaligned
+// accesse to 16-bit data
+
+class TwoByteExternalBufferedStream : public Utf16CharacterStream {
+ public:
+ explicit TwoByteExternalBufferedStream(
+ ScriptCompiler::ExternalSourceStream* source);
+ ~TwoByteExternalBufferedStream();
+
+ protected:
+ static const size_t kBufferSize = 512;
+
+ bool ReadBlock() override;
+
+ // FillBuffer should read up to kBufferSize characters at position and store
+ // them into buffer_[0..]. It returns the number of characters stored.
+ size_t FillBuffer(size_t position, size_t chunk_no);
+
+ // Fixed sized buffer that this class reads from.
+ // The base class' buffer_start_ should always point to buffer_.
+ uc16 buffer_[kBufferSize];
+
+ Chunks chunks_;
+ ScriptCompiler::ExternalSourceStream* source_;
+};
+
+TwoByteExternalBufferedStream::TwoByteExternalBufferedStream(
+ ScriptCompiler::ExternalSourceStream* source)
+ : Utf16CharacterStream(buffer_, buffer_, buffer_, 0), source_(source) {}
+
+TwoByteExternalBufferedStream::~TwoByteExternalBufferedStream() {
+ DeleteChunks(chunks_);
+}
+
+bool TwoByteExternalBufferedStream::ReadBlock() {
+ size_t position = pos();
+ // Find chunk in which the position belongs
+ size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+
+ // Out of data? Return 0.
+ if (chunks_[chunk_no].byte_length == 0) {
+ buffer_cursor_ = buffer_start_;
+ buffer_end_ = buffer_start_;
+ return false;
+ }
+
+ Chunk& current = chunks_[chunk_no];
+
+ bool odd_start = current.byte_pos % 2;
+ // Common case: character is in current chunk.
+ DCHECK_LE(current.byte_pos, 2 * position + odd_start);
+ DCHECK_LT(2 * position + 1, current.byte_pos + current.byte_length);
+
+ // If character starts on odd address copy text in buffer so there is always
+ // aligned access to characters. This is important on MIPS and ARM
+ // architectures. Otherwise read characters from memory directly.
+ if (!odd_start) {
+ buffer_start_ = reinterpret_cast<const uint16_t*>(current.data);
+ size_t number_chars = current.byte_length / 2;
+ buffer_end_ = buffer_start_ + number_chars;
+ buffer_pos_ = current.byte_pos / 2;
+ buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
+ DCHECK_EQ(position, pos());
+ return true;
+ } else {
+ buffer_start_ = buffer_;
+ buffer_pos_ = position;
+ buffer_cursor_ = buffer_;
+ buffer_end_ = buffer_ + FillBuffer(position, chunk_no);
+ DCHECK_EQ(pos(), position);
+ DCHECK_LE(buffer_end_, buffer_start_ + kBufferSize);
+ return buffer_cursor_ < buffer_end_;
+ }
+}
+
+size_t TwoByteExternalBufferedStream::FillBuffer(size_t position,
+ size_t chunk_no) {
+ DCHECK_EQ(chunks_[chunk_no].byte_pos % 2, 1u);
+ bool odd_start = true;
+ // Align buffer_pos_ to the size of the buffer.
+ {
+ size_t new_pos = position / kBufferSize * kBufferSize;
+ if (new_pos != position) {
+ chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1);
+ buffer_pos_ = new_pos;
+ buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
+ position = new_pos;
+ odd_start = chunks_[chunk_no].byte_pos % 2;
+ }
+ }
+
+ Chunk* current = &chunks_[chunk_no];
+
+ // Annoying edge case: Chunks may not be 2-byte aligned, meaning that a
+ // character may be split between the previous and the current chunk.
+ // If we find such a lonely byte at the beginning of the chunk, we'll copy
+ // it to the first byte in buffer_.
+ size_t totalLength = 0;
+ bool lonely_byte = (current->byte_pos == (2 * position + 1));
+ if (lonely_byte) {
+ DCHECK_NE(chunk_no, 0u);
+ Chunk& previous_chunk = chunks_[chunk_no - 1];
+ *reinterpret_cast<uint8_t*>(buffer_) =
+ previous_chunk.data[previous_chunk.byte_length - 1];
+ totalLength++;
+ }
+
+ // Common case: character is in current chunk.
+ DCHECK_LE(current->byte_pos, 2 * position + odd_start);
+ DCHECK_LT(2 * position + 1, current->byte_pos + current->byte_length);
+
+ // Copy characters from current chunk starting from chunk_pos to the end of
+ // buffer or chunk.
+ size_t chunk_pos = position - current->byte_pos / 2;
+ size_t start_offset = odd_start && chunk_pos != 0;
+ size_t bytes_to_move =
+ i::Min(2 * kBufferSize - lonely_byte,
+ current->byte_length - 2 * chunk_pos + start_offset);
+ i::MemMove(reinterpret_cast<uint8_t*>(buffer_) + lonely_byte,
+ current->data + 2 * chunk_pos - start_offset, bytes_to_move);
+
+ // Fill up the rest of the buffer if there is space and data left.
+ totalLength += bytes_to_move;
+ position = (current->byte_pos + current->byte_length) / 2;
+ if (position - buffer_pos_ < kBufferSize) {
+ chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+ current = &chunks_[chunk_no];
+ odd_start = current->byte_pos % 2;
+ bytes_to_move = i::Min(2 * kBufferSize - totalLength, current->byte_length);
+ while (bytes_to_move) {
+ // Common case: character is in current chunk.
+ DCHECK_LE(current->byte_pos, 2 * position + odd_start);
+ DCHECK_LT(2 * position + 1, current->byte_pos + current->byte_length);
+
+ i::MemMove(reinterpret_cast<uint8_t*>(buffer_) + totalLength,
+ current->data, bytes_to_move);
+ totalLength += bytes_to_move;
+ position = (current->byte_pos + current->byte_length) / 2;
+ chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+ current = &chunks_[chunk_no];
+ odd_start = current->byte_pos % 2;
+ bytes_to_move =
+ i::Min(2 * kBufferSize - totalLength, current->byte_length);
+ }
+ }
+ return totalLength / 2;
+}
+#endif
+
// ----------------------------------------------------------------------------
// ScannerStream: Create stream instances.
@@ -669,7 +831,11 @@ Utf16CharacterStream* ScannerStream::For(
v8::ScriptCompiler::StreamedSource::Encoding encoding) {
switch (encoding) {
case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
return new TwoByteExternalStreamingStream(source_stream);
+#else
+ return new TwoByteExternalBufferedStream(source_stream);
+#endif
case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
return new OneByteExternalStreamingStream(source_stream);
case v8::ScriptCompiler::StreamedSource::UTF8:
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index e41b56fd4b..363ab7dfe9 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -59,7 +59,7 @@ void Scanner::BookmarkScope::Apply() {
} else {
scanner_->SeekNext(bookmark_);
scanner_->Next();
- DCHECK_EQ(scanner_->location().beg_pos, bookmark_);
+ DCHECK_EQ(scanner_->location().beg_pos, static_cast<int>(bookmark_));
}
bookmark_ = kBookmarkWasApplied;
}
@@ -1153,7 +1153,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (next_.literal_chars->one_byte_literal().length() <= 10 &&
value <= Smi::kMaxValue && c0_ != '.' && c0_ != 'e' && c0_ != 'E') {
- next_.smi_value_ = static_cast<int>(value);
+ next_.smi_value_ = static_cast<uint32_t>(value);
literal.Complete();
HandleLeadSurrogate();
@@ -1638,7 +1638,7 @@ void Scanner::SeekNext(size_t position) {
// 3, re-scan, by scanning the look-ahead char + 1 token (next_).
c0_ = source_->Advance();
Next();
- DCHECK_EQ(next_.location.beg_pos, position);
+ DCHECK_EQ(next_.location.beg_pos, static_cast<int>(position));
}
} // namespace internal
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index b2b1a8a3f4..6f6fab5543 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -284,7 +284,7 @@ class Scanner {
}
// Returns the value of the last smi that was scanned.
- int smi_value() const { return current_.smi_value_; }
+ uint32_t smi_value() const { return current_.smi_value_; }
// Seek forward to the given position. This operation does not
// work in general, for instance when there are pushed back
@@ -369,14 +369,15 @@ class Scanner {
INLINE(void AddChar(uc32 code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
if (is_one_byte_) {
- if (code_unit <= unibrow::Latin1::kMaxChar) {
+ if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
backing_store_[position_] = static_cast<byte>(code_unit);
position_ += kOneByteSize;
return;
}
ConvertToTwoByte();
}
- if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ if (code_unit <=
+ static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
*reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
position_ += kUC16Size;
} else {
@@ -487,7 +488,7 @@ class Scanner {
Location location;
LiteralBuffer* literal_chars;
LiteralBuffer* raw_literal_chars;
- int smi_value_;
+ uint32_t smi_value_;
Token::Value token;
};
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
index 3e88efc999..8f7660dd6b 100644
--- a/deps/v8/src/pending-compilation-error-handler.cc
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -13,20 +13,29 @@
namespace v8 {
namespace internal {
+Handle<String> PendingCompilationErrorHandler::ArgumentString(
+ Isolate* isolate) {
+ if (arg_ != NULL) return arg_->string();
+ if (char_arg_ != NULL) {
+ return isolate->factory()
+ ->NewStringFromUtf8(CStrVector(char_arg_))
+ .ToHandleChecked();
+ }
+ if (!handle_arg_.is_null()) return handle_arg_;
+ return isolate->factory()->undefined_string();
+}
+
+Handle<String> PendingCompilationErrorHandler::FormatMessage(Isolate* isolate) {
+ return MessageTemplate::FormatMessage(isolate, message_,
+ ArgumentString(isolate));
+}
+
void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
Handle<Script> script) {
if (!has_pending_error_) return;
MessageLocation location(script, start_position_, end_position_);
Factory* factory = isolate->factory();
- Handle<String> argument;
- if (arg_ != NULL) {
- argument = arg_->string();
- } else if (char_arg_ != NULL) {
- argument =
- factory->NewStringFromUtf8(CStrVector(char_arg_)).ToHandleChecked();
- } else if (!handle_arg_.is_null()) {
- argument = handle_arg_;
- }
+ Handle<String> argument = ArgumentString(isolate);
isolate->debug()->OnCompileError(script);
Handle<Object> error;
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
index 6190d49f52..563bef93a2 100644
--- a/deps/v8/src/pending-compilation-error-handler.h
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -75,8 +75,11 @@ class PendingCompilationErrorHandler {
bool has_pending_error() const { return has_pending_error_; }
void ThrowPendingError(Isolate* isolate, Handle<Script> script);
+ Handle<String> FormatMessage(Isolate* isolate);
private:
+ Handle<String> ArgumentString(Isolate* isolate);
+
bool has_pending_error_;
int start_position_;
int end_position_;
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index a8c7255396..6641a1259b 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -272,7 +272,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
static_cast<size_t>(name_length));
name_string = std::unique_ptr<char[]>(buffer);
}
- DCHECK_EQ(name_length, strlen(name_string.get()));
+ DCHECK_EQ(name_length, static_cast<int>(strlen(name_string.get())));
PerfJitCodeDebugInfo debug_info;
@@ -299,7 +299,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
for (SourcePositionTableIterator iterator(code->source_position_table());
!iterator.done(); iterator.Advance()) {
- int position = iterator.source_position();
+ int position = iterator.source_position().ScriptOffset();
int line_number = Script::GetLineNumber(script, position);
// Compute column.
int relative_line_number = line_number - script_line_offset;
@@ -356,8 +356,8 @@ void PerfJitLogger::LogWriteUnwindingInfo(Code* code) {
}
char padding_bytes[] = "\0\0\0\0\0\0\0\0";
- DCHECK_LT(padding_size, sizeof(padding_bytes));
- LogWriteBytes(padding_bytes, padding_size);
+ DCHECK_LT(padding_size, static_cast<int>(sizeof(padding_bytes)));
+ LogWriteBytes(padding_bytes, static_cast<int>(padding_size));
}
void PerfJitLogger::CodeMoveEvent(AbstractCode* from, Address to) {
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 7843e2e07d..f49ac6305e 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -1216,7 +1216,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index ce423ea53a..a48fc06116 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -561,7 +561,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ and_(r5, lhs, rhs);
__ JumpIfNotSmi(r5, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1576,13 +1576,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ SmiToShortArrayOffset(r4, r4);
__ addi(r4, r4, Operand(2));
- __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r3, &runtime);
- __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
- __ bne(&runtime);
+ // Check that the last match info is a FixedArray.
+ __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ LoadP(last_match_info_elements,
- FieldMemOperand(r3, JSArray::kElementsOffset));
__ LoadP(r3,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
@@ -1591,7 +1588,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ LoadP(
r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
+ __ addi(r5, r4, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ SmiUntag(r0, r3);
__ cmp(r5, r0);
__ bgt(&runtime);
@@ -1601,21 +1598,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the capture count.
__ SmiTag(r5, r4);
__ StoreP(r5, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset),
+ RegExpMatchInfo::kNumberOfCapturesOffset),
r0);
// Store last subject and last input.
__ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset),
+ RegExpMatchInfo::kLastSubjectOffset),
r0);
__ mr(r5, subject);
- __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
- subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset, subject, r10,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
__ mr(subject, r5);
__ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset),
+ RegExpMatchInfo::kLastInputOffset),
r0);
- __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
- subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset, subject, r10,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1626,10 +1625,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r5: offsets vector
Label next_capture;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ addi(
- r3, last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+ // counts down until wrapping after zero.
+ __ addi(r3, last_match_info_elements,
+ Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
+ kPointerSize));
__ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu
__ mtctr(r4);
__ bind(&next_capture);
@@ -1641,7 +1640,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bdnz(&next_capture);
// Return last match info.
- __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+ __ mr(r3, last_match_info_elements);
__ addi(sp, sp, Operand(4 * kPointerSize));
__ Ret();
@@ -1873,6 +1872,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // r3 - number of arguments
// r4 - function
// r6 - slot id
// r5 - vector
@@ -1881,25 +1881,22 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ cmp(r4, r8);
__ bne(miss);
- __ mov(r3, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r5, r6, r0);
__ mr(r5, r7);
__ mr(r6, r4);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
+ // r3 - number of arguments
// r4 - function
// r6 - slot id (Smi)
// r5 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does r4 match the recorded monomorphic target?
__ SmiToPtrArrayOffset(r9, r6);
@@ -1933,7 +1930,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r5, r6, r0);
- __ mov(r3, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1977,7 +1973,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
IncrementCallCount(masm, r5, r6, r0);
__ bind(&call_count_incremented);
- __ mov(r3, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2010,13 +2005,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r5);
- __ Push(r6);
- __ Push(cp, r4);
+ __ SmiTag(r3);
+ __ Push(r3, r5, r6, cp, r4);
__ CallStub(&create_stub);
- __ Pop(cp, r4);
- __ Pop(r6);
- __ Pop(r5);
+ __ Pop(r5, r6, cp, r4);
+ __ Pop(r3);
+ __ SmiUntag(r3);
}
__ b(&call_function);
@@ -2033,14 +2027,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push the function and feedback info.
- __ Push(r4, r5, r6);
+ // Preserve the number of arguments as Smi.
+ __ SmiTag(r3);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(r3, r4, r5, r6);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
+
+ // Restore number of arguments.
+ __ Pop(r3);
+ __ SmiUntag(r3);
}
@@ -3195,21 +3196,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r5);
CallICStub stub(isolate(), state());
@@ -3217,14 +3203,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3318,184 +3296,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(ip);
}
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
- Register name = LoadWithVectorDescriptor::NameRegister(); // r5
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&miss);
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, r10);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r4
- Register key = LoadWithVectorDescriptor::NameRegister(); // r5
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r6
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r3
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ bne(&miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r4
- Register key = StoreWithVectorDescriptor::NameRegister(); // r5
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r6
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r7
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3)); // r3
- Register feedback = r8;
- Register receiver_map = r9;
- Register scratch1 = r10;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- Register scratch2 = r11;
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&miss);
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3862,30 +3668,19 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ cmpi(r3, Operand::Zero());
- __ bne(&not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ Label not_zero_case, not_one_case;
+ __ cmpi(r3, Operand::Zero());
+ __ bne(&not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- __ bind(&not_zero_case);
- __ cmpi(r3, Operand(1));
- __ bgt(&not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
+ __ bind(&not_zero_case);
+ __ cmpi(r3, Operand(1));
+ __ bgt(&not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
@@ -3937,23 +3732,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
- __ StorePX(r4, MemOperand(sp, r0));
- __ addi(r3, r3, Operand(3));
- break;
- case NONE:
- __ StoreP(r4, MemOperand(sp, 0 * kPointerSize));
- __ li(r3, Operand(3));
- break;
- case ONE:
- __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
- __ li(r3, Operand(4));
- break;
- }
-
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r0));
+ __ addi(r3, r3, Operand(3));
__ Push(r6, r5);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4385,7 +4166,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::kZero, r0);
if (CpuFeatures::IsSupported(ISELECT)) {
__ SmiToPtrArrayOffset(r11, r9);
__ addi(r11, r11, Operand(kParameterMapHeaderSize));
@@ -4467,7 +4248,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// r9 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::kZero, r0);
if (CpuFeatures::IsSupported(ISELECT)) {
__ isel(eq, r4, r7, r4);
__ beq(&skip_parameter_map);
@@ -4690,134 +4471,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register value = r3;
- Register slot = r5;
-
- Register cell = r4;
- Register cell_details = r6;
- Register cell_value = r7;
- Register cell_value_map = r8;
- Register scratch = r9;
-
- Register context = cp;
- Register context_temp = cell;
-
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue);
- }
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); i++) {
- __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = context_temp;
- }
-
- // Load the PropertyCell at the specified slot.
- __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2));
- __ add(cell, context, r0);
- __ LoadP(cell, ContextMemOperand(cell));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details);
- __ andi(cell_details, cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ bne(&not_mutable_data);
- __ JumpIfSmi(value, &fast_smi_case);
-
- __ bind(&fast_heapobject_case);
- __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
- // RecordWriteField clobbers the value register, so we copy it before the
- // call.
- __ mr(r6, value);
- __ RecordWriteField(cell, PropertyCell::kValueOffset, r6, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&not_mutable_data);
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ cmp(cell_value, value);
- __ bne(&not_same_value);
-
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ andi(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
- __ bne(&slow_case, cr0);
-
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ cmpi(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ beq(&done);
- __ cmpi(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ beq(&done);
- __ cmpi(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(eq, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ bne(&slow_case);
-
- // Now either both old and new values must be smis or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value, &slow_case);
- // Old and new values are smis, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
- __ Ret();
-
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value, &slow_case);
-
- __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
- __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ cmp(cell_value_map, scratch);
- __ beq(&fast_heapobject_case);
-
- // Fallback to runtime.
- __ bind(&slow_case);
- __ SmiTag(slot);
- __ Push(slot, value);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -5113,7 +4766,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch, holder);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(Smi::kZero); // should_throw_on_error -> false
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 3ff0fde047..74ad56405f 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -29,9 +29,9 @@ const Register LoadDescriptor::ReceiverRegister() { return r4; }
const Register LoadDescriptor::NameRegister() { return r5; }
const Register LoadDescriptor::SlotRegister() { return r3; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r7; }
const Register StoreDescriptor::ReceiverRegister() { return r4; }
const Register StoreDescriptor::NameRegister() { return r5; }
@@ -44,10 +44,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
const Register StoreTransitionDescriptor::MapRegister() { return r8; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return r4; }
const Register StringCompareDescriptor::RightRegister() { return r3; }
@@ -157,7 +153,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r6, r5};
+ Register registers[] = {r4, r3, r6, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -206,13 +202,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5, r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r3, r4};
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 9b5f80ebe9..6588540035 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -1605,90 +1605,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch, Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!holder_reg.is(ip));
- DCHECK(!scratch.is(ip));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- DCHECK(!ip.is(scratch));
- mr(ip, fp);
- bind(&load_context);
- LoadP(scratch,
- MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch, &has_context);
- LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
- b(&load_context);
- bind(&has_context);
-
-// In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- cmpi(scratch, Operand::Zero());
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
- // Load the native context of the current context.
- LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- cmp(scratch, ip);
- beq(&same_contexts);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- mr(holder_reg, ip); // Move ip to its holding place.
- LoadRoot(ip, Heap::kNullValueRootIndex);
- cmp(holder_reg, ip);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
- LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- cmp(holder_reg, ip);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- // Restore ip is not needed. ip is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- LoadP(scratch, FieldMemOperand(scratch, token_offset));
- LoadP(ip, FieldMemOperand(ip, token_offset));
- cmp(scratch, ip);
- bne(miss);
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -1729,86 +1645,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
ExtractBitRange(t0, t0, 29, 0);
}
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
- Register key, Register result,
- Register t0, Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- GetNumberHash(t0, t1);
-
- // Compute the capacity mask.
- LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- SmiUntag(t1);
- subi(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- mr(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(t2, t2, t1);
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- slwi(ip, t2, Operand(1));
- add(t2, t2, ip); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- slwi(t2, t2, Operand(kPointerSizeLog2));
- add(t2, elements, t2);
- LoadP(ip,
- FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
- cmp(key, ip);
- if (i != kNumberDictionaryProbes - 1) {
- beq(&done);
- } else {
- bne(miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
- LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
- DCHECK_EQ(DATA, 0);
- and_(r0, t1, ip, SetRC);
- bne(miss, cr0);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- LoadP(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
@@ -2234,20 +2070,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
cmp(obj, r0);
}
-
-void MacroAssembler::CheckFastElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
- cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- bgt(fail);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
@@ -2525,18 +2347,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
-
void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
SmiUntag(ip, smi);
ConvertIntToDouble(ip, value);
@@ -3282,73 +3092,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
- Register scratch) {
- Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
-
- DCHECK(!scratch.is(r0));
-
- cmpi(length, Operand::Zero());
- beq(&done);
-
- // Check src alignment and length to see whether word_loop is possible
- andi(scratch, src, Operand(kPointerSize - 1));
- beq(&aligned, cr0);
- subfic(scratch, scratch, Operand(kPointerSize * 2));
- cmp(length, scratch);
- blt(&byte_loop);
-
- // Align src before copying in word size chunks.
- subi(scratch, scratch, Operand(kPointerSize));
- mtctr(scratch);
- bind(&align_loop);
- lbz(scratch, MemOperand(src));
- addi(src, src, Operand(1));
- subi(length, length, Operand(1));
- stb(scratch, MemOperand(dst));
- addi(dst, dst, Operand(1));
- bdnz(&align_loop);
-
- bind(&aligned);
-
- // Copy bytes in word size chunks.
- if (emit_debug_code()) {
- andi(r0, src, Operand(kPointerSize - 1));
- Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
- }
-
- ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
- cmpi(scratch, Operand::Zero());
- beq(&byte_loop);
-
- mtctr(scratch);
- bind(&word_loop);
- LoadP(scratch, MemOperand(src));
- addi(src, src, Operand(kPointerSize));
- subi(length, length, Operand(kPointerSize));
-
- StoreP(scratch, MemOperand(dst));
- addi(dst, dst, Operand(kPointerSize));
- bdnz(&word_loop);
-
- // Copy the last bytes if any left.
- cmpi(length, Operand::Zero());
- beq(&done);
-
- bind(&byte_loop);
- mtctr(length);
- bind(&byte_loop_1);
- lbz(scratch, MemOperand(src));
- addi(src, src, Operand(1));
- stb(scratch, MemOperand(dst));
- addi(dst, dst, Operand(1));
- bdnz(&byte_loop_1);
-
- bind(&done);
-}
-
-
void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {
@@ -3451,7 +3194,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
cmp(index, ip);
Check(lt, kIndexIsTooLarge);
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
cmpi(index, Operand::Zero());
Check(ge, kIndexIsNegative);
@@ -3828,7 +3571,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(r6, r4);
- CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+ CmpSmiLiteral(r6, Smi::kZero, r0);
bne(call_runtime);
bind(&start);
@@ -4687,7 +4430,8 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
Register mask = scratch2_reg;
DCHECK(!AreAliased(receiver_reg, scratch_reg, mask));
@@ -4697,7 +4441,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
lis(mask, Operand((~Page::kPageAlignmentMask >> 16)));
- addi(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ addi(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
@@ -4718,7 +4462,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// we are below top.
bind(&top_check);
cmp(scratch_reg, ip);
- bgt(no_memento_found);
+ bge(no_memento_found);
// Memento map check.
bind(&map_check);
LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index ba4d277688..28eceb18a4 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -662,19 +662,8 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
- Label* miss);
-
void GetNumberHash(Register t0, Register scratch);
- void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
- Register result, Register t0, Register t1,
- Register t2);
-
-
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
@@ -769,11 +758,6 @@ class MacroAssembler : public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src, Register dst, Register length, Register scratch);
-
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// the loop, |current_address| points at the next uninitialized field.
@@ -819,11 +803,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
-
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map, Register scratch, Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -912,13 +891,6 @@ class MacroAssembler : public Assembler {
return eq;
}
-
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
diff --git a/deps/v8/src/profiler/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 504c3f6e1a..440c6a1cce 100644
--- a/deps/v8/src/profiler/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -35,7 +35,7 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) entry->set_deopt_info(deopt_reason, position, deopt_id);
+ if (entry != NULL) entry->set_deopt_info(deopt_reason, deopt_id);
}
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 7a0cf9c8bf..6821ba64ca 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -305,7 +305,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
// Disable logging when using the new implementation.
saved_is_logging_ = logger->is_logging_;
logger->is_logging_ = false;
- generator_.reset(new ProfileGenerator(profiles_.get()));
+ generator_.reset(new ProfileGenerator(isolate_, profiles_.get()));
processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
sampling_interval_));
logger->SetUpProfilerListener();
@@ -326,33 +326,21 @@ void CpuProfiler::StartProcessorIfNotStarted() {
processor_->StartSynchronously();
}
-
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
if (!is_profiling_) return nullptr;
StopProcessorIfLastProfile(title);
- CpuProfile* result = profiles_->StopProfiling(title);
- if (result) {
- result->Print();
- }
- return result;
+ return profiles_->StopProfiling(title);
}
-
CpuProfile* CpuProfiler::StopProfiling(String* title) {
- if (!is_profiling_) return nullptr;
- const char* profile_title = profiles_->GetName(title);
- StopProcessorIfLastProfile(profile_title);
- return profiles_->StopProfiling(profile_title);
+ return StopProfiling(profiles_->GetName(title));
}
-
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
- if (profiles_->IsLastProfile(title)) {
- StopProcessor();
- }
+ if (!profiles_->IsLastProfile(title)) return;
+ StopProcessor();
}
-
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
is_profiling_ = false;
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index e9ccc5703e..fa31754a6f 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -83,7 +83,6 @@ class CodeDeoptEventRecord : public CodeEventRecord {
public:
Address start;
const char* deopt_reason;
- SourcePosition position;
int deopt_id;
void* pc;
int fp_to_sp_delta;
@@ -123,7 +122,7 @@ class CodeEventsContainer {
CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
-#undef DECLARE_TYPE
+#undef DECLARE_CLASS
};
};
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index d0fa2e4c1b..2fd682e567 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -1312,7 +1312,7 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
HeapObject* obj = shared;
String* shared_name = shared->DebugName();
const char* name = NULL;
- if (shared_name != *heap_->isolate()->factory()->empty_string()) {
+ if (shared_name != heap_->empty_string()) {
name = names_->GetName(shared_name);
TagObject(shared->code(), names_->GetFormatted("(code for %s)", name));
} else {
diff --git a/deps/v8/src/profiler/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index c50964d990..5a7017ad49 100644
--- a/deps/v8/src/profiler/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -25,25 +25,26 @@ CodeEntry::CodeEntry(CodeEventListener::LogEventsAndTags tag, const char* name,
position_(0),
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
- deopt_position_(SourcePosition::Unknown()),
deopt_id_(kNoDeoptimizationId),
line_info_(line_info),
instruction_start_(instruction_start) {}
-ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
+ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry,
+ ProfileNode* parent)
: tree_(tree),
entry_(entry),
self_ticks_(0),
children_(CodeEntriesMatch),
+ parent_(parent),
id_(tree->next_node_id()),
- line_ticks_(LineTickMatch) {}
-
+ line_ticks_(LineTickMatch) {
+ tree_->EnqueueNode(this);
+}
inline unsigned ProfileNode::function_id() const {
return tree_->GetFunctionId(this);
}
-
inline Isolate* ProfileNode::isolate() const { return tree_->isolate(); }
} // namespace internal
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 583ef0f4e3..b647670b59 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -10,6 +10,8 @@
#include "src/global-handles.h"
#include "src/profiler/cpu-profiler.h"
#include "src/profiler/profile-generator-inl.h"
+#include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
#include "src/unicode.h"
namespace v8 {
@@ -140,11 +142,8 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
}
void CodeEntry::AddInlineStack(int pc_offset,
- std::vector<CodeEntry*>& inline_stack) {
- // It's better to use std::move to place the vector into the map,
- // but it's not supported by the current stdlibc++ on MacOS.
- inline_locations_.insert(std::make_pair(pc_offset, std::vector<CodeEntry*>()))
- .first->second.swap(inline_stack);
+ std::vector<CodeEntry*> inline_stack) {
+ inline_locations_.insert(std::make_pair(pc_offset, std::move(inline_stack)));
}
const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
@@ -153,12 +152,9 @@ const std::vector<CodeEntry*>* CodeEntry::GetInlineStack(int pc_offset) const {
}
void CodeEntry::AddDeoptInlinedFrames(
- int deopt_id, std::vector<DeoptInlinedFrame>& inlined_frames) {
- // It's better to use std::move to place the vector into the map,
- // but it's not supported by the current stdlibc++ on MacOS.
- deopt_inlined_frames_
- .insert(std::make_pair(deopt_id, std::vector<DeoptInlinedFrame>()))
- .first->second.swap(inlined_frames);
+ int deopt_id, std::vector<CpuProfileDeoptFrame> inlined_frames) {
+ deopt_inlined_frames_.insert(
+ std::make_pair(deopt_id, std::move(inlined_frames)));
}
bool CodeEntry::HasDeoptInlinedFramesFor(int deopt_id) const {
@@ -181,16 +177,9 @@ CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
DCHECK_NE(kNoDeoptimizationId, deopt_id_);
if (deopt_inlined_frames_.find(deopt_id_) == deopt_inlined_frames_.end()) {
info.stack.push_back(CpuProfileDeoptFrame(
- {script_id_, position_ + deopt_position_.position()}));
+ {script_id_, static_cast<size_t>(std::max(0, position()))}));
} else {
- size_t deopt_position = deopt_position_.raw();
- // Copy stack of inlined frames where the deopt happened.
- std::vector<DeoptInlinedFrame>& frames = deopt_inlined_frames_[deopt_id_];
- for (DeoptInlinedFrame& inlined_frame : base::Reversed(frames)) {
- info.stack.push_back(CpuProfileDeoptFrame(
- {inlined_frame.script_id, deopt_position + inlined_frame.position}));
- deopt_position = 0; // Done with innermost frame.
- }
+ info.stack = deopt_inlined_frames_[deopt_id_];
}
return info;
}
@@ -214,9 +203,8 @@ ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
base::HashMap::Entry* map_entry =
children_.LookupOrInsert(entry, CodeEntryHash(entry));
ProfileNode* node = reinterpret_cast<ProfileNode*>(map_entry->value);
- if (node == NULL) {
- // New node added.
- node = new ProfileNode(tree_, entry);
+ if (!node) {
+ node = new ProfileNode(tree_, entry, this);
map_entry->value = node;
children_list_.Add(node);
}
@@ -305,7 +293,7 @@ class DeleteNodesCallback {
ProfileTree::ProfileTree(Isolate* isolate)
: root_entry_(CodeEventListener::FUNCTION_TAG, "(root)"),
next_node_id_(1),
- root_(new ProfileNode(this, &root_entry_)),
+ root_(new ProfileNode(this, &root_entry_, nullptr)),
isolate_(isolate),
next_function_id_(1),
function_ids_(ProfileNode::CodeEntriesMatch) {}
@@ -397,13 +385,22 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
}
+using v8::tracing::TracedValue;
+
CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
bool record_samples)
: title_(title),
record_samples_(record_samples),
start_time_(base::TimeTicks::HighResolutionNow()),
top_down_(profiler->isolate()),
- profiler_(profiler) {}
+ profiler_(profiler),
+ streaming_next_sample_(0) {
+ auto value = TracedValue::Create();
+ value->SetDouble("startTime",
+ (start_time_ - base::TimeTicks()).InMicroseconds());
+ TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
+ "Profile", this, "data", std::move(value));
+}
void CpuProfile::AddPath(base::TimeTicks timestamp,
const std::vector<CodeEntry*>& path, int src_line,
@@ -414,10 +411,94 @@ void CpuProfile::AddPath(base::TimeTicks timestamp,
timestamps_.Add(timestamp);
samples_.Add(top_frame_node);
}
+ const int kSamplesFlushCount = 100;
+ const int kNodesFlushCount = 10;
+ if (samples_.length() - streaming_next_sample_ >= kSamplesFlushCount ||
+ top_down_.pending_nodes_count() >= kNodesFlushCount) {
+ StreamPendingTraceEvents();
+ }
+}
+
+namespace {
+
+void BuildNodeValue(const ProfileNode* node, TracedValue* value) {
+ const CodeEntry* entry = node->entry();
+ value->BeginDictionary("callFrame");
+ value->SetString("functionName", entry->name());
+ if (*entry->resource_name()) {
+ value->SetString("url", entry->resource_name());
+ }
+ value->SetInteger("scriptId", entry->script_id());
+ if (entry->line_number()) {
+ value->SetInteger("lineNumber", entry->line_number() - 1);
+ }
+ if (entry->column_number()) {
+ value->SetInteger("columnNumber", entry->column_number() - 1);
+ }
+ value->EndDictionary();
+ value->SetInteger("id", node->id());
+ if (node->parent()) {
+ value->SetInteger("parent", node->parent()->id());
+ }
+ const char* deopt_reason = entry->bailout_reason();
+ if (deopt_reason && deopt_reason[0] && strcmp(deopt_reason, "no reason")) {
+ value->SetString("deoptReason", deopt_reason);
+ }
+}
+
+} // namespace
+
+void CpuProfile::StreamPendingTraceEvents() {
+ std::vector<const ProfileNode*> pending_nodes = top_down_.TakePendingNodes();
+ if (pending_nodes.empty() && !samples_.length()) return;
+ auto value = TracedValue::Create();
+
+ if (!pending_nodes.empty() || streaming_next_sample_ != samples_.length()) {
+ value->BeginDictionary("cpuProfile");
+ if (!pending_nodes.empty()) {
+ value->BeginArray("nodes");
+ for (auto node : pending_nodes) {
+ value->BeginDictionary();
+ BuildNodeValue(node, value.get());
+ value->EndDictionary();
+ }
+ value->EndArray();
+ }
+ if (streaming_next_sample_ != samples_.length()) {
+ value->BeginArray("samples");
+ for (int i = streaming_next_sample_; i < samples_.length(); ++i) {
+ value->AppendInteger(samples_[i]->id());
+ }
+ value->EndArray();
+ }
+ value->EndDictionary();
+ }
+ if (streaming_next_sample_ != samples_.length()) {
+ value->BeginArray("timeDeltas");
+ base::TimeTicks lastTimestamp =
+ streaming_next_sample_ ? timestamps_[streaming_next_sample_ - 1]
+ : start_time();
+ for (int i = streaming_next_sample_; i < timestamps_.length(); ++i) {
+ value->AppendInteger(
+ static_cast<int>((timestamps_[i] - lastTimestamp).InMicroseconds()));
+ lastTimestamp = timestamps_[i];
+ }
+ value->EndArray();
+ DCHECK(samples_.length() == timestamps_.length());
+ streaming_next_sample_ = samples_.length();
+ }
+
+ TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
+ "ProfileChunk", this, "data", std::move(value));
}
-void CpuProfile::CalculateTotalTicksAndSamplingRate() {
+void CpuProfile::FinishProfile() {
end_time_ = base::TimeTicks::HighResolutionNow();
+ StreamPendingTraceEvents();
+ auto value = TracedValue::Create();
+ value->SetDouble("endTime", (end_time_ - base::TimeTicks()).InMicroseconds());
+ TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
+ "ProfileChunk", this, "data", std::move(value));
}
void CpuProfile::Print() {
@@ -504,7 +585,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title,
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
- CpuProfile* profile = NULL;
+ CpuProfile* profile = nullptr;
current_profiles_semaphore_.Wait();
for (int i = current_profiles_.length() - 1; i >= 0; --i) {
if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
@@ -514,8 +595,8 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
}
current_profiles_semaphore_.Signal();
- if (profile == NULL) return NULL;
- profile->CalculateTotalTicksAndSamplingRate();
+ if (!profile) return nullptr;
+ profile->FinishProfile();
finished_profiles_.Add(profile);
return profile;
}
@@ -554,8 +635,9 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
current_profiles_semaphore_.Signal();
}
-ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
- : profiles_(profiles) {}
+ProfileGenerator::ProfileGenerator(Isolate* isolate,
+ CpuProfilesCollection* profiles)
+ : isolate_(isolate), profiles_(profiles) {}
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
std::vector<CodeEntry*> entries;
@@ -576,16 +658,14 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- entries.push_back(code_map_.FindEntry(
- reinterpret_cast<Address>(sample.external_callback_entry)));
+ entries.push_back(FindEntry(sample.external_callback_entry));
} else {
- CodeEntry* pc_entry =
- code_map_.FindEntry(reinterpret_cast<Address>(sample.pc));
+ CodeEntry* pc_entry = FindEntry(sample.pc);
// If there is no pc_entry we're likely in native code.
// Find out, if top of stack was pointing inside a JS function
// meaning that we have encountered a frameless invocation.
if (!pc_entry && !sample.has_external_callback) {
- pc_entry = code_map_.FindEntry(reinterpret_cast<Address>(sample.tos));
+ pc_entry = FindEntry(sample.tos);
}
// If pc is in the function code before it set up stack frame or after the
// frame was destroyed SafeStackFrameIterator incorrectly thinks that
@@ -618,8 +698,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
for (unsigned i = 0; i < sample.frames_count; ++i) {
Address stack_pos = reinterpret_cast<Address>(sample.stack[i]);
- CodeEntry* entry = code_map_.FindEntry(stack_pos);
-
+ CodeEntry* entry = FindEntry(stack_pos);
if (entry) {
// Find out if the entry has an inlining stack associated.
int pc_offset =
@@ -662,6 +741,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
sample.update_stats);
}
+CodeEntry* ProfileGenerator::FindEntry(void* address) {
+ CodeEntry* entry = code_map_.FindEntry(reinterpret_cast<Address>(address));
+ if (!entry) {
+ RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
+ void* start = reinterpret_cast<void*>(rcs);
+ void* end = reinterpret_cast<void*>(rcs + 1);
+ if (start <= address && address < end) {
+ RuntimeCallCounter* counter =
+ reinterpret_cast<RuntimeCallCounter*>(address);
+ entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name,
+ CodeEntry::kEmptyNamePrefix, "native V8Runtime");
+ code_map_.AddCode(reinterpret_cast<Address>(address), entry, 1);
+ }
+ }
+ return entry;
+}
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
switch (tag) {
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 179d411429..1b3cad6dc3 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -49,13 +49,6 @@ class CodeEntry {
Address instruction_start = NULL);
~CodeEntry();
- // Container describing inlined frames at eager deopt points. Is eventually
- // being translated into v8::CpuProfileDeoptFrame by the profiler.
- struct DeoptInlinedFrame {
- int position;
- int script_id;
- };
-
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
const char* name() const { return name_; }
@@ -72,18 +65,15 @@ class CodeEntry {
}
const char* bailout_reason() const { return bailout_reason_; }
- void set_deopt_info(const char* deopt_reason, SourcePosition position,
- int deopt_id) {
+ void set_deopt_info(const char* deopt_reason, int deopt_id) {
DCHECK(!has_deopt_info());
deopt_reason_ = deopt_reason;
- deopt_position_ = position;
deopt_id_ = deopt_id;
}
CpuProfileDeoptInfo GetDeoptInfo();
bool has_deopt_info() const { return deopt_id_ != kNoDeoptimizationId; }
void clear_deopt_info() {
deopt_reason_ = kNoDeoptReason;
- deopt_position_ = SourcePosition::Unknown();
deopt_id_ = kNoDeoptimizationId;
}
@@ -99,10 +89,10 @@ class CodeEntry {
int GetSourceLine(int pc_offset) const;
- void AddInlineStack(int pc_offset, std::vector<CodeEntry*>& inline_stack);
+ void AddInlineStack(int pc_offset, std::vector<CodeEntry*> inline_stack);
const std::vector<CodeEntry*>* GetInlineStack(int pc_offset) const;
- void AddDeoptInlinedFrames(int deopt_id, std::vector<DeoptInlinedFrame>&);
+ void AddDeoptInlinedFrames(int deopt_id, std::vector<CpuProfileDeoptFrame>);
bool HasDeoptInlinedFramesFor(int deopt_id) const;
Address instruction_start() const { return instruction_start_; }
@@ -167,13 +157,12 @@ class CodeEntry {
int position_;
const char* bailout_reason_;
const char* deopt_reason_;
- SourcePosition deopt_position_;
int deopt_id_;
JITLineInfoTable* line_info_;
Address instruction_start_;
// Should be an unordered_map, but it doesn't currently work on Win & MacOS.
std::map<int, std::vector<CodeEntry*>> inline_locations_;
- std::map<int, std::vector<DeoptInlinedFrame>> deopt_inlined_frames_;
+ std::map<int, std::vector<CpuProfileDeoptFrame>> deopt_inlined_frames_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -183,7 +172,7 @@ class ProfileTree;
class ProfileNode {
public:
- inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
+ inline ProfileNode(ProfileTree* tree, CodeEntry* entry, ProfileNode* parent);
ProfileNode* FindChild(CodeEntry* entry);
ProfileNode* FindOrAddChild(CodeEntry* entry);
@@ -196,6 +185,7 @@ class ProfileNode {
const List<ProfileNode*>* children() const { return &children_list_; }
unsigned id() const { return id_; }
unsigned function_id() const;
+ ProfileNode* parent() const { return parent_; }
unsigned int GetHitLineCount() const { return line_ticks_.occupancy(); }
bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
unsigned int length) const;
@@ -223,6 +213,7 @@ class ProfileNode {
// Mapping from CodeEntry* to ProfileNode*
base::CustomMatcherHashMap children_;
List<ProfileNode*> children_list_;
+ ProfileNode* parent_;
unsigned id_;
base::CustomMatcherHashMap line_ticks_;
@@ -251,10 +242,18 @@ class ProfileTree {
Isolate* isolate() const { return isolate_; }
+ void EnqueueNode(const ProfileNode* node) { pending_nodes_.push_back(node); }
+ size_t pending_nodes_count() const { return pending_nodes_.size(); }
+ std::vector<const ProfileNode*> TakePendingNodes() {
+ return std::move(pending_nodes_);
+ }
+
private:
template <typename Callback>
void TraverseDepthFirst(Callback* callback);
+ std::vector<const ProfileNode*> pending_nodes_;
+
CodeEntry root_entry_;
unsigned next_node_id_;
ProfileNode* root_;
@@ -274,7 +273,7 @@ class CpuProfile {
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const std::vector<CodeEntry*>& path,
int src_line, bool update_stats);
- void CalculateTotalTicksAndSamplingRate();
+ void FinishProfile();
const char* title() const { return title_; }
const ProfileTree* top_down() const { return &top_down_; }
@@ -294,6 +293,8 @@ class CpuProfile {
void Print();
private:
+ void StreamPendingTraceEvents();
+
const char* title_;
bool record_samples_;
base::TimeTicks start_time_;
@@ -302,6 +303,7 @@ class CpuProfile {
List<base::TimeTicks> timestamps_;
ProfileTree top_down_;
CpuProfiler* const profiler_;
+ int streaming_next_sample_;
DISALLOW_COPY_AND_ASSIGN(CpuProfile);
};
@@ -366,15 +368,17 @@ class CpuProfilesCollection {
class ProfileGenerator {
public:
- explicit ProfileGenerator(CpuProfilesCollection* profiles);
+ ProfileGenerator(Isolate* isolate, CpuProfilesCollection* profiles);
void RecordTickSample(const TickSample& sample);
CodeMap* code_map() { return &code_map_; }
private:
+ CodeEntry* FindEntry(void* address);
CodeEntry* EntryForVMState(StateTag tag);
+ Isolate* isolate_;
CpuProfilesCollection* profiles_;
CodeMap code_map_;
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 4bceac2e89..640f967e3d 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -90,18 +90,13 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
line_table = new JITLineInfoTable();
int offset = abstract_code->IsCode() ? Code::kHeaderSize
: BytecodeArray::kHeaderSize;
- int start_position = shared->start_position();
- int end_position = shared->end_position();
for (SourcePositionTableIterator it(abstract_code->source_position_table());
!it.done(); it.Advance()) {
- int position = it.source_position();
- // TODO(alph): in case of inlining the position may correspond to an
- // inlined function source code. Do not collect positions that fall
- // beyond the function source code. There's however a chance the
- // inlined function has similar positions but in another script. So
- // the proper fix is to store script_id in some form along with the
- // inlined function positions.
- if (position < start_position || position >= end_position) continue;
+ // TODO(alph,tebbi) Skipping inlined positions for now, because they might
+ // refer to a different script.
+ if (it.source_position().InliningId() != SourcePosition::kNotInlined)
+ continue;
+ int position = it.source_position().ScriptOffset();
int line_number = script->GetLineNumber(position) + 1;
int pc_offset = it.code_offset() + offset;
line_table->SetPosition(pc_offset, line_number);
@@ -156,7 +151,6 @@ void ProfilerListener::CodeDeoptEvent(Code* code, Address pc,
Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
rec->start = code->address();
rec->deopt_reason = DeoptimizeReasonToString(info.deopt_reason);
- rec->position = info.position;
rec->deopt_id = info.deopt_id;
rec->pc = reinterpret_cast<void*>(pc);
rec->fp_to_sp_delta = fp_to_sp_delta;
@@ -245,8 +239,7 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
inline_stack.push_back(inline_entry);
}
if (!inline_stack.empty()) {
- entry->AddInlineStack(pc_offset, inline_stack);
- DCHECK(inline_stack.empty());
+ entry->AddInlineStack(pc_offset, std::move(inline_stack));
}
}
}
@@ -254,55 +247,36 @@ void ProfilerListener::RecordInliningInfo(CodeEntry* entry,
void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
AbstractCode* abstract_code) {
if (abstract_code->kind() != AbstractCode::OPTIMIZED_FUNCTION) return;
- Code* code = abstract_code->GetCode();
- DeoptimizationInputData* deopt_input_data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int const mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID);
- for (RelocIterator rit(code, mask); !rit.done(); rit.next()) {
- RelocInfo* reloc_info = rit.rinfo();
- DCHECK(RelocInfo::IsDeoptId(reloc_info->rmode()));
- int deopt_id = static_cast<int>(reloc_info->data());
- int translation_index =
- deopt_input_data->TranslationIndex(deopt_id)->value();
- TranslationIterator it(deopt_input_data->TranslationByteArray(),
- translation_index);
- Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
- DCHECK_EQ(Translation::BEGIN, opcode);
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- std::vector<CodeEntry::DeoptInlinedFrame> inlined_frames;
- while (it.HasNext() &&
- Translation::BEGIN !=
- (opcode = static_cast<Translation::Opcode>(it.Next()))) {
- if (opcode != Translation::JS_FRAME &&
- opcode != Translation::INTERPRETED_FRAME) {
- it.Skip(Translation::NumberOfOperandsFor(opcode));
- continue;
- }
- BailoutId ast_id = BailoutId(it.Next());
- int shared_info_id = it.Next();
- it.Next(); // Skip height
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(
- deopt_input_data->LiteralArray()->get(shared_info_id));
- int source_position;
- if (opcode == Translation::INTERPRETED_FRAME) {
- source_position =
- Deoptimizer::ComputeSourcePositionFromBytecodeArray(shared, ast_id);
- } else {
- DCHECK(opcode == Translation::JS_FRAME);
- source_position =
- Deoptimizer::ComputeSourcePositionFromBaselineCode(shared, ast_id);
+ Handle<Code> code(abstract_code->GetCode());
+
+ SourcePosition last_position = SourcePosition::Unknown();
+ int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_ID) |
+ RelocInfo::ModeMask(RelocInfo::DEOPT_SCRIPT_OFFSET) |
+ RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
+ int script_offset = static_cast<int>(info->data());
+ it.next();
+ DCHECK(it.rinfo()->rmode() == RelocInfo::DEOPT_INLINING_ID);
+ int inlining_id = static_cast<int>(it.rinfo()->data());
+ last_position = SourcePosition(script_offset, inlining_id);
+ continue;
+ }
+ if (info->rmode() == RelocInfo::DEOPT_ID) {
+ int deopt_id = static_cast<int>(info->data());
+ DCHECK(last_position.IsKnown());
+ std::vector<CpuProfileDeoptFrame> inlined_frames;
+ for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
+ DCHECK(pos_info.position.ScriptOffset() != kNoSourcePosition);
+ size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
+ int script_id = Script::cast(pos_info.function->script())->id();
+ inlined_frames.push_back(CpuProfileDeoptFrame({script_id, offset}));
}
- int script_id = v8::UnboundScript::kNoScriptId;
- if (shared->script()->IsScript()) {
- Script* script = Script::cast(shared->script());
- script_id = script->id();
+ if (!inlined_frames.empty() &&
+ !entry->HasDeoptInlinedFramesFor(deopt_id)) {
+ entry->AddDeoptInlinedFrames(deopt_id, std::move(inlined_frames));
}
- CodeEntry::DeoptInlinedFrame frame = {source_position, script_id};
- inlined_frames.push_back(frame);
- }
- if (!inlined_frames.empty() && !entry->HasDeoptInlinedFramesFor(deopt_id)) {
- entry->AddDeoptInlinedFrames(deopt_id, inlined_frames);
- DCHECK(inlined_frames.empty());
}
}
}
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
index 3b2ca630ac..f2a3d4a2cb 100644
--- a/deps/v8/src/profiler/sampling-heap-profiler.cc
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -65,7 +65,7 @@ SamplingHeapProfiler::SamplingHeapProfiler(
stack_depth_(stack_depth),
rate_(rate),
flags_(flags) {
- CHECK_GT(rate_, 0);
+ CHECK_GT(rate_, 0u);
heap->new_space()->AddAllocationObserver(new_space_observer_.get());
AllSpaces spaces(heap);
for (Space* space = spaces.next(); space != nullptr; space = spaces.next()) {
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index ecb2bf46f7..e1c84c46bf 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -5,6 +5,7 @@
#include "src/profiler/tick-sample.h"
#include "include/v8-profiler.h"
+#include "src/counters.h"
#include "src/frames-inl.h"
#include "src/msan.h"
#include "src/simulator.h"
@@ -237,7 +238,15 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
it.top_frame_type() == internal::StackFrame::BUILTIN_EXIT)) {
frames[i++] = isolate->c_function();
}
+ i::RuntimeCallTimer* timer =
+ isolate->counters()->runtime_call_stats()->current_timer();
for (; !it.done() && i < frames_limit; it.Advance()) {
+ while (timer && reinterpret_cast<i::Address>(timer) < it.frame()->fp() &&
+ i < frames_limit) {
+ frames[i++] = reinterpret_cast<i::Address>(timer->counter());
+ timer = timer->parent();
+ }
+ if (i == frames_limit) break;
if (!it.frame()->is_interpreted()) {
frames[i++] = it.frame()->pc();
continue;
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.cc b/deps/v8/src/profiler/tracing-cpu-profiler.cc
index b24ca2fd25..8b31225905 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.cc
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.cc
@@ -4,8 +4,13 @@
#include "src/profiler/tracing-cpu-profiler.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/tracing/trace-event.h"
#include "src/v8.h"
+#define PROFILER_TRACE_CATEGORY_ENABLED(cat) \
+ (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT(cat)))
+
namespace v8 {
std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
@@ -17,9 +22,57 @@ std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
namespace internal {
-TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate) {}
+TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
+ : isolate_(isolate), profiling_enabled_(false) {
+ // Make sure tracing system notices profiler categories.
+ PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler");
+ PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires");
+ V8::GetCurrentPlatform()->AddTraceStateObserver(this);
+}
+
+TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {
+ StopProfiling();
+ V8::GetCurrentPlatform()->RemoveTraceStateObserver(this);
+}
+
+void TracingCpuProfilerImpl::OnTraceEnabled() {
+ if (!PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler")) return;
+ profiling_enabled_ = true;
+ isolate_->RequestInterrupt(
+ [](v8::Isolate*, void* data) {
+ reinterpret_cast<TracingCpuProfilerImpl*>(data)->StartProfiling();
+ },
+ this);
+}
-TracingCpuProfilerImpl::~TracingCpuProfilerImpl() {}
+void TracingCpuProfilerImpl::OnTraceDisabled() {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (!profiling_enabled_) return;
+ profiling_enabled_ = false;
+ isolate_->RequestInterrupt(
+ [](v8::Isolate*, void* data) {
+ reinterpret_cast<TracingCpuProfilerImpl*>(data)->StopProfiling();
+ },
+ this);
+}
+
+void TracingCpuProfilerImpl::StartProfiling() {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (!profiling_enabled_ || profiler_) return;
+ int sampling_interval_us =
+ PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires") ? 100 : 1000;
+ profiler_.reset(new CpuProfiler(isolate_));
+ profiler_->set_sampling_interval(
+ base::TimeDelta::FromMicroseconds(sampling_interval_us));
+ profiler_->StartProfiling("", true);
+}
+
+void TracingCpuProfilerImpl::StopProfiling() {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (!profiler_) return;
+ profiler_->StopProfiling("");
+ profiler_.reset();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/profiler/tracing-cpu-profiler.h b/deps/v8/src/profiler/tracing-cpu-profiler.h
index 80f1bdcc92..a512a940f8 100644
--- a/deps/v8/src/profiler/tracing-cpu-profiler.h
+++ b/deps/v8/src/profiler/tracing-cpu-profiler.h
@@ -5,18 +5,37 @@
#ifndef V8_PROFILER_TRACING_CPU_PROFILER_H
#define V8_PROFILER_TRACING_CPU_PROFILER_H
+#include "include/v8-platform.h"
#include "include/v8-profiler.h"
+#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
namespace v8 {
namespace internal {
-class TracingCpuProfilerImpl final : public TracingCpuProfiler {
+class CpuProfiler;
+class Isolate;
+
+class TracingCpuProfilerImpl final : public TracingCpuProfiler,
+ private v8::Platform::TraceStateObserver {
public:
explicit TracingCpuProfilerImpl(Isolate*);
~TracingCpuProfilerImpl();
+ // v8::Platform::TraceStateObserver
+ void OnTraceEnabled() final;
+ void OnTraceDisabled() final;
+
private:
+ void StartProfiling();
+ void StopProfiling();
+
+ Isolate* isolate_;
+ std::unique_ptr<CpuProfiler> profiler_;
+ bool profiling_enabled_;
+ base::Mutex mutex_;
+
DISALLOW_COPY_AND_ASSIGN(TracingCpuProfilerImpl);
};
diff --git a/deps/v8/src/promise-utils.cc b/deps/v8/src/promise-utils.cc
new file mode 100644
index 0000000000..607dbe8caa
--- /dev/null
+++ b/deps/v8/src/promise-utils.cc
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/promise-utils.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+enum PromiseResolvingFunctionContextSlot {
+ kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+ kPromiseSlot,
+ kDebugEventSlot,
+ kPromiseContextLength,
+};
+
+JSObject* PromiseUtils::GetPromise(Handle<Context> context) {
+ return JSObject::cast(context->get(kPromiseSlot));
+}
+
+Object* PromiseUtils::GetDebugEvent(Handle<Context> context) {
+ return context->get(kDebugEventSlot);
+}
+
+bool PromiseUtils::HasAlreadyVisited(Handle<Context> context) {
+ return Smi::cast(context->get(kAlreadyVisitedSlot))->value() != 0;
+}
+
+void PromiseUtils::SetAlreadyVisited(Handle<Context> context) {
+ context->set(kAlreadyVisitedSlot, Smi::FromInt(1));
+}
+
+void PromiseUtils::CreateResolvingFunctions(Isolate* isolate,
+ Handle<JSObject> promise,
+ Handle<Object> debug_event,
+ Handle<JSFunction>* resolve,
+ Handle<JSFunction>* reject) {
+ DCHECK(debug_event->IsTrue(isolate) || debug_event->IsFalse(isolate));
+ Handle<Context> context =
+ isolate->factory()->NewPromiseResolvingFunctionContext(
+ kPromiseContextLength);
+ context->set_native_context(*isolate->native_context());
+ // We set the closure to be an empty function, same as native context.
+ context->set_closure(isolate->native_context()->closure());
+ context->set(kAlreadyVisitedSlot, Smi::kZero);
+ context->set(kPromiseSlot, *promise);
+ context->set(kDebugEventSlot, *debug_event);
+
+ Handle<SharedFunctionInfo> resolve_shared_fun(
+ isolate->native_context()->promise_resolve_shared_fun(), isolate);
+ Handle<JSFunction> resolve_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ isolate->sloppy_function_without_prototype_map(), resolve_shared_fun,
+ isolate->native_context(), TENURED);
+
+ Handle<SharedFunctionInfo> reject_shared_fun(
+ isolate->native_context()->promise_reject_shared_fun(), isolate);
+ Handle<JSFunction> reject_fun =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(
+ isolate->sloppy_function_without_prototype_map(), reject_shared_fun,
+ isolate->native_context(), TENURED);
+
+ resolve_fun->set_context(*context);
+ reject_fun->set_context(*context);
+
+ *resolve = resolve_fun;
+ *reject = reject_fun;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/promise-utils.h b/deps/v8/src/promise-utils.h
new file mode 100644
index 0000000000..6ed6fcde5f
--- /dev/null
+++ b/deps/v8/src/promise-utils.h
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROMISE_UTILS_H_
+#define V8_PROMISE_UTILS_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Helper methods for Promise builtins.
+class PromiseUtils : public AllStatic {
+ public:
+ // These get and set the slots on the PromiseResolvingContext, which
+ // is used by the resolve/reject promise callbacks.
+ static JSObject* GetPromise(Handle<Context> context);
+ static Object* GetDebugEvent(Handle<Context> context);
+ static bool HasAlreadyVisited(Handle<Context> context);
+ static void SetAlreadyVisited(Handle<Context> context);
+
+ static void CreateResolvingFunctions(Isolate* isolate,
+ Handle<JSObject> promise,
+ Handle<Object> debug_event,
+ Handle<JSFunction>* resolve,
+ Handle<JSFunction>* reject);
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROMISE_UTILS_H_
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index ebe7d3b673..233233c5d4 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -32,7 +32,7 @@ class Descriptor BASE_EMBEDDED {
PropertyDetails details_;
protected:
- Descriptor() : details_(Smi::FromInt(0)) {}
+ Descriptor() : details_(Smi::kZero) {}
void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
DCHECK(key->IsUniqueName());
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 032d9b6b34..38d6cab985 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -32,9 +32,9 @@ class PrototypeIterator {
PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL)
- : object_(NULL),
+ : isolate_(isolate),
+ object_(NULL),
handle_(receiver),
- isolate_(isolate),
where_to_end_(where_to_end),
is_at_end_(false),
seen_proxies_(0) {
@@ -45,28 +45,43 @@ class PrototypeIterator {
PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
WhereToStart where_to_start = kStartAtPrototype,
WhereToEnd where_to_end = END_AT_NULL)
- : object_(receiver),
- isolate_(isolate),
+ : isolate_(isolate),
+ object_(receiver),
where_to_end_(where_to_end),
is_at_end_(false),
seen_proxies_(0) {
if (where_to_start == kStartAtPrototype) Advance();
}
- explicit PrototypeIterator(Map* receiver_map)
- : object_(receiver_map->prototype()),
- isolate_(receiver_map->GetIsolate()),
- where_to_end_(END_AT_NULL),
+ explicit PrototypeIterator(Map* receiver_map,
+ WhereToEnd where_to_end = END_AT_NULL)
+ : isolate_(receiver_map->GetIsolate()),
+ object_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype()),
+ where_to_end_(where_to_end),
is_at_end_(object_->IsNull(isolate_)),
- seen_proxies_(0) {}
+ seen_proxies_(0) {
+ if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+ DCHECK(object_->IsJSReceiver());
+ Map* map = JSReceiver::cast(object_)->map();
+ is_at_end_ = !map->has_hidden_prototype();
+ }
+ }
- explicit PrototypeIterator(Handle<Map> receiver_map)
- : object_(NULL),
- handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
- isolate_(receiver_map->GetIsolate()),
- where_to_end_(END_AT_NULL),
+ explicit PrototypeIterator(Handle<Map> receiver_map,
+ WhereToEnd where_to_end = END_AT_NULL)
+ : isolate_(receiver_map->GetIsolate()),
+ object_(NULL),
+ handle_(receiver_map->GetPrototypeChainRootMap(isolate_)->prototype(),
+ isolate_),
+ where_to_end_(where_to_end),
is_at_end_(handle_->IsNull(isolate_)),
- seen_proxies_(0) {}
+ seen_proxies_(0) {
+ if (!is_at_end_ && where_to_end_ == END_AT_NON_HIDDEN) {
+ DCHECK(handle_->IsJSReceiver());
+ Map* map = JSReceiver::cast(*handle_)->map();
+ is_at_end_ = !map->has_hidden_prototype();
+ }
+ }
~PrototypeIterator() {}
@@ -161,9 +176,9 @@ class PrototypeIterator {
bool IsAtEnd() const { return is_at_end_; }
private:
+ Isolate* isolate_;
Object* object_;
Handle<Object> handle_;
- Isolate* isolate_;
WhereToEnd where_to_end_;
bool is_at_end_;
int seen_proxies_;
diff --git a/deps/v8/src/regexp/OWNERS b/deps/v8/src/regexp/OWNERS
index d9d588df6c..c493afa8f0 100644
--- a/deps/v8/src/regexp/OWNERS
+++ b/deps/v8/src/regexp/OWNERS
@@ -1,6 +1,4 @@
set noparent
-jochen@chromium.org
-marja@chromium.org
-ulan@chromium.org
+jgruber@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 96a778cfb7..f0abc9a8b3 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -136,16 +136,12 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
JSRegExp::Flags flags) {
Isolate* isolate = re->GetIsolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
CompilationCache* compilation_cache = isolate->compilation_cache();
MaybeHandle<FixedArray> maybe_cached =
compilation_cache->LookupRegExp(pattern, flags);
Handle<FixedArray> cached;
- bool in_cache = maybe_cached.ToHandle(&cached);
- LOG(isolate, RegExpCompileEvent(re, in_cache));
-
- Handle<Object> result;
- if (in_cache) {
+ if (maybe_cached.ToHandle(&cached)) {
re->set_data(*cached);
return re;
}
@@ -194,7 +190,7 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
MaybeHandle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
Handle<String> subject, int index,
- Handle<JSObject> last_match_info) {
+ Handle<RegExpMatchInfo> last_match_info) {
switch (regexp->TypeTag()) {
case JSRegExp::ATOM:
return AtomExec(regexp, subject, index, last_match_info);
@@ -222,17 +218,14 @@ void RegExpImpl::AtomCompile(Handle<JSRegExp> re,
match_pattern);
}
-
-static void SetAtomLastCapture(FixedArray* array,
- String* subject,
- int from,
- int to) {
- SealHandleScope shs(array->GetIsolate());
- RegExpImpl::SetLastCaptureCount(array, 2);
- RegExpImpl::SetLastSubject(array, subject);
- RegExpImpl::SetLastInput(array, subject);
- RegExpImpl::SetCapture(array, 0, from);
- RegExpImpl::SetCapture(array, 1, to);
+static void SetAtomLastCapture(Handle<RegExpMatchInfo> last_match_info,
+ String* subject, int from, int to) {
+ SealHandleScope shs(last_match_info->GetIsolate());
+ last_match_info->SetNumberOfCaptureRegisters(2);
+ last_match_info->SetLastSubject(subject);
+ last_match_info->SetLastInput(subject);
+ last_match_info->SetCapture(0, from);
+ last_match_info->SetCapture(1, to);
}
@@ -289,7 +282,7 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject,
int index,
- Handle<JSObject> last_match_info) {
+ Handle<RegExpMatchInfo> last_match_info) {
Isolate* isolate = re->GetIsolate();
static const int kNumRegisters = 2;
@@ -302,8 +295,8 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, Handle<String> subject,
DCHECK_EQ(res, RegExpImpl::RE_SUCCESS);
SealHandleScope shs(isolate);
- FixedArray* array = FixedArray::cast(last_match_info->elements());
- SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
+ SetAtomLastCapture(last_match_info, *subject, output_registers[0],
+ output_registers[1]);
return last_match_info;
}
@@ -343,7 +336,7 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
bool is_one_byte) {
// Compile the RegExp.
Isolate* isolate = re->GetIsolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
PostponeInterruptsScope postpone(isolate);
// If we had a compilation error the last time this is saved at the
// saved code index.
@@ -417,7 +410,7 @@ void RegExpImpl::SetIrregexpMaxRegisterCount(FixedArray* re, int value) {
void RegExpImpl::SetIrregexpCaptureNameMap(FixedArray* re,
Handle<FixedArray> value) {
if (value.is_null()) {
- re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::FromInt(0));
+ re->set(JSRegExp::kIrregexpCaptureNameMapIndex, Smi::kZero);
} else {
re->set(JSRegExp::kIrregexpCaptureNameMapIndex, *value);
}
@@ -566,10 +559,9 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
#endif // V8_INTERPRETED_REGEXP
}
-MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
- Handle<String> subject,
- int previous_index,
- Handle<JSObject> last_match_info) {
+MaybeHandle<Object> RegExpImpl::IrregexpExec(
+ Handle<JSRegExp> regexp, Handle<String> subject, int previous_index,
+ Handle<RegExpMatchInfo> last_match_info) {
Isolate* isolate = regexp->GetIsolate();
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
@@ -613,31 +605,40 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> regexp,
return isolate->factory()->null_value();
}
-static void EnsureSize(Handle<JSObject> array, uint32_t minimum_size) {
- if (static_cast<uint32_t>(array->elements()->length()) < minimum_size) {
- array->GetElementsAccessor()->GrowCapacityAndConvert(array, minimum_size);
- }
-}
+Handle<RegExpMatchInfo> RegExpImpl::SetLastMatchInfo(
+ Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
+ int capture_count, int32_t* match) {
+ // This is the only place where match infos can grow. If, after executing the
+ // regexp, RegExpExecStub finds that the match info is too small, it restarts
+ // execution in RegExpImpl::Exec, which finally grows the match info right
+ // here.
-Handle<JSObject> RegExpImpl::SetLastMatchInfo(Handle<JSObject> last_match_info,
- Handle<String> subject,
- int capture_count,
- int32_t* match) {
- DCHECK(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
- EnsureSize(last_match_info, capture_register_count + kLastMatchOverhead);
+ Handle<RegExpMatchInfo> result =
+ RegExpMatchInfo::ReserveCaptures(last_match_info, capture_register_count);
+ result->SetNumberOfCaptureRegisters(capture_register_count);
+
+ if (*result != *last_match_info) {
+ // The match info has been reallocated, update the corresponding reference
+ // on the native context.
+ Isolate* isolate = last_match_info->GetIsolate();
+ if (*last_match_info == *isolate->regexp_last_match_info()) {
+ isolate->native_context()->set_regexp_last_match_info(*result);
+ } else if (*last_match_info == *isolate->regexp_internal_match_info()) {
+ isolate->native_context()->set_regexp_internal_match_info(*result);
+ }
+ }
+
DisallowHeapAllocation no_allocation;
- FixedArray* array = FixedArray::cast(last_match_info->elements());
if (match != NULL) {
for (int i = 0; i < capture_register_count; i += 2) {
- SetCapture(array, i, match[i]);
- SetCapture(array, i + 1, match[i + 1]);
+ result->SetCapture(i, match[i]);
+ result->SetCapture(i + 1, match[i + 1]);
}
}
- SetLastCaptureCount(array, capture_register_count);
- SetLastSubject(array, *subject);
- SetLastInput(array, *subject);
- return last_match_info;
+ result->SetLastSubject(*subject);
+ result->SetLastInput(*subject);
+ return result;
}
@@ -6781,10 +6782,10 @@ Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
FixedArray** last_match_cache,
ResultsCacheType type) {
FixedArray* cache;
- if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
+ if (!key_string->IsInternalizedString()) return Smi::kZero;
if (type == STRING_SPLIT_SUBSTRINGS) {
DCHECK(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
+ if (!key_pattern->IsInternalizedString()) return Smi::kZero;
cache = heap->string_split_cache();
} else {
DCHECK(type == REGEXP_MULTIPLE_INDICES);
@@ -6801,7 +6802,7 @@ Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
if (cache->get(index + kStringOffset) != key_string ||
cache->get(index + kPatternOffset) != key_pattern) {
- return Smi::FromInt(0);
+ return Smi::kZero;
}
}
@@ -6831,7 +6832,7 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
uint32_t hash = key_string->Hash();
uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
+ if (cache->get(index + kStringOffset) == Smi::kZero) {
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
cache->set(index + kArrayOffset, *value_array);
@@ -6839,16 +6840,16 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
} else {
uint32_t index2 =
((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
+ if (cache->get(index2 + kStringOffset) == Smi::kZero) {
cache->set(index2 + kStringOffset, *key_string);
cache->set(index2 + kPatternOffset, *key_pattern);
cache->set(index2 + kArrayOffset, *value_array);
cache->set(index2 + kLastMatchOffset, *last_match_cache);
} else {
- cache->set(index2 + kStringOffset, Smi::FromInt(0));
- cache->set(index2 + kPatternOffset, Smi::FromInt(0));
- cache->set(index2 + kArrayOffset, Smi::FromInt(0));
- cache->set(index2 + kLastMatchOffset, Smi::FromInt(0));
+ cache->set(index2 + kStringOffset, Smi::kZero);
+ cache->set(index2 + kPatternOffset, Smi::kZero);
+ cache->set(index2 + kArrayOffset, Smi::kZero);
+ cache->set(index2 + kLastMatchOffset, Smi::kZero);
cache->set(index + kStringOffset, *key_string);
cache->set(index + kPatternOffset, *key_pattern);
cache->set(index + kArrayOffset, *value_array);
@@ -6865,13 +6866,13 @@ void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
}
}
// Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
+ value_array->set_map_no_write_barrier(isolate->heap()->fixed_cow_array_map());
}
void RegExpResultsCache::Clear(FixedArray* cache) {
for (int i = 0; i < kRegExpResultsCacheSize; i++) {
- cache->set(i, Smi::FromInt(0));
+ cache->set(i, Smi::kZero);
}
}
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 8118889966..b2e84ba4f3 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -48,7 +48,7 @@ class RegExpImpl {
// This function calls the garbage collector if necessary.
V8_EXPORT_PRIVATE MUST_USE_RESULT static MaybeHandle<Object> Exec(
Handle<JSRegExp> regexp, Handle<String> subject, int index,
- Handle<JSObject> lastMatchInfo);
+ Handle<RegExpMatchInfo> last_match_info);
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Handle<JSRegExp> re,
@@ -71,7 +71,7 @@ class RegExpImpl {
static Handle<Object> AtomExec(Handle<JSRegExp> regexp,
Handle<String> subject, int index,
- Handle<JSObject> lastMatchInfo);
+ Handle<RegExpMatchInfo> last_match_info);
enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };
@@ -103,12 +103,12 @@ class RegExpImpl {
// Returns an empty handle in case of an exception.
MUST_USE_RESULT static MaybeHandle<Object> IrregexpExec(
Handle<JSRegExp> regexp, Handle<String> subject, int index,
- Handle<JSObject> lastMatchInfo);
+ Handle<RegExpMatchInfo> last_match_info);
// Set last match info. If match is NULL, then setting captures is omitted.
- static Handle<JSObject> SetLastMatchInfo(Handle<JSObject> last_match_info,
- Handle<String> subject,
- int capture_count, int32_t* match);
+ static Handle<RegExpMatchInfo> SetLastMatchInfo(
+ Handle<RegExpMatchInfo> last_match_info, Handle<String> subject,
+ int capture_count, int32_t* match);
class GlobalCache {
public:
@@ -142,49 +142,6 @@ class RegExpImpl {
Handle<String> subject_;
};
-
- // Array index in the lastMatchInfo array.
- static const int kLastCaptureCount = 0;
- static const int kLastSubject = 1;
- static const int kLastInput = 2;
- static const int kFirstCapture = 3;
- static const int kLastMatchOverhead = 3;
-
- // Direct offset into the lastMatchInfo array.
- static const int kLastCaptureCountOffset =
- FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
- static const int kLastSubjectOffset =
- FixedArray::kHeaderSize + kLastSubject * kPointerSize;
- static const int kLastInputOffset =
- FixedArray::kHeaderSize + kLastInput * kPointerSize;
- static const int kFirstCaptureOffset =
- FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
-
- // Used to access the lastMatchInfo array.
- static int GetCapture(FixedArray* array, int index) {
- return Smi::cast(array->get(index + kFirstCapture))->value();
- }
-
- static void SetLastCaptureCount(FixedArray* array, int to) {
- array->set(kLastCaptureCount, Smi::FromInt(to));
- }
-
- static void SetLastSubject(FixedArray* array, String* to) {
- array->set(kLastSubject, to);
- }
-
- static void SetLastInput(FixedArray* array, String* to) {
- array->set(kLastInput, to);
- }
-
- static void SetCapture(FixedArray* array, int index, int to) {
- array->set(index + kFirstCapture, Smi::FromInt(to));
- }
-
- static int GetLastCaptureCount(FixedArray* array) {
- return Smi::cast(array->get(kLastCaptureCount))->value();
- }
-
// For acting on the JSRegExp data FixedArray.
static int IrregexpMaxRegisterCount(FixedArray* re);
static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index ec86526033..abdf577209 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -12,9 +12,9 @@ namespace internal {
RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
Isolate* isolate, RegExpMacroAssembler* assembler)
: RegExpMacroAssembler(isolate, assembler->zone()), assembler_(assembler) {
- unsigned int type = assembler->Implementation();
- DCHECK(type < 8);
- const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS",
+ IrregexpImplementation type = assembler->Implementation();
+ DCHECK_LT(type, 9);
+ const char* impl_names[] = {"IA32", "ARM", "ARM64", "MIPS", "S390",
"PPC", "X64", "X87", "Bytecode"};
PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
}
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index dba81ae9a7..fd3123f674 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -1607,7 +1607,7 @@ void RegExpBuilder::AddCharacter(uc16 c) {
void RegExpBuilder::AddUnicodeCharacter(uc32 c) {
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ if (c > static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
DCHECK(unicode());
AddLeadSurrogate(unibrow::Utf16::LeadSurrogate(c));
AddTrailSurrogate(unibrow::Utf16::TrailSurrogate(c));
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
new file mode 100644
index 0000000000..62daf3f1d5
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -0,0 +1,192 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/regexp/regexp-utils.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/regexp/jsregexp.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<String> RegExpUtils::GenericCaptureGetter(
+ Isolate* isolate, Handle<RegExpMatchInfo> match_info, int capture,
+ bool* ok) {
+ const int index = capture * 2;
+ if (index >= match_info->NumberOfCaptureRegisters()) {
+ if (ok != nullptr) *ok = false;
+ return isolate->factory()->empty_string();
+ }
+
+ const int match_start = match_info->Capture(index);
+ const int match_end = match_info->Capture(index + 1);
+ if (match_start == -1 || match_end == -1) {
+ if (ok != nullptr) *ok = false;
+ return isolate->factory()->empty_string();
+ }
+
+ if (ok != nullptr) *ok = true;
+ Handle<String> last_subject(match_info->LastSubject());
+ return isolate->factory()->NewSubString(last_subject, match_start, match_end);
+}
+
+namespace {
+
+V8_INLINE bool HasInitialRegExpMap(Isolate* isolate, Handle<JSReceiver> recv) {
+ return recv->map() == isolate->regexp_function()->initial_map();
+}
+
+} // namespace
+
+MaybeHandle<Object> RegExpUtils::SetLastIndex(Isolate* isolate,
+ Handle<JSReceiver> recv,
+ int value) {
+ if (HasInitialRegExpMap(isolate, recv)) {
+ JSRegExp::cast(*recv)->SetLastIndex(value);
+ return recv;
+ } else {
+ return Object::SetProperty(recv, isolate->factory()->lastIndex_string(),
+ handle(Smi::FromInt(value), isolate), STRICT);
+ }
+}
+
+MaybeHandle<Object> RegExpUtils::GetLastIndex(Isolate* isolate,
+ Handle<JSReceiver> recv) {
+ if (HasInitialRegExpMap(isolate, recv)) {
+ return handle(JSRegExp::cast(*recv)->LastIndex(), isolate);
+ } else {
+ return Object::GetProperty(recv, isolate->factory()->lastIndex_string());
+ }
+}
+
+// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+// Also takes an optional exec method in case our caller
+// has already fetched exec.
+MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
+ Handle<JSReceiver> regexp,
+ Handle<String> string,
+ Handle<Object> exec) {
+ if (exec->IsUndefined(isolate)) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, exec,
+ Object::GetProperty(regexp, isolate->factory()->exec_string()), Object);
+ }
+
+ if (exec->IsCallable()) {
+ const int argc = 1;
+ ScopedVector<Handle<Object>> argv(argc);
+ argv[0] = string;
+
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, exec, regexp, argc, argv.start()), Object);
+
+ if (!result->IsJSReceiver() && !result->IsNull(isolate)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kInvalidRegExpExecResult),
+ Object);
+ }
+ return result;
+ }
+
+ if (!regexp->IsJSRegExp()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "RegExp.prototype.exec"),
+ regexp),
+ Object);
+ }
+
+ {
+ Handle<JSFunction> regexp_exec = isolate->regexp_exec_function();
+
+ const int argc = 1;
+ ScopedVector<Handle<Object>> argv(argc);
+ argv[0] = string;
+
+ return Execution::Call(isolate, regexp_exec, regexp, argc, argv.start());
+ }
+}
+
+Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
+ if (!object->IsJSReceiver()) return Just(false);
+
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+
+ if (isolate->regexp_function()->initial_map() == receiver->map()) {
+ // Fast-path for unmodified JSRegExp instances.
+ // TODO(ishell): Adapt for new fast-path logic.
+ return Just(true);
+ }
+
+ Handle<Object> match;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, match,
+ JSObject::GetProperty(receiver, isolate->factory()->match_symbol()),
+ Nothing<bool>());
+
+ if (!match->IsUndefined(isolate)) return Just(match->BooleanValue());
+ return Just(object->IsJSRegExp());
+}
+
+bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
+ // TODO(ishell): Update this check once map changes for constant field
+ // tracking are landing.
+
+ if (!obj->IsJSReceiver()) return false;
+
+ JSReceiver* recv = JSReceiver::cast(*obj);
+
+ // Check the receiver's map.
+ Handle<JSFunction> regexp_function = isolate->regexp_function();
+ if (recv->map() != regexp_function->initial_map()) return false;
+
+ // Check the receiver's prototype's map.
+ Object* proto = recv->map()->prototype();
+ if (!proto->IsJSReceiver()) return false;
+
+ Handle<Map> initial_proto_initial_map = isolate->regexp_prototype_map();
+ return (JSReceiver::cast(proto)->map() == *initial_proto_initial_map);
+}
+
+int RegExpUtils::AdvanceStringIndex(Isolate* isolate, Handle<String> string,
+ int index, bool unicode) {
+ if (unicode && index < string->length()) {
+ const uint16_t first = string->Get(index);
+ if (first >= 0xD800 && first <= 0xDBFF && string->length() > index + 1) {
+ const uint16_t second = string->Get(index + 1);
+ if (second >= 0xDC00 && second <= 0xDFFF) {
+ return index + 2;
+ }
+ }
+ }
+
+ return index + 1;
+}
+
+MaybeHandle<Object> RegExpUtils::SetAdvancedStringIndex(
+ Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
+ bool unicode) {
+ Handle<Object> last_index_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, last_index_obj,
+ Object::GetProperty(regexp, isolate->factory()->lastIndex_string()),
+ Object);
+
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
+ Object::ToLength(isolate, last_index_obj), Object);
+
+ const int last_index = Handle<Smi>::cast(last_index_obj)->value();
+ const int new_last_index =
+ AdvanceStringIndex(isolate, string, last_index, unicode);
+
+ return SetLastIndex(isolate, regexp, new_last_index);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
new file mode 100644
index 0000000000..eff1ed739c
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_UTILS_H_
+#define V8_REGEXP_REGEXP_UTILS_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Helper methods for C++ regexp builtins.
+class RegExpUtils : public AllStatic {
+ public:
+ // Last match info accessors.
+ static Handle<String> GenericCaptureGetter(Isolate* isolate,
+ Handle<RegExpMatchInfo> match_info,
+ int capture, bool* ok = nullptr);
+
+ // Last index (RegExp.lastIndex) accessors.
+ static MUST_USE_RESULT MaybeHandle<Object> SetLastIndex(
+ Isolate* isolate, Handle<JSReceiver> regexp, int value);
+ static MUST_USE_RESULT MaybeHandle<Object> GetLastIndex(
+ Isolate* isolate, Handle<JSReceiver> recv);
+
+ // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
+ static MUST_USE_RESULT MaybeHandle<Object> RegExpExec(
+ Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
+ Handle<Object> exec);
+
+ // ES#sec-isregexp IsRegExp ( argument )
+ // Includes checking of the match property.
+ static Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object);
+
+ // Checks whether the given object is an unmodified JSRegExp instance.
+ // Neither the object's map, nor its prototype's map may be modified.
+ static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj);
+
+ // ES#sec-advancestringindex
+ // AdvanceStringIndex ( S, index, unicode )
+ static int AdvanceStringIndex(Isolate* isolate, Handle<String> string,
+ int index, bool unicode);
+ static MUST_USE_RESULT MaybeHandle<Object> SetAdvancedStringIndex(
+ Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
+ bool unicode);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_UTILS_H_
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 1a198ea42c..28d0ab2cc5 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -70,15 +70,12 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
#if V8_TARGET_ARCH_IA32
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X87
kMaxAllocatableGeneralRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
- compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_ARM
FLAG_enable_embedded_constant_pool
? (kMaxAllocatableGeneralRegisterCount - 1)
@@ -86,27 +83,21 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
CpuFeatures::IsSupported(VFP32DREGS)
? kMaxAllocatableDoubleRegisterCount
: (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
- ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0,
#elif V8_TARGET_ARCH_ARM64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_S390
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
- kMaxAllocatableDoubleRegisterCount,
#else
#error Unsupported target architecture.
#endif
@@ -145,7 +136,6 @@ const RegisterConfiguration* RegisterConfiguration::Turbofan() {
RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
- int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind, const char* const* general_register_names,
const char* const* float_register_names,
@@ -158,8 +148,6 @@ RegisterConfiguration::RegisterConfiguration(
num_allocatable_general_registers_(num_allocatable_general_registers),
num_allocatable_float_registers_(0),
num_allocatable_double_registers_(num_allocatable_double_registers),
- num_allocatable_aliased_double_registers_(
- num_allocatable_aliased_double_registers),
num_allocatable_simd128_registers_(0),
allocatable_general_codes_mask_(0),
allocatable_float_codes_mask_(0),
diff --git a/deps/v8/src/register-configuration.h b/deps/v8/src/register-configuration.h
index 2cb7c91eec..946c82e56c 100644
--- a/deps/v8/src/register-configuration.h
+++ b/deps/v8/src/register-configuration.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_REGISTER_CONFIGURATION_H_
#include "src/base/macros.h"
+#include "src/globals.h"
#include "src/machine-type.h"
namespace v8 {
@@ -13,7 +14,7 @@ namespace internal {
// An architecture independent representation of the sets of registers available
// for instruction creation.
-class RegisterConfiguration {
+class V8_EXPORT_PRIVATE RegisterConfiguration {
public:
enum AliasingKind {
// Registers alias a single register of every other size (e.g. Intel).
@@ -35,7 +36,6 @@ class RegisterConfiguration {
RegisterConfiguration(int num_general_registers, int num_double_registers,
int num_allocatable_general_registers,
int num_allocatable_double_registers,
- int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes,
const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind,
@@ -57,12 +57,6 @@ class RegisterConfiguration {
int num_allocatable_double_registers() const {
return num_allocatable_double_registers_;
}
- // TODO(bbudge): This is a temporary work-around required because our
- // register allocator does not yet support the aliasing of single/double
- // registers on ARM.
- int num_allocatable_aliased_double_registers() const {
- return num_allocatable_aliased_double_registers_;
- }
int num_allocatable_simd128_registers() const {
return num_allocatable_simd128_registers_;
}
@@ -142,7 +136,6 @@ class RegisterConfiguration {
int num_allocatable_general_registers_;
int num_allocatable_float_registers_;
int num_allocatable_double_registers_;
- int num_allocatable_aliased_double_registers_;
int num_allocatable_simd128_registers_;
int32_t allocatable_general_codes_mask_;
int32_t allocatable_float_codes_mask_;
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index b1e640c2ec..0de9e1c2bb 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -22,7 +22,7 @@ namespace internal {
// Number of times a function has to be seen on the stack before it is
// compiled for baseline.
-static const int kProfilerTicksBeforeBaseline = 1;
+static const int kProfilerTicksBeforeBaseline = 0;
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
@@ -43,17 +43,25 @@ STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
static const int kOSRCodeSizeAllowanceBase =
100 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowanceBaseIgnition =
- 100 * interpreter::Interpreter::kCodeSizeMultiplier;
+ 10 * interpreter::Interpreter::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick =
4 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTickIgnition =
- 4 * interpreter::Interpreter::kCodeSizeMultiplier;
+ 2 * interpreter::Interpreter::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
5 * FullCodeGenerator::kCodeSizeMultiplier;
+static const int kMaxSizeEarlyOptIgnition =
+ 5 * interpreter::Interpreter::kCodeSizeMultiplier;
+
+// Certain functions are simply too big to be worth optimizing.
+// We aren't using the code size multiplier here because there is no
+// "kMaxSizeOpt" with which we would need to normalize. This constant is
+// only for optimization decisions coming into TurboFan from Ignition.
+static const int kMaxSizeOptIgnition = 250 * 1024;
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
@@ -108,14 +116,10 @@ static void GetICCounts(JSFunction* function, int* ic_with_type_info_count,
// Harvest vector-ics as well
TypeFeedbackVector* vector = function->feedback_vector();
int with = 0, gen = 0, type_vector_ic_count = 0;
- const bool is_interpreted =
- function->shared()->code()->is_interpreter_trampoline_builtin();
+ const bool is_interpreted = function->shared()->IsInterpreted();
vector->ComputeCounts(&with, &gen, &type_vector_ic_count, is_interpreted);
- if (is_interpreted) {
- DCHECK_EQ(*ic_total_count, 0);
- *ic_total_count = type_vector_ic_count;
- }
+ *ic_total_count += type_vector_ic_count;
*ic_with_type_info_count += with;
*ic_generic_count += gen;
@@ -158,11 +162,7 @@ void RuntimeProfiler::Baseline(JSFunction* function,
OptimizationReason reason) {
DCHECK_NE(reason, OptimizationReason::kDoNotOptimize);
TraceRecompile(function, OptimizationReasonToString(reason), "baseline");
-
- // TODO(4280): Fix this to check function is compiled for the interpreter
- // once we have a standard way to check that. For now function will only
- // have a bytecode array if compiled for the interpreter.
- DCHECK(function->shared()->HasBytecodeArray());
+ DCHECK(function->shared()->IsInterpreted());
function->MarkForBaseline();
}
@@ -264,7 +264,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
}
return;
}
- if (function->IsOptimized()) return;
+ if (frame->is_optimized()) return;
int ticks = shared_code->profiler_ticks();
@@ -358,7 +358,7 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
return;
}
- if (function->IsOptimized()) return;
+ if (frame->is_optimized()) return;
OptimizationReason reason = ShouldOptimizeIgnition(function, frame);
@@ -369,8 +369,6 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
JavaScriptFrame* frame) {
- if (!FLAG_ignition_osr) return false;
-
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
@@ -402,6 +400,10 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
SharedFunctionInfo* shared = function->shared();
int ticks = shared->profiler_ticks();
+ if (shared->bytecode_array()->Size() > kMaxSizeOptIgnition) {
+ return OptimizationReason::kDoNotOptimize;
+ }
+
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
@@ -422,9 +424,18 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
}
return OptimizationReason::kDoNotOptimize;
}
+ } else if (!any_ic_changed_ &&
+ shared->bytecode_array()->Size() < kMaxSizeEarlyOptIgnition) {
+ // If no IC was patched since the last tick and this function is very
+ // small, optimistically optimize it now.
+ int typeinfo, generic, total, type_percentage, generic_percentage;
+ GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
+ &generic_percentage);
+ if (type_percentage >= FLAG_type_info_threshold &&
+ generic_percentage <= FLAG_generic_ic_threshold) {
+ return OptimizationReason::kSmallFunction;
+ }
}
- // TODO(rmcilroy): Consider whether we should optimize small functions when
- // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
return OptimizationReason::kDoNotOptimize;
}
@@ -446,19 +457,9 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
- List<JSFunction*> functions(4);
- frame->GetFunctions(&functions);
- for (int i = functions.length(); --i >= 0; ) {
- SharedFunctionInfo* shared_function_info = functions[i]->shared();
- int ticks = shared_function_info->profiler_ticks();
- if (ticks < Smi::kMaxValue) {
- shared_function_info->set_profiler_ticks(ticks + 1);
- }
- }
-
Compiler::CompilationTier next_tier =
Compiler::NextCompilationTier(function);
- if (function->shared()->code()->is_interpreter_trampoline_builtin()) {
+ if (function->shared()->IsInterpreted()) {
if (next_tier == Compiler::BASELINE) {
MaybeBaselineIgnition(function, frame);
} else {
@@ -469,6 +470,19 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
DCHECK_EQ(next_tier, Compiler::OPTIMIZED);
MaybeOptimizeFullCodegen(function, frame, frame_count);
}
+
+ // Update shared function info ticks after checking for whether functions
+ // should be optimized to keep FCG (which updates ticks on code) and
+ // Ignition (which updates ticks on shared function info) in sync.
+ List<JSFunction*> functions(4);
+ frame->GetFunctions(&functions);
+ for (int i = functions.length(); --i >= 0;) {
+ SharedFunctionInfo* shared_function_info = functions[i]->shared();
+ int ticks = shared_function_info->profiler_ticks();
+ if (ticks < Smi::kMaxValue) {
+ shared_function_info->set_profiler_ticks(ticks + 1);
+ }
+ }
}
any_ic_changed_ = false;
}
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index cbde8f372e..1a2d957caf 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -28,11 +28,13 @@ RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
// This is necessary to enable fast checks for absence of elements
// on Array.prototype and below.
prototype->set_elements(isolate->heap()->empty_fixed_array());
- return Smi::FromInt(0);
+ return Smi::kZero;
}
-static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
- const char* name, Handle<Code> code, int argc = -1) {
+static void InstallCode(
+ Isolate* isolate, Handle<JSObject> holder, const char* name,
+ Handle<Code> code, int argc = -1,
+ BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<JSFunction> optimized =
isolate->factory()->NewFunctionWithoutPrototype(key, code);
@@ -41,15 +43,19 @@ static void InstallCode(Isolate* isolate, Handle<JSObject> holder,
} else {
optimized->shared()->set_internal_formal_parameter_count(argc);
}
+ if (id >= 0) {
+ optimized->shared()->set_builtin_function_id(id);
+ }
JSObject::AddProperty(holder, key, optimized, NONE);
}
-static void InstallBuiltin(Isolate* isolate, Handle<JSObject> holder,
- const char* name, Builtins::Name builtin_name,
- int argc = -1) {
+static void InstallBuiltin(
+ Isolate* isolate, Handle<JSObject> holder, const char* name,
+ Builtins::Name builtin_name, int argc = -1,
+ BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
InstallCode(isolate, holder, name,
- handle(isolate->builtins()->builtin(builtin_name), isolate),
- argc);
+ handle(isolate->builtins()->builtin(builtin_name), isolate), argc,
+ id);
}
RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
@@ -71,6 +77,12 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes, 2);
InstallBuiltin(isolate, holder, "indexOf", Builtins::kArrayIndexOf, 2);
+ InstallBuiltin(isolate, holder, "keys", Builtins::kArrayPrototypeKeys, 0,
+ kArrayKeys);
+ InstallBuiltin(isolate, holder, "values", Builtins::kArrayPrototypeValues, 0,
+ kArrayValues);
+ InstallBuiltin(isolate, holder, "entries", Builtins::kArrayPrototypeEntries,
+ 0, kArrayEntries);
return *holder;
}
@@ -140,7 +152,7 @@ RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
to->set_length(from->length());
JSObject::ResetElements(from);
- from->set_length(Smi::FromInt(0));
+ from->set_length(Smi::kZero);
JSObject::ValidateElements(to);
return *to;
@@ -376,7 +388,7 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
if (index >= capacity) {
if (!object->GetElementsAccessor()->GrowCapacity(object, index)) {
- return Smi::FromInt(0);
+ return Smi::kZero;
}
}
@@ -423,21 +435,6 @@ RUNTIME_FUNCTION(Runtime_IsArray) {
return isolate->heap()->ToBoolean(obj->IsJSArray());
}
-RUNTIME_FUNCTION(Runtime_HasCachedArrayIndex) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- return isolate->heap()->false_value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetCachedArrayIndex) {
- // This can never be reached, because Runtime_HasCachedArrayIndex always
- // returns false.
- UNIMPLEMENTED();
- return nullptr;
-}
-
-
RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -639,5 +636,48 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
return Smi::FromInt(-1);
}
+RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
+
+ if (spread->IsJSArray()) {
+ // Check that the spread arg has fast elements
+ Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
+ ElementsKind array_kind = spread_array->GetElementsKind();
+
+ // And that it has the orignal ArrayPrototype
+ JSObject* array_proto = JSObject::cast(spread_array->map()->prototype());
+ Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
+
+ // Check that the iterator acts as expected.
+ // If IsArrayIteratorLookupChainIntact(), then we know that the initial
+ // ArrayIterator is being used. If the map of the prototype has changed,
+ // then take the slow path.
+
+ if (isolate->is_initial_array_prototype(array_proto) &&
+ isolate->IsArrayIteratorLookupChainIntact() &&
+ isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
+ if (IsFastPackedElementsKind(array_kind)) {
+ return *spread;
+ }
+ if (IsFastHoleyElementsKind(array_kind) &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact()) {
+ return *spread;
+ }
+ }
+ }
+
+ Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+
+ Handle<Object> spreaded;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, spreaded,
+ Execution::Call(isolate, spread_iterable_function,
+ isolate->factory()->undefined_value(), 1, &spread));
+
+ return *spreaded;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index b25a5ef5c2..57e5d98532 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -95,7 +95,7 @@ RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
kind == JSSetIterator::kKindEntries);
Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
holder->set_table(*table);
- holder->set_index(Smi::FromInt(0));
+ holder->set_index(Smi::kZero);
holder->set_kind(Smi::FromInt(kind));
return isolate->heap()->undefined_value();
}
@@ -191,7 +191,7 @@ RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
kind == JSMapIterator::kKindEntries);
Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
holder->set_table(*table);
- holder->set_index(Smi::FromInt(0));
+ holder->set_index(Smi::kZero);
holder->set_kind(Smi::FromInt(kind));
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 01ec73d427..472e076de4 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -123,7 +123,7 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
function->shared()->ReplaceCode(
isolate->builtins()->builtin(Builtins::kCompileLazy));
}
- return Smi::FromInt(0);
+ return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 2d217b83f7..824ea92a0f 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -16,8 +16,8 @@
#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
-#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -47,7 +47,7 @@ RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
isolate->debug()->Break(it.frame());
// If live-edit has dropped frames, we are not going back to dispatch.
- if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::FromInt(0);
+ if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::kZero;
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
@@ -256,14 +256,14 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
const char* status = "rejected";
int status_val = Handle<Smi>::cast(status_obj)->value();
switch (status_val) {
- case +1:
+ case kPromiseFulfilled:
status = "resolved";
break;
- case 0:
+ case kPromisePending:
status = "pending";
break;
default:
- DCHECK_EQ(-1, status_val);
+ DCHECK_EQ(kPromiseRejected, status_val);
}
Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
@@ -457,7 +457,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
StackFrame::Id id = isolate->debug()->break_frame_id();
if (id == StackFrame::NO_ID) {
// If there is no JavaScript stack frame count is 0.
- return Smi::FromInt(0);
+ return Smi::kZero;
}
for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
@@ -551,10 +551,10 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsFrameIdIndex, *frame_id);
// Add the function name.
- Handle<Object> wasm_obj(it.wasm_frame()->wasm_obj(), isolate);
+ Handle<Object> wasm_instance(it.wasm_frame()->wasm_instance(), isolate);
int func_index = it.wasm_frame()->function_index();
Handle<String> func_name =
- wasm::GetWasmFunctionName(isolate, wasm_obj, func_index);
+ wasm::GetWasmFunctionName(isolate, wasm_instance, func_index);
details->set(kFrameDetailsFunctionIndex, *func_name);
// Add the script wrapper
@@ -563,14 +563,26 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsScriptIndex, *script_wrapper);
// Add the arguments count.
- details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(0));
+ details->set(kFrameDetailsArgumentCountIndex, Smi::kZero);
// Add the locals count
- details->set(kFrameDetailsLocalCountIndex, Smi::FromInt(0));
+ details->set(kFrameDetailsLocalCountIndex, Smi::kZero);
// Add the source position.
+ // For wasm, it is function-local, so translate it to a module-relative
+ // position, such that together with the script it uniquely identifies the
+ // position.
+ Handle<Object> positionValue;
if (position != kNoSourcePosition) {
- details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
+ int translated_position = position;
+ if (!wasm::WasmIsAsmJs(*wasm_instance, isolate)) {
+ Handle<WasmCompiledModule> compiled_module(
+ wasm::GetCompiledModule(JSObject::cast(*wasm_instance)), isolate);
+ translated_position +=
+ wasm::GetFunctionCodeOffset(compiled_module, func_index);
+ }
+ details->set(kFrameDetailsSourcePositionIndex,
+ Smi::FromInt(translated_position));
}
// Add the constructor information.
@@ -929,7 +941,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- if (!args[0]->IsJSGeneratorObject()) return Smi::FromInt(0);
+ if (!args[0]->IsJSGeneratorObject()) return Smi::kZero;
// Check arguments.
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, gen, 0);
@@ -948,7 +960,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
DCHECK(args.length() == 2);
if (!args[0]->IsJSGeneratorObject()) {
- return *isolate->factory()->undefined_value();
+ return isolate->heap()->undefined_value();
}
// Check arguments.
@@ -1429,6 +1441,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
// Patches script source (should be called upon BeforeCompile event).
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -1569,6 +1582,7 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
return *Script::GetWrapper(found);
}
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -1583,6 +1597,7 @@ RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
return Smi::FromInt(line_ends_array->length());
}
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -1601,7 +1616,7 @@ RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
if (line < 0 || line > line_count) {
return Smi::FromInt(-1);
} else if (line == 0) {
- return Smi::FromInt(0);
+ return Smi::kZero;
} else {
DCHECK(0 < line && line <= line_count);
const int pos = Smi::cast(line_ends_array->get(line - 1))->value() + 1;
@@ -1609,6 +1624,7 @@ RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
}
}
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -1634,7 +1650,7 @@ static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
Script::OffsetFlag offset_flag,
Isolate* isolate) {
Script::PositionInfo info;
- if (!script->GetPositionInfo(position, &info, offset_flag)) {
+ if (!Script::GetPositionInfo(script, position, &info, offset_flag)) {
return isolate->factory()->null_value();
}
@@ -1661,62 +1677,49 @@ static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
return jsinfo;
}
-// Get information on a specific source line and column possibly offset by a
-// fixed source position. This function is used to find a source position from
-// a line and column position. The fixed source position offset is typically
-// used to find a source position in a function based on a line and column in
-// the source for the function alone. The offset passed will then be the
-// start position of the source for the function within the full script source.
-// Note that incoming line and column parameters may be undefined, and are
-// assumed to be passed *with* offsets.
-RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_CHECKED(JSValue, script, 0);
-
- CHECK(script->value()->IsScript());
- Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+namespace {
+Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
+ Handle<Object> opt_line,
+ Handle<Object> opt_column,
+ int32_t offset) {
// Line and column are possibly undefined and we need to handle these cases,
// additionally subtracting corresponding offsets.
int32_t line;
- if (args[1]->IsNull(isolate) || args[1]->IsUndefined(isolate)) {
+ if (opt_line->IsNull(isolate) || opt_line->IsUndefined(isolate)) {
line = 0;
} else {
- CHECK(args[1]->IsNumber());
- line = NumberToInt32(args[1]) - script_handle->line_offset();
+ CHECK(opt_line->IsNumber());
+ line = NumberToInt32(*opt_line) - script->line_offset();
}
int32_t column;
- if (args[2]->IsNull(isolate) || args[2]->IsUndefined(isolate)) {
+ if (opt_column->IsNull(isolate) || opt_column->IsUndefined(isolate)) {
column = 0;
} else {
- CHECK(args[2]->IsNumber());
- column = NumberToInt32(args[2]);
- if (line == 0) column -= script_handle->column_offset();
+ CHECK(opt_column->IsNumber());
+ column = NumberToInt32(*opt_column);
+ if (line == 0) column -= script->column_offset();
}
- CONVERT_NUMBER_CHECKED(int32_t, offset_position, Int32, args[3]);
-
- if (line < 0 || column < 0 || offset_position < 0) {
- return isolate->heap()->null_value();
+ if (line < 0 || column < 0 || offset < 0) {
+ return isolate->factory()->null_value();
}
- Script::InitLineEnds(script_handle);
+ Script::InitLineEnds(script);
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
+ FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
const int line_count = line_ends_array->length();
int position;
if (line == 0) {
- position = offset_position + column;
+ position = offset + column;
} else {
Script::PositionInfo info;
- if (!script_handle->GetPositionInfo(offset_position, &info,
- Script::NO_OFFSET) ||
+ if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET) ||
info.line + line >= line_count) {
- return isolate->heap()->null_value();
+ return isolate->factory()->null_value();
}
const int offset_line = info.line + line;
@@ -1727,10 +1730,65 @@ RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
position = offset_line_position + column;
}
- return *GetJSPositionInfo(script_handle, position, Script::NO_OFFSET,
- isolate);
+ return GetJSPositionInfo(script, position, Script::NO_OFFSET, isolate);
+}
+
+// Slow traversal over all scripts on the heap.
+bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
+ Script::Iterator iterator(isolate);
+ Script* script = NULL;
+ while ((script = iterator.Next()) != NULL) {
+ if (script->id() == needle) {
+ *result = handle(script);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+} // namespace
+
+// Get information on a specific source line and column possibly offset by a
+// fixed source position. This function is used to find a source position from
+// a line and column position. The fixed source position offset is typically
+// used to find a source position in a function based on a line and column in
+// the source for the function alone. The offset passed will then be the
+// start position of the source for the function within the full script source.
+// Note that incoming line and column parameters may be undefined, and are
+// assumed to be passed *with* offsets.
+// TODO(5530): Remove once uses in debug.js are gone.
+RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(JSValue, script, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
+ CONVERT_NUMBER_CHECKED(int32_t, offset, Int32, args[3]);
+
+ CHECK(script->value()->IsScript());
+ Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+
+ return *ScriptLocationFromLine(isolate, script_handle, opt_line, opt_column,
+ offset);
+}
+
+// TODO(5530): Rename once conflicting function has been deleted.
+RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
+ CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
+ CONVERT_NUMBER_CHECKED(int32_t, offset, Int32, args[3]);
+
+ Handle<Script> script;
+ CHECK(GetScriptById(isolate, scriptid, &script));
+
+ return *ScriptLocationFromLine(isolate, script, opt_line, opt_column, offset);
}
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -1748,6 +1806,7 @@ RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
// Returns the given line as a string, or null if line is out of bounds.
// The parameter line is expected to include the script's line offset.
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -1822,12 +1881,19 @@ RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DebugNextMicrotaskId) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ return Smi::FromInt(isolate->GetNextDebugMicrotaskId());
+}
RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
- DCHECK(args.length() == 1);
+ DCHECK(args.length() == 3);
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
- isolate->debug()->OnAsyncTaskEvent(data);
+ CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, id, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
+ isolate->debug()->OnAsyncTaskEvent(type, id, name);
return isolate->heap()->undefined_value();
}
@@ -1843,34 +1909,5 @@ RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
return NULL;
}
-RUNTIME_FUNCTION(Runtime_GetWasmFunctionOffsetTable) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSValue, script_val, 0);
-
- CHECK(script_val->value()->IsScript());
- Handle<Script> script = Handle<Script>(Script::cast(script_val->value()));
-
- Handle<wasm::WasmDebugInfo> debug_info =
- wasm::GetDebugInfo(handle(script->wasm_object(), isolate));
- Handle<FixedArray> elements = wasm::WasmDebugInfo::GetFunctionOffsetTable(
- debug_info, script->wasm_function_index());
- return *isolate->factory()->NewJSArrayWithElements(elements);
-}
-
-RUNTIME_FUNCTION(Runtime_DisassembleWasmFunction) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_CHECKED(JSValue, script_val, 0);
-
- CHECK(script_val->value()->IsScript());
- Handle<Script> script = Handle<Script>(Script::cast(script_val->value()));
-
- Handle<wasm::WasmDebugInfo> debug_info =
- wasm::GetDebugInfo(handle(script->wasm_object(), isolate));
- return *wasm::WasmDebugInfo::DisassembleFunction(
- debug_info, script->wasm_function_index());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index fa50941925..a91ab28cc6 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -55,7 +55,7 @@ RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
return isolate->heap()->undefined_value();
}
-
+// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -71,6 +71,20 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_FunctionGetScriptId) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
+
+ if (function->IsJSFunction()) {
+ Handle<Object> script(
+ Handle<JSFunction>::cast(function)->shared()->script(), isolate);
+ if (script->IsScript()) {
+ return Smi::FromInt(Handle<Script>::cast(script)->id());
+ }
+ }
+ return Smi::FromInt(-1);
+}
RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index cac403baca..75e0952581 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -926,7 +926,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
return *isolate->factory()->NewStringFromStaticChars("none");
} else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
- return *isolate->factory()->number_string();
+ return isolate->heap()->number_string();
} else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
return *isolate->factory()->NewStringFromStaticChars("letter");
} else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
@@ -939,55 +939,10 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
}
namespace {
-void ConvertCaseWithTransliterator(icu::UnicodeString* input,
- const char* transliterator_id) {
- UErrorCode status = U_ZERO_ERROR;
- std::unique_ptr<icu::Transliterator> translit(
- icu::Transliterator::createInstance(
- icu::UnicodeString(transliterator_id, -1, US_INV), UTRANS_FORWARD,
- status));
- if (U_FAILURE(status)) return;
- translit->transliterate(*input);
-}
-
MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
bool is_to_upper, const char* lang) {
- int32_t src_length = s->length();
-
- // Greek uppercasing has to be done via transliteration.
- // TODO(jshin): Drop this special-casing once ICU's regular case conversion
- // API supports Greek uppercasing. See
- // http://bugs.icu-project.org/trac/ticket/10582 .
- // In the meantime, if there's no Greek character in |s|, call this
- // function again with the root locale (lang="").
- // ICU's C API for transliteration is nasty and we just use C++ API.
- if (V8_UNLIKELY(is_to_upper && lang[0] == 'e' && lang[1] == 'l')) {
- icu::UnicodeString converted;
- std::unique_ptr<uc16[]> sap;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = s->GetFlatContent();
- const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
- // Starts with the source string (read-only alias with copy-on-write
- // semantics) and will be modified to contain the converted result.
- // Using read-only alias at first saves one copy operation if
- // transliteration does not change the input, which is rather rare.
- // Moreover, transliteration takes rather long so that saving one copy
- // helps only a little bit.
- converted.setTo(false, src, src_length);
- ConvertCaseWithTransliterator(&converted, "el-Upper");
- // If no change is made, just return |s|.
- if (converted.getBuffer() == src) return *s;
- }
- RETURN_RESULT_OR_FAILURE(
- isolate,
- isolate->factory()->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(converted.getBuffer()),
- converted.length())));
- }
-
auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
-
+ int32_t src_length = s->length();
int32_t dest_length = src_length;
UErrorCode status;
Handle<SeqTwoByteString> result;
@@ -1138,7 +1093,7 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
s = String::Flatten(s);
// First scan the string for uppercase and non-ASCII characters:
if (s->HasOnlyOneByteChars()) {
- unsigned first_index_to_lower = length;
+ int first_index_to_lower = length;
for (int index = 0; index < length; ++index) {
// Blink specializes this path for one-byte strings, so it
// does not need to do a generic get, but can do the equivalent
@@ -1165,14 +1120,16 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
String::FlatContent flat = s->GetFlatContent();
if (flat.IsOneByte()) {
const uint8_t* src = flat.ToOneByteVector().start();
- CopyChars(result->GetChars(), src, first_index_to_lower);
+ CopyChars(result->GetChars(), src,
+ static_cast<size_t>(first_index_to_lower));
for (int index = first_index_to_lower; index < length; ++index) {
uint16_t ch = static_cast<uint16_t>(src[index]);
result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
}
} else {
const uint16_t* src = flat.ToUC16Vector().start();
- CopyChars(result->GetChars(), src, first_index_to_lower);
+ CopyChars(result->GetChars(), src,
+ static_cast<size_t>(first_index_to_lower));
for (int index = first_index_to_lower; index < length; ++index) {
uint16_t ch = src[index];
result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
@@ -1283,7 +1240,7 @@ RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
Handle<FixedArray> date_cache_version =
isolate->factory()->NewFixedArray(1, TENURED);
- date_cache_version->set(0, Smi::FromInt(0));
+ date_cache_version->set(0, Smi::kZero);
isolate->eternal_handles()->CreateSingleton(
isolate, *date_cache_version, EternalHandles::DATE_CACHE_VERSION);
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 26882b5c83..621f33547e 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -100,12 +100,29 @@ RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
return isolate->StackOverflow();
}
+RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
+ HandleScope scope(isolate);
+ DCHECK_LE(1, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
+
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ Handle<Object> arg0 = (args.length() > 1) ? args.at<Object>(1) : undefined;
+ Handle<Object> arg1 = (args.length() > 2) ? args.at<Object>(2) : undefined;
+ Handle<Object> arg2 = (args.length() > 3) ? args.at<Object>(3) : undefined;
+
+ MessageTemplate::Template message_id =
+ static_cast<MessageTemplate::Template>(message_id_smi);
+
+ THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+ NewTypeError(message_id, arg0, arg1, arg2));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
- Handle<Object> error_obj = isolate->factory()->NewError(
+ Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
static_cast<MessageTemplate::Template>(message_id));
// For wasm traps, the byte offset (a.k.a source position) can not be
@@ -270,64 +287,6 @@ RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
isolate, NewTypeError(MessageTemplate::kApplyNonFunction, object, type));
}
-namespace {
-
-void PromiseRejectEvent(Isolate* isolate, Handle<JSObject> promise,
- Handle<Object> rejected_promise, Handle<Object> value,
- bool debug_event) {
- if (isolate->debug()->is_active() && debug_event) {
- isolate->debug()->OnPromiseReject(rejected_promise, value);
- }
- Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
- // Do not report if we actually have a handler.
- if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
- isolate->ReportPromiseReject(promise, value,
- v8::kPromiseRejectWithNoHandler);
- }
-}
-
-} // namespace
-
-RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
- DCHECK(args.length() == 3);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
-
- PromiseRejectEvent(isolate, promise, promise, value, debug_event);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
- DCHECK(args.length() == 2);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- Handle<Object> rejected_promise = promise;
- if (isolate->debug()->is_active()) {
- // If the Promise.reject call is caught, then this will return
- // undefined, which will be interpreted by PromiseRejectEvent
- // as being a caught exception event.
- rejected_promise = isolate->GetPromiseOnStackOnThrow();
- }
- PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
- DCHECK(args.length() == 1);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
- // At this point, no revocation has been issued before
- CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
- isolate->ReportPromiseReject(promise, Handle<Object>(),
- v8::kPromiseHandlerAddedAfterReject);
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_StackGuard) {
SealHandleScope shs(isolate);
@@ -430,10 +389,10 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
std::unique_ptr<ParseInfo> info(
location.function()->shared()->is_function()
- ? new ParseInfo(&zone, location.function())
+ ? new ParseInfo(&zone, handle(location.function()->shared()))
: new ParseInfo(&zone, location.script()));
if (Parser::ParseStatic(info.get())) {
CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
@@ -554,36 +513,6 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
}
}
-RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 6);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, resolve, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, reject, 3);
- CONVERT_ARG_HANDLE_CHECKED(Object, before_debug_event, 4);
- CONVERT_ARG_HANDLE_CHECKED(Object, after_debug_event, 5);
- Handle<PromiseContainer> container = isolate->factory()->NewPromiseContainer(
- resolution, then, resolve, reject, before_debug_event, after_debug_event);
- isolate->EnqueueMicrotask(container);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
- isolate->EnqueueMicrotask(microtask);
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- isolate->RunMicrotasks();
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -593,13 +522,13 @@ RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
isolate, Object::OrdinaryHasInstance(isolate, callable, object));
}
-RUNTIME_FUNCTION(Runtime_IsWasmObject) {
+RUNTIME_FUNCTION(Runtime_IsWasmInstance) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, object, 0);
- bool is_wasm_object =
- object->IsJSObject() && wasm::IsWasmObject(JSObject::cast(object));
- return *isolate->factory()->ToBoolean(is_wasm_object);
+ bool is_wasm_instance =
+ object->IsJSObject() && wasm::IsWasmInstance(JSObject::cast(object));
+ return *isolate->factory()->ToBoolean(is_wasm_instance);
}
RUNTIME_FUNCTION(Runtime_Typeof) {
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index ce71e2c52d..62eee1744f 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -171,5 +171,19 @@ RUNTIME_FUNCTION(Runtime_InterpreterSetPendingMessage) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+ CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+ interpreter::BytecodeArrayIterator it(bytecode_array);
+ int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+ while (it.current_offset() < offset) it.Advance();
+ DCHECK_EQ(offset, it.current_offset());
+ it.Advance(); // Advance by one bytecode.
+ offset = it.current_offset() + BytecodeArray::kHeaderSize - kHeapObjectTag;
+ return Smi::FromInt(offset);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index ebdf04ccae..8bb4522a98 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -86,7 +86,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
if (value->IsUninitialized(isolate)) {
- value = handle(Smi::FromInt(0), isolate);
+ value = handle(Smi::kZero, isolate);
}
maybe_result = JSObject::SetOwnElementIgnoreAttributes(
boilerplate, element_index, value, NONE);
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 47e560d022..404305a150 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -15,58 +15,49 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- if (isolate->serializer_enabled()) {
- // Random numbers in the snapshot are not really that random. And we cannot
- // return a typed array as it cannot be serialized. To make calling
- // Math.random possible when creating a custom startup snapshot, we simply
- // return a normal array with a single random number.
- Handle<HeapNumber> random_number = isolate->factory()->NewHeapNumber(
- isolate->random_number_generator()->NextDouble());
- Handle<FixedArray> array_backing = isolate->factory()->NewFixedArray(1);
- array_backing->set(0, *random_number);
- return *isolate->factory()->NewJSArrayWithElements(array_backing);
- }
+ DCHECK(args.length() == 0);
+
+ Handle<Context> native_context = isolate->native_context();
+ DCHECK_EQ(0, native_context->math_random_index()->value());
+
+ static const int kCacheSize = 64;
+ static const int kState0Offset = kCacheSize - 1;
+ static const int kState1Offset = kState0Offset - 1;
+ // The index is decremented before being used to access the cache.
+ static const int kInitialIndex = kState1Offset;
- static const int kState0Offset = 0;
- static const int kState1Offset = 1;
- static const int kRandomBatchSize = 64;
- CONVERT_ARG_HANDLE_CHECKED(Object, maybe_typed_array, 0);
- Handle<JSTypedArray> typed_array;
- // Allocate typed array if it does not yet exist.
- if (maybe_typed_array->IsJSTypedArray()) {
- typed_array = Handle<JSTypedArray>::cast(maybe_typed_array);
+ Handle<FixedDoubleArray> cache;
+ uint64_t state0 = 0;
+ uint64_t state1 = 0;
+ if (native_context->math_random_cache()->IsFixedDoubleArray()) {
+ cache = Handle<FixedDoubleArray>(
+ FixedDoubleArray::cast(native_context->math_random_cache()), isolate);
+ state0 = double_to_uint64(cache->get_scalar(kState0Offset));
+ state1 = double_to_uint64(cache->get_scalar(kState1Offset));
} else {
- static const int kByteLength = kRandomBatchSize * kDoubleSize;
- Handle<JSArrayBuffer> buffer =
- isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, TENURED);
- JSArrayBuffer::SetupAllocatingData(buffer, isolate, kByteLength, true,
- SharedFlag::kNotShared);
- typed_array = isolate->factory()->NewJSTypedArray(
- kExternalFloat64Array, buffer, 0, kRandomBatchSize);
+ cache = Handle<FixedDoubleArray>::cast(
+ isolate->factory()->NewFixedDoubleArray(kCacheSize, TENURED));
+ native_context->set_math_random_cache(*cache);
+ // Initialize state if not yet initialized.
+ while (state0 == 0 || state1 == 0) {
+ isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
+ isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
+ }
}
DisallowHeapAllocation no_gc;
- double* array =
- reinterpret_cast<double*>(typed_array->GetBuffer()->backing_store());
- // Fetch existing state.
- uint64_t state0 = double_to_uint64(array[kState0Offset]);
- uint64_t state1 = double_to_uint64(array[kState1Offset]);
- // Initialize state if not yet initialized.
- while (state0 == 0 || state1 == 0) {
- isolate->random_number_generator()->NextBytes(&state0, sizeof(state0));
- isolate->random_number_generator()->NextBytes(&state1, sizeof(state1));
- }
+ FixedDoubleArray* raw_cache = *cache;
// Create random numbers.
- for (int i = kState1Offset + 1; i < kRandomBatchSize; i++) {
+ for (int i = 0; i < kInitialIndex; i++) {
// Generate random numbers using xorshift128+.
base::RandomNumberGenerator::XorShift128(&state0, &state1);
- array[i] = base::RandomNumberGenerator::ToDouble(state0, state1);
+ raw_cache->set(i, base::RandomNumberGenerator::ToDouble(state0, state1));
}
+
// Persist current state.
- array[kState0Offset] = uint64_to_double(state0);
- array[kState1Offset] = uint64_to_double(state1);
- return *typed_array;
+ raw_cache->set(kState0Offset, uint64_to_double(state0));
+ raw_cache->set(kState1Offset, uint64_to_double(state1));
+ return Smi::FromInt(kInitialIndex);
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
new file mode 100644
index 0000000000..2b813430e0
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(module_request, 0);
+ Handle<Module> module(isolate->context()->module());
+ return *Module::GetModuleNamespace(module, module_request);
+}
+
+RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+ Handle<Module> module(isolate->context()->module());
+ return *Module::LoadVariable(module, index);
+}
+
+RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<Module> module(isolate->context()->module());
+ Module::StoreVariable(module, index, value);
+ return isolate->heap()->undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 9f43c0acfc..bfe8763e99 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -33,28 +33,40 @@ RUNTIME_FUNCTION(Runtime_StringToNumber) {
// ES6 18.2.5 parseInt(string, radix) slow path
RUNTIME_FUNCTION(Runtime_StringParseInt) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
- // Step 8.a. is already handled in the JS function.
- CHECK(radix == 0 || (2 <= radix && radix <= 36));
-
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, string, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, radix, 1);
+
+ // Convert {string} to a String first, and flatten it.
+ Handle<String> subject;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, subject,
+ Object::ToString(isolate, string));
subject = String::Flatten(subject);
- double value;
+ // Convert {radix} to Int32.
+ if (!radix->IsNumber()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, radix, Object::ToNumber(radix));
+ }
+ int radix32 = DoubleToInt32(radix->Number());
+ if (radix32 != 0 && (radix32 < 2 || radix32 > 36)) {
+ return isolate->heap()->nan_value();
+ }
+
+ double result;
{
DisallowHeapAllocation no_gc;
String::FlatContent flat = subject->GetFlatContent();
if (flat.IsOneByte()) {
- value =
- StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(), radix);
+ result = StringToInt(isolate->unicode_cache(), flat.ToOneByteVector(),
+ radix32);
} else {
- value = StringToInt(isolate->unicode_cache(), flat.ToUC16Vector(), radix);
+ result =
+ StringToInt(isolate->unicode_cache(), flat.ToUC16Vector(), radix32);
}
}
- return *isolate->factory()->NewNumber(value);
+ return *isolate->factory()->NewNumber(result);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 70ed23ba61..c7e9cf3c92 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -207,6 +207,70 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
return isolate->heap()->false_value();
}
+// ES6 section 19.1.2.2 Object.create ( O [ , Properties ] )
+// TODO(verwaest): Support the common cases with precached map directly in
+// an Object.create stub.
+RUNTIME_FUNCTION(Runtime_ObjectCreate) {
+ HandleScope scope(isolate);
+ Handle<Object> prototype = args.at<Object>(0);
+ if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
+ }
+
+ // Generate the map with the specified {prototype} based on the Object
+ // function's initial map from the current native context.
+ // TODO(bmeurer): Use a dedicated cache for Object.create; think about
+ // slack tracking for Object.create.
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() != *prototype) {
+ if (prototype->IsNull(isolate)) {
+ map = isolate->slow_object_with_null_prototype_map();
+ } else if (prototype->IsJSObject()) {
+ Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+ if (!js_prototype->map()->is_prototype_map()) {
+ JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+ }
+ Handle<PrototypeInfo> info =
+ Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+ // TODO(verwaest): Use inobject slack tracking for this map.
+ if (info->HasObjectCreateMap()) {
+ map = handle(info->ObjectCreateMap(), isolate);
+ } else {
+ map = Map::CopyInitialMap(map);
+ Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+ PrototypeInfo::SetObjectCreateMap(info, map);
+ }
+ } else {
+ map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+ }
+ }
+
+ bool is_dictionary_map = map->is_dictionary_map();
+ Handle<FixedArray> object_properties;
+ if (is_dictionary_map) {
+ // Allocate the actual properties dictionary up front to avoid invalid object
+ // state.
+ object_properties =
+ NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
+ }
+ // Actually allocate the object.
+ Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
+ if (is_dictionary_map) {
+ object->set_properties(*object_properties);
+ }
+
+ // Define the properties if properties was specified and is not undefined.
+ Handle<Object> properties = args.at<Object>(1);
+ if (!properties->IsUndefined(isolate)) {
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSReceiver::DefineProperties(isolate, object, properties));
+ }
+
+ return *object;
+}
+
MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -250,18 +314,6 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
return *obj;
}
-
-RUNTIME_FUNCTION(Runtime_SetPrototype) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- MAYBE_RETURN(
- JSReceiver::SetPrototype(obj, prototype, true, Object::THROW_ON_ERROR),
- isolate->heap()->exception());
- return *obj;
-}
-
RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -277,64 +329,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
}
-namespace {
-
-Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
- LanguageMode language_mode) {
- // Go up context chain to the script context.
- Handle<Context> script_context(isolate->context()->script_context(), isolate);
- DCHECK(script_context->IsScriptContext());
- DCHECK(script_context->get(slot)->IsPropertyCell());
-
- // Lookup the named property on the global object.
- Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
- Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
- Handle<JSGlobalObject> global_object(script_context->global_object(),
- isolate);
- LookupIterator it(global_object, name, global_object, LookupIterator::OWN);
-
- // Switch to fast mode only if there is a data property and it's not on
- // a hidden prototype.
- if (it.state() == LookupIterator::DATA &&
- it.GetHolder<Object>().is_identical_to(global_object)) {
- // Now update cell in the script context.
- Handle<PropertyCell> cell = it.GetPropertyCell();
- script_context->set(slot, *cell);
- } else {
- // This is not a fast case, so keep this access in a slow mode.
- // Store empty_property_cell here to release the outdated property cell.
- script_context->set(slot, isolate->heap()->empty_property_cell());
- }
-
- MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
- Object::CERTAINLY_NOT_STORE_FROM_KEYED),
- isolate->heap()->exception());
- return *value;
-}
-
-} // namespace
-
-
-RUNTIME_FUNCTION(Runtime_StoreGlobalViaContext_Sloppy) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(slot, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- return StoreGlobalViaContext(isolate, slot, value, SLOPPY);
-}
-
-
-RUNTIME_FUNCTION(Runtime_StoreGlobalViaContext_Strict) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(slot, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
-
- return StoreGlobalViaContext(isolate, slot, value, STRICT);
-}
-
-
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -530,7 +524,7 @@ RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
if (!args[0]->IsJSObject()) {
- return Smi::FromInt(0);
+ return Smi::kZero;
}
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
@@ -604,14 +598,14 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- if (!object->IsJSObject()) return Smi::FromInt(0);
+ if (!object->IsJSObject()) return Smi::kZero;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
- if (!js_object->map()->is_deprecated()) return Smi::FromInt(0);
+ if (!js_object->map()->is_deprecated()) return Smi::kZero;
// This call must not cause lazy deopts, because it's called from deferred
// code where we can't handle lazy deopts for lack of a suitable bailout
// ID. So we just try migration and signal failure if necessary,
// which will also trigger a deopt.
- if (!JSObject::TryMigrateInstance(js_object)) return Smi::FromInt(0);
+ if (!JSObject::TryMigrateInstance(js_object)) return Smi::kZero;
return *object;
}
@@ -928,13 +922,20 @@ RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
- Handle<JSObject> result =
- isolate->factory()->NewJSObjectFromMap(isolate->iterator_result_map());
- result->InObjectPropertyAtPut(JSIteratorResult::kValueIndex, *value);
- result->InObjectPropertyAtPut(JSIteratorResult::kDoneIndex, *done);
- return *result;
+ return *isolate->factory()->NewJSIteratorResult(value, done->BooleanValue());
}
+RUNTIME_FUNCTION(Runtime_CreateKeyValueArray) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ Handle<FixedArray> elements = isolate->factory()->NewFixedArray(2);
+ elements->set(0, *key);
+ elements->set(1, *value);
+ return *isolate->factory()->NewJSArrayWithElements(elements, FAST_ELEMENTS,
+ 2);
+}
RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
SealHandleScope shs(isolate);
@@ -960,32 +961,6 @@ RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
return *value;
}
-RUNTIME_FUNCTION(Runtime_LoadModuleExport) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<Module> module(isolate->context()->module());
- return *Module::LoadExport(module, name);
-}
-
-RUNTIME_FUNCTION(Runtime_LoadModuleImport) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, module_request, 1);
- Handle<Module> module(isolate->context()->module());
- return *Module::LoadImport(module, name, module_request->value());
-}
-
-RUNTIME_FUNCTION(Runtime_StoreModuleExport) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- Handle<Module> module(isolate->context()->module());
- Module::StoreExport(module, name, value);
- return isolate->heap()->undefined_value();
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
new file mode 100644
index 0000000000..226993a50e
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -0,0 +1,193 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/debug/debug.h"
+#include "src/elements.h"
+#include "src/promise-utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+void PromiseRejectEvent(Isolate* isolate, Handle<JSReceiver> promise,
+ Handle<Object> rejected_promise, Handle<Object> value,
+ bool debug_event) {
+ if (isolate->debug()->is_active() && debug_event) {
+ isolate->debug()->OnPromiseReject(rejected_promise, value);
+ }
+ Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+ // Do not report if we actually have a handler.
+ if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
+ isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
+ v8::kPromiseRejectWithNoHandler);
+ }
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
+ DCHECK(args.length() == 2);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+ Handle<Object> rejected_promise = promise;
+ if (isolate->debug()->is_active()) {
+ // If the Promise.reject call is caught, then this will return
+ // undefined, which will be interpreted by PromiseRejectEvent
+ // as being a caught exception event.
+ rejected_promise = isolate->GetPromiseOnStackOnThrow();
+ }
+ PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
+ DCHECK(args.length() == 1);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+ // At this point, no revocation has been issued before
+ CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
+ isolate->ReportPromiseReject(promise, Handle<Object>(),
+ v8::kPromiseHandlerAddedAfterReject);
+ return isolate->heap()->undefined_value();
+}
+
+namespace {
+void EnqueuePromiseReactionJob(Isolate* isolate, Handle<Object> value,
+ Handle<Object> tasks, Handle<Object> deferred,
+ Handle<Object> status) {
+ Handle<Object> debug_id = isolate->factory()->undefined_value();
+ Handle<Object> debug_name = isolate->factory()->undefined_value();
+ if (isolate->debug()->is_active()) {
+ MaybeHandle<Object> maybe_result;
+ Handle<Object> argv[] = {deferred, status};
+ maybe_result = Execution::TryCall(
+ isolate, isolate->promise_debug_get_info(),
+ isolate->factory()->undefined_value(), arraysize(argv), argv);
+ Handle<Object> result;
+ if ((maybe_result).ToHandle(&result)) {
+ CHECK(result->IsJSArray());
+ Handle<JSArray> array = Handle<JSArray>::cast(result);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ DCHECK(accessor->HasElement(array, 0));
+ DCHECK(accessor->HasElement(array, 1));
+ debug_id = accessor->Get(array, 0);
+ debug_name = accessor->Get(array, 1);
+ }
+ }
+ Handle<PromiseReactionJobInfo> info =
+ isolate->factory()->NewPromiseReactionJobInfo(value, tasks, deferred,
+ debug_id, debug_name,
+ isolate->native_context());
+ isolate->EnqueueMicrotask(info);
+}
+
+void PromiseFulfill(Isolate* isolate, Handle<JSReceiver> promise,
+ Handle<Smi> status, Handle<Object> value,
+ Handle<Symbol> reaction) {
+ Handle<Object> tasks = JSReceiver::GetDataProperty(promise, reaction);
+ if (!tasks->IsUndefined(isolate)) {
+ Handle<Object> deferred = JSReceiver::GetDataProperty(
+ promise, isolate->factory()->promise_deferred_reaction_symbol());
+ EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+ }
+}
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_PromiseReject) {
+ DCHECK(args.length() == 3);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
+ CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
+
+ PromiseRejectEvent(isolate, promise, promise, reason, debug_event);
+
+ Handle<Smi> status = handle(Smi::FromInt(kPromiseRejected), isolate);
+ Handle<Symbol> reaction =
+ isolate->factory()->promise_reject_reactions_symbol();
+ PromiseFulfill(isolate, promise, status, reason, reaction);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseFulfill) {
+ DCHECK(args.length() == 4);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, status, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Symbol, reaction, 3);
+ PromiseFulfill(isolate, promise, status, value, reaction);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, tasks, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, deferred, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, status, 3);
+ EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 2);
+
+ // TODO(gsathya): Add fast path for native promises with unmodified
+ // PromiseThen (which don't need these resolving functions, but
+ // instead can just call resolve/reject directly).
+ Handle<JSFunction> resolve, reject;
+ PromiseUtils::CreateResolvingFunctions(
+ isolate, promise, isolate->factory()->false_value(), &resolve, &reject);
+
+ Handle<Object> debug_id, debug_name;
+ if (isolate->debug()->is_active()) {
+ debug_id =
+ handle(Smi::FromInt(isolate->GetNextDebugMicrotaskId()), isolate);
+ debug_name = isolate->factory()->PromiseResolveThenableJob_string();
+ isolate->debug()->OnAsyncTaskEvent(isolate->factory()->enqueue_string(),
+ debug_id,
+ Handle<String>::cast(debug_name));
+ } else {
+ debug_id = isolate->factory()->undefined_value();
+ debug_name = isolate->factory()->undefined_value();
+ }
+
+ Handle<PromiseResolveThenableJobInfo> info =
+ isolate->factory()->NewPromiseResolveThenableJobInfo(
+ resolution, then, resolve, reject, debug_id, debug_name,
+ isolate->native_context());
+ isolate->EnqueueMicrotask(info);
+
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
+ isolate->EnqueueMicrotask(microtask);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ isolate->RunMicrotasks();
+ return isolate->heap()->undefined_value();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 977e6bc48f..d572eedd31 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -10,6 +10,7 @@
#include "src/messages.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/jsregexp.h"
+#include "src/regexp/regexp-utils.h"
#include "src/string-builder.h"
#include "src/string-search.h"
@@ -279,10 +280,8 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
}
-
void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
- ZoneList<int>* indices, unsigned int limit,
- Zone* zone) {
+ List<int>* indices, unsigned int limit) {
DCHECK(limit > 0);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
@@ -293,32 +292,29 @@ void FindOneByteStringIndices(Vector<const uint8_t> subject, uint8_t pattern,
pos = reinterpret_cast<const uint8_t*>(
memchr(pos, pattern, subject_end - pos));
if (pos == NULL) return;
- indices->Add(static_cast<int>(pos - subject_start), zone);
+ indices->Add(static_cast<int>(pos - subject_start));
pos++;
limit--;
}
}
-
void FindTwoByteStringIndices(const Vector<const uc16> subject, uc16 pattern,
- ZoneList<int>* indices, unsigned int limit,
- Zone* zone) {
+ List<int>* indices, unsigned int limit) {
DCHECK(limit > 0);
const uc16* subject_start = subject.start();
const uc16* subject_end = subject_start + subject.length();
for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
if (*pos == pattern) {
- indices->Add(static_cast<int>(pos - subject_start), zone);
+ indices->Add(static_cast<int>(pos - subject_start));
limit--;
}
}
}
-
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject,
- Vector<const PatternChar> pattern,
- ZoneList<int>* indices, unsigned int limit, Zone* zone) {
+ Vector<const PatternChar> pattern, List<int>* indices,
+ unsigned int limit) {
DCHECK(limit > 0);
// Collect indices of pattern in subject.
// Stop after finding at most limit values.
@@ -328,16 +324,15 @@ void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject,
while (limit > 0) {
index = search.Search(subject, index);
if (index < 0) return;
- indices->Add(index, zone);
+ indices->Add(index);
index += pattern_length;
limit--;
}
}
-
void FindStringIndicesDispatch(Isolate* isolate, String* subject,
- String* pattern, ZoneList<int>* indices,
- unsigned int limit, Zone* zone) {
+ String* pattern, List<int>* indices,
+ unsigned int limit) {
{
DisallowHeapAllocation no_gc;
String::FlatContent subject_content = subject->GetFlatContent();
@@ -351,14 +346,14 @@ void FindStringIndicesDispatch(Isolate* isolate, String* subject,
pattern_content.ToOneByteVector();
if (pattern_vector.length() == 1) {
FindOneByteStringIndices(subject_vector, pattern_vector[0], indices,
- limit, zone);
+ limit);
} else {
FindStringIndices(isolate, subject_vector, pattern_vector, indices,
- limit, zone);
+ limit);
}
} else {
FindStringIndices(isolate, subject_vector,
- pattern_content.ToUC16Vector(), indices, limit, zone);
+ pattern_content.ToUC16Vector(), indices, limit);
}
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
@@ -367,34 +362,51 @@ void FindStringIndicesDispatch(Isolate* isolate, String* subject,
pattern_content.ToOneByteVector();
if (pattern_vector.length() == 1) {
FindTwoByteStringIndices(subject_vector, pattern_vector[0], indices,
- limit, zone);
+ limit);
} else {
FindStringIndices(isolate, subject_vector, pattern_vector, indices,
- limit, zone);
+ limit);
}
} else {
Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
if (pattern_vector.length() == 1) {
FindTwoByteStringIndices(subject_vector, pattern_vector[0], indices,
- limit, zone);
+ limit);
} else {
FindStringIndices(isolate, subject_vector, pattern_vector, indices,
- limit, zone);
+ limit);
}
}
}
}
}
+namespace {
+List<int>* GetRewoundRegexpIndicesList(Isolate* isolate) {
+ List<int>* list = isolate->regexp_indices();
+ list->Rewind(0);
+ return list;
+}
+
+void TruncateRegexpIndicesList(Isolate* isolate) {
+ // Same size as smallest zone segment, preserving behavior from the
+ // runtime zone.
+ static const int kMaxRegexpIndicesListCapacity = 8 * KB;
+ if (isolate->regexp_indices()->capacity() > kMaxRegexpIndicesListCapacity) {
+ isolate->regexp_indices()->Clear(); // Throw away backing storage
+ }
+}
+} // namespace
+
template <typename ResultSeqString>
MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp,
- Handle<String> replacement, Handle<JSObject> last_match_info) {
+ Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat());
- ZoneScope zone_scope(isolate->runtime_zone());
- ZoneList<int> indices(8, zone_scope.zone());
+ List<int>* indices = GetRewoundRegexpIndicesList(isolate);
+
DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
String* pattern =
String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -402,10 +414,9 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
int pattern_len = pattern->length();
int replacement_len = replacement->length();
- FindStringIndicesDispatch(isolate, *subject, pattern, &indices, 0xffffffff,
- zone_scope.zone());
+ FindStringIndicesDispatch(isolate, *subject, pattern, indices, 0xffffffff);
- int matches = indices.length();
+ int matches = indices->length();
if (matches == 0) return *subject;
// Detect integer overflow.
@@ -436,10 +447,10 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
for (int i = 0; i < matches; i++) {
// Copy non-matched subject content.
- if (subject_pos < indices.at(i)) {
+ if (subject_pos < indices->at(i)) {
String::WriteToFlat(*subject, result->GetChars() + result_pos,
- subject_pos, indices.at(i));
- result_pos += indices.at(i) - subject_pos;
+ subject_pos, indices->at(i));
+ result_pos += indices->at(i) - subject_pos;
}
// Replace match.
@@ -449,7 +460,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
result_pos += replacement_len;
}
- subject_pos = indices.at(i) + pattern_len;
+ subject_pos = indices->at(i) + pattern_len;
}
// Add remaining subject content at the end.
if (subject_pos < subject_len) {
@@ -457,16 +468,18 @@ MUST_USE_RESULT static Object* StringReplaceGlobalAtomRegExpWithString(
subject_len);
}
- int32_t match_indices[] = {indices.at(matches - 1),
- indices.at(matches - 1) + pattern_len};
+ int32_t match_indices[] = {indices->at(matches - 1),
+ indices->at(matches - 1) + pattern_len};
RegExpImpl::SetLastMatchInfo(last_match_info, subject, 0, match_indices);
+ TruncateRegexpIndicesList(isolate);
+
return *result;
}
MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
- Handle<String> replacement, Handle<JSObject> last_match_info) {
+ Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
DCHECK(replacement->IsFlat());
@@ -474,8 +487,8 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
int subject_length = subject->length();
// CompiledReplacement uses zone allocation.
- ZoneScope zone_scope(isolate->runtime_zone());
- CompiledReplacement compiled_replacement(zone_scope.zone());
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ CompiledReplacement compiled_replacement(&zone);
bool simple_replace =
compiled_replacement.Compile(replacement, capture_count, subject_length);
@@ -548,7 +561,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
template <typename ResultSeqString>
MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
- Handle<JSObject> last_match_info) {
+ Handle<RegExpMatchInfo> last_match_info) {
DCHECK(subject->IsFlat());
// Shortcut for simple non-regexp global replacements
@@ -643,18 +656,12 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
return *answer;
}
+namespace {
-RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 3);
-
+Object* StringReplaceGlobalRegExpWithStringHelper(
+ Isolate* isolate, Handle<JSRegExp> regexp, Handle<String> subject,
+ Handle<String> replacement, Handle<RegExpMatchInfo> last_match_info) {
CHECK(regexp->GetFlags() & JSRegExp::kGlobal);
- CHECK(last_match_info->HasFastObjectElements());
subject = String::Flatten(subject);
@@ -674,6 +681,20 @@ RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
replacement, last_match_info);
}
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
+
+ return StringReplaceGlobalRegExpWithStringHelper(
+ isolate, regexp, subject, replacement, last_match_info);
+}
RUNTIME_FUNCTION(Runtime_StringSplit) {
HandleScope handle_scope(isolate);
@@ -694,7 +715,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
&last_match_cache_unused,
RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
isolate);
- if (*cached_answer != Smi::FromInt(0)) {
+ if (*cached_answer != Smi::kZero) {
// The cache FixedArray is a COW-array and can therefore be reused.
Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
Handle<FixedArray>::cast(cached_answer));
@@ -709,25 +730,18 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
subject = String::Flatten(subject);
pattern = String::Flatten(pattern);
- static const int kMaxInitialListCapacity = 16;
+ List<int>* indices = GetRewoundRegexpIndicesList(isolate);
- ZoneScope zone_scope(isolate->runtime_zone());
+ FindStringIndicesDispatch(isolate, *subject, *pattern, indices, limit);
- // Find (up to limit) indices of separator and end-of-string in subject
- int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
- ZoneList<int> indices(initial_capacity, zone_scope.zone());
-
- FindStringIndicesDispatch(isolate, *subject, *pattern, &indices, limit,
- zone_scope.zone());
-
- if (static_cast<uint32_t>(indices.length()) < limit) {
- indices.Add(subject_length, zone_scope.zone());
+ if (static_cast<uint32_t>(indices->length()) < limit) {
+ indices->Add(subject_length);
}
// The list indices now contains the end of each part to create.
// Create JSArray of substrings separated by separator.
- int part_count = indices.length();
+ int part_count = indices->length();
Handle<JSArray> result =
isolate->factory()->NewJSArray(FAST_ELEMENTS, part_count, part_count,
@@ -737,12 +751,12 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
Handle<FixedArray> elements(FixedArray::cast(result->elements()));
- if (part_count == 1 && indices.at(0) == subject_length) {
+ if (part_count == 1 && indices->at(0) == subject_length) {
elements->set(0, *subject);
} else {
int part_start = 0;
FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < part_count, i++, {
- int part_end = indices.at(i);
+ int part_end = indices->at(i);
Handle<String> substring =
isolate->factory()->NewProperSubString(subject, part_start, part_end);
elements->set(i, *substring);
@@ -758,9 +772,37 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
}
}
+ TruncateRegexpIndicesList(isolate);
+
return *result;
}
+// ES##sec-regexpcreate
+// RegExpCreate ( P, F )
+RUNTIME_FUNCTION(Runtime_RegExpCreate) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source_object, 0);
+
+ Handle<String> source;
+ if (source_object->IsUndefined(isolate)) {
+ source = isolate->factory()->empty_string();
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, source, Object::ToString(isolate, source_object));
+ }
+
+ Handle<Map> map(isolate->regexp_function()->initial_map());
+ Handle<JSRegExp> regexp =
+ Handle<JSRegExp>::cast(isolate->factory()->NewJSObjectFromMap(map));
+
+ JSRegExp::Flags flags = JSRegExp::kNone;
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSRegExp::Initialize(regexp, source, flags));
+
+ return *regexp;
+}
RUNTIME_FUNCTION(Runtime_RegExpExec) {
HandleScope scope(isolate);
@@ -768,7 +810,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_INT32_ARG_CHECKED(index, 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 3);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 3);
// Due to the way the JS calls are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security.
CHECK(index >= 0);
@@ -778,64 +820,116 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
isolate, RegExpImpl::Exec(regexp, subject, index, last_match_info));
}
+RUNTIME_FUNCTION(Runtime_RegExpInternalReplace) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
-RUNTIME_FUNCTION(Runtime_RegExpFlags) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- return regexp->flags();
+ Handle<RegExpMatchInfo> internal_match_info =
+ isolate->regexp_internal_match_info();
+
+ return StringReplaceGlobalRegExpWithStringHelper(
+ isolate, regexp, subject, replacement, internal_match_info);
}
+namespace {
-RUNTIME_FUNCTION(Runtime_RegExpSource) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
- return regexp->source();
-}
+class MatchInfoBackedMatch : public String::Match {
+ public:
+ MatchInfoBackedMatch(Isolate* isolate, Handle<String> subject,
+ Handle<RegExpMatchInfo> match_info)
+ : isolate_(isolate), match_info_(match_info) {
+ subject_ = String::Flatten(subject);
+ }
-// TODO(jgruber): Remove this once all uses in regexp.js have been removed.
-RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_SMI_ARG_CHECKED(size, 0);
- CHECK(size >= 0 && size <= FixedArray::kMaxLength);
- CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, input, 2);
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(size);
- Handle<Map> regexp_map(isolate->native_context()->regexp_result_map());
- Handle<JSObject> object =
- isolate->factory()->NewJSObjectFromMap(regexp_map, NOT_TENURED);
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- array->set_elements(*elements);
- array->set_length(Smi::FromInt(size));
- // Write in-object properties after the length of the array.
- array->InObjectPropertyAtPut(JSRegExpResult::kIndexIndex, *index);
- array->InObjectPropertyAtPut(JSRegExpResult::kInputIndex, *input);
- return *array;
-}
+ Handle<String> GetMatch() override {
+ return RegExpUtils::GenericCaptureGetter(isolate_, match_info_, 0, nullptr);
+ }
+ MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
+ Handle<Object> capture_obj = RegExpUtils::GenericCaptureGetter(
+ isolate_, match_info_, i, capture_exists);
+ return (*capture_exists) ? Object::ToString(isolate_, capture_obj)
+ : isolate_->factory()->empty_string();
+ }
-RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+ Handle<String> GetPrefix() override {
+ const int match_start = match_info_->Capture(0);
+ return isolate_->factory()->NewSubString(subject_, 0, match_start);
+ }
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSRegExp::Initialize(regexp, source, flags));
+ Handle<String> GetSuffix() override {
+ const int match_end = match_info_->Capture(1);
+ return isolate_->factory()->NewSubString(subject_, match_end,
+ subject_->length());
+ }
- return *regexp;
-}
+ int CaptureCount() override {
+ return match_info_->NumberOfCaptureRegisters() / 2;
+ }
+
+ virtual ~MatchInfoBackedMatch() {}
+
+ private:
+ Isolate* isolate_;
+ Handle<String> subject_;
+ Handle<RegExpMatchInfo> match_info_;
+};
+
+class VectorBackedMatch : public String::Match {
+ public:
+ VectorBackedMatch(Isolate* isolate, Handle<String> subject,
+ Handle<String> match, int match_position,
+ ZoneVector<Handle<Object>>* captures)
+ : isolate_(isolate),
+ match_(match),
+ match_position_(match_position),
+ captures_(captures) {
+ subject_ = String::Flatten(subject);
+ }
+
+ Handle<String> GetMatch() override { return match_; }
+
+ MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
+ Handle<Object> capture_obj = captures_->at(i);
+ if (capture_obj->IsUndefined(isolate_)) {
+ *capture_exists = false;
+ return isolate_->factory()->empty_string();
+ }
+ *capture_exists = true;
+ return Object::ToString(isolate_, capture_obj);
+ }
+ Handle<String> GetPrefix() override {
+ return isolate_->factory()->NewSubString(subject_, 0, match_position_);
+ }
+
+ Handle<String> GetSuffix() override {
+ const int match_end_position = match_position_ + match_->length();
+ return isolate_->factory()->NewSubString(subject_, match_end_position,
+ subject_->length());
+ }
+
+ int CaptureCount() override { return static_cast<int>(captures_->size()); }
+
+ virtual ~VectorBackedMatch() {}
+
+ private:
+ Isolate* isolate_;
+ Handle<String> subject_;
+ Handle<String> match_;
+ const int match_position_;
+ ZoneVector<Handle<Object>>* captures_;
+};
// Only called from Runtime_RegExpExecMultiple so it doesn't need to maintain
// separate last match info. See comment on that function.
template <bool has_capture>
static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
Handle<JSRegExp> regexp,
- Handle<JSObject> last_match_array,
+ Handle<RegExpMatchInfo> last_match_array,
Handle<JSArray> result_array) {
DCHECK(subject->IsFlat());
DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
@@ -858,8 +952,11 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(cached_answer));
- // The cache FixedArray is a COW-array and can therefore be reused.
- JSArray::SetContent(result_array, cached_fixed_array);
+ // The cache FixedArray is a COW-array and we need to return a copy.
+ Handle<FixedArray> copied_fixed_array =
+ isolate->factory()->CopyFixedArrayWithMap(
+ cached_fixed_array, isolate->factory()->fixed_array_map());
+ JSArray::SetContent(result_array, copied_fixed_array);
RegExpImpl::SetLastMatchInfo(last_match_array, subject, capture_count,
last_match);
DeleteArray(last_match);
@@ -964,9 +1061,12 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
Handle<FixedArray> result_fixed_array = builder.array();
result_fixed_array->Shrink(builder.length());
- // Cache the result and turn the FixedArray into a COW array.
+ // Cache the result and copy the FixedArray into a COW array.
+ Handle<FixedArray> copied_fixed_array =
+ isolate->factory()->CopyFixedArrayWithMap(
+ result_fixed_array, isolate->factory()->fixed_array_map());
RegExpResultsCache::Enter(
- isolate, subject, handle(regexp->data(), isolate), result_fixed_array,
+ isolate, subject, handle(regexp->data(), isolate), copied_fixed_array,
last_match_cache, RegExpResultsCache::REGEXP_MULTIPLE_INDICES);
}
return *builder.ToJSArray(result_array);
@@ -975,19 +1075,174 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
}
+MUST_USE_RESULT MaybeHandle<String> StringReplaceNonGlobalRegExpWithFunction(
+ Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp,
+ Handle<Object> replace_obj) {
+ Factory* factory = isolate->factory();
+ Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+
+ // TODO(jgruber): This is a pattern we could refactor.
+ Handle<Object> match_indices_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, match_indices_obj,
+ RegExpImpl::Exec(regexp, subject, 0, last_match_info), String);
+
+ if (match_indices_obj->IsNull(isolate)) {
+ RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
+ String);
+ return subject;
+ }
+
+ Handle<RegExpMatchInfo> match_indices =
+ Handle<RegExpMatchInfo>::cast(match_indices_obj);
+
+ const int index = match_indices->Capture(0);
+ const int end_of_match = match_indices->Capture(1);
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(factory->NewSubString(subject, 0, index));
+
+ // Compute the parameter list consisting of the match, captures, index,
+ // and subject for the replace function invocation.
+ // The number of captures plus one for the match.
+ const int m = match_indices->NumberOfCaptureRegisters() / 2;
+
+ const int argc = m + 2;
+ ScopedVector<Handle<Object>> argv(argc);
+
+ for (int j = 0; j < m; j++) {
+ bool ok;
+ Handle<String> capture =
+ RegExpUtils::GenericCaptureGetter(isolate, match_indices, j, &ok);
+ if (ok) {
+ argv[j] = capture;
+ } else {
+ argv[j] = factory->undefined_value();
+ }
+ }
+
+ argv[argc - 2] = handle(Smi::FromInt(index), isolate);
+ argv[argc - 1] = subject;
+
+ Handle<Object> replacement_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, replacement_obj,
+ Execution::Call(isolate, replace_obj, factory->undefined_value(), argc,
+ argv.start()),
+ String);
+
+ Handle<String> replacement;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, replacement, Object::ToString(isolate, replacement_obj), String);
+
+ builder.AppendString(replacement);
+ builder.AppendString(
+ factory->NewSubString(subject, end_of_match, subject->length()));
+
+ return builder.Finish();
+}
+
+// Legacy implementation of RegExp.prototype[Symbol.replace] which
+// doesn't properly call the underlying exec method.
+MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
+ Handle<JSRegExp> regexp,
+ Handle<String> string,
+ Handle<Object> replace_obj) {
+ Factory* factory = isolate->factory();
+
+ // TODO(jgruber): We need the even stricter guarantee of an unmodified
+ // JSRegExp map here for access to GetFlags to be legal.
+ const int flags = regexp->GetFlags();
+ const bool global = (flags & JSRegExp::kGlobal) != 0;
+
+ // Functional fast-paths are dispatched directly by replace builtin.
+ DCHECK(!replace_obj->IsCallable());
+
+ Handle<String> replace;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, replace,
+ Object::ToString(isolate, replace_obj), String);
+ replace = String::Flatten(replace);
+
+ Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+
+ if (!global) {
+ // Non-global regexp search, string replace.
+
+ Handle<Object> match_indices_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, match_indices_obj,
+ RegExpImpl::Exec(regexp, string, 0, last_match_info), String);
+
+ if (match_indices_obj->IsNull(isolate)) {
+ RETURN_ON_EXCEPTION(
+ isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0), String);
+ return string;
+ }
+
+ auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
-// This is only called for StringReplaceGlobalRegExpWithFunction. This sets
-// lastMatchInfoOverride to maintain the last match info, so we don't need to
-// set any other last match array info.
+ const int start_index = match_indices->Capture(0);
+ const int end_index = match_indices->Capture(1);
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(factory->NewSubString(string, 0, start_index));
+
+ if (replace->length() > 0) {
+ MatchInfoBackedMatch m(isolate, string, match_indices);
+ Handle<String> replacement;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, replacement,
+ String::GetSubstitution(isolate, &m, replace),
+ String);
+ builder.AppendString(replacement);
+ }
+
+ builder.AppendString(
+ factory->NewSubString(string, end_index, string->length()));
+ return builder.Finish();
+ } else {
+ // Global regexp search, string replace.
+ DCHECK(global);
+ RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
+ String);
+
+ if (replace->length() == 0) {
+ if (string->HasOnlyOneByteChars()) {
+ Object* result =
+ StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
+ isolate, string, regexp, last_match_info);
+ return handle(String::cast(result), isolate);
+ } else {
+ Object* result =
+ StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
+ isolate, string, regexp, last_match_info);
+ return handle(String::cast(result), isolate);
+ }
+ }
+
+ Object* result = StringReplaceGlobalRegExpWithString(
+ isolate, string, regexp, replace, last_match_info);
+ if (result->IsString()) {
+ return handle(String::cast(result), isolate);
+ } else {
+ return MaybeHandle<String>();
+ }
+ }
+
+ UNREACHABLE();
+ return MaybeHandle<String>();
+}
+
+} // namespace
+
+// This is only called for StringReplaceGlobalRegExpWithFunction.
RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, last_match_info, 2);
+ CONVERT_ARG_HANDLE_CHECKED(RegExpMatchInfo, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- CHECK(last_match_info->HasFastObjectElements());
CHECK(result_array->HasFastObjectElements());
subject = String::Flatten(subject);
@@ -1002,6 +1257,188 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
}
}
+RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, replace, 2);
+
+ RETURN_RESULT_OR_FAILURE(isolate, StringReplaceNonGlobalRegExpWithFunction(
+ isolate, subject, regexp, replace));
+}
+
+// Slow path for:
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+RUNTIME_FUNCTION(Runtime_RegExpReplace) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 3);
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
+ Handle<Object> replace_obj = args.at<Object>(2);
+
+ Factory* factory = isolate->factory();
+
+ string = String::Flatten(string);
+
+ // Fast-path for unmodified JSRegExps.
+ if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, RegExpReplace(isolate, Handle<JSRegExp>::cast(recv), string,
+ replace_obj));
+ }
+
+ const int length = string->length();
+ const bool functional_replace = replace_obj->IsCallable();
+
+ Handle<String> replace;
+ if (!functional_replace) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, replace,
+ Object::ToString(isolate, replace_obj));
+ }
+
+ Handle<Object> global_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, global_obj,
+ JSReceiver::GetProperty(recv, factory->global_string()));
+ const bool global = global_obj->BooleanValue();
+
+ bool unicode = false;
+ if (global) {
+ Handle<Object> unicode_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, unicode_obj,
+ JSReceiver::GetProperty(recv, factory->unicode_string()));
+ unicode = unicode_obj->BooleanValue();
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ RegExpUtils::SetLastIndex(isolate, recv, 0));
+ }
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ZoneVector<Handle<Object>> results(&zone);
+
+ while (true) {
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, RegExpUtils::RegExpExec(isolate, recv, string,
+ factory->undefined_value()));
+
+ if (result->IsNull(isolate)) break;
+
+ results.push_back(result);
+ if (!global) break;
+
+ Handle<Object> match_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
+ Object::GetElement(isolate, result, 0));
+
+ Handle<String> match;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
+ Object::ToString(isolate, match_obj));
+
+ if (match->length() == 0) {
+ RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
+ isolate, recv, string, unicode));
+ }
+ }
+
+ // TODO(jgruber): Look into ReplacementStringBuilder instead.
+ IncrementalStringBuilder builder(isolate);
+ int next_source_position = 0;
+
+ for (const auto& result : results) {
+ Handle<Object> captures_length_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, captures_length_obj,
+ Object::GetProperty(result, factory->length_string()));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, captures_length_obj,
+ Object::ToLength(isolate, captures_length_obj));
+ const int captures_length =
+ std::max(Handle<Smi>::cast(captures_length_obj)->value(), 0);
+
+ Handle<Object> match_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
+ Object::GetElement(isolate, result, 0));
+
+ Handle<String> match;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
+ Object::ToString(isolate, match_obj));
+
+ const int match_length = match->length();
+
+ Handle<Object> position_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, position_obj,
+ Object::GetProperty(result, factory->index_string()));
+
+ // TODO(jgruber): Extract and correct error handling. Since we can go up to
+ // 2^53 - 1 (at least for ToLength), we might actually need uint64_t here?
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, position_obj, Object::ToInteger(isolate, position_obj));
+ const int position =
+ std::max(std::min(Handle<Smi>::cast(position_obj)->value(), length), 0);
+
+ ZoneVector<Handle<Object>> captures(&zone);
+ for (int n = 0; n < captures_length; n++) {
+ Handle<Object> capture;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, capture, Object::GetElement(isolate, result, n));
+
+ if (!capture->IsUndefined(isolate)) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, capture,
+ Object::ToString(isolate, capture));
+ }
+ captures.push_back(capture);
+ }
+
+ Handle<String> replacement;
+ if (functional_replace) {
+ const int argc = captures_length + 2;
+ ScopedVector<Handle<Object>> argv(argc);
+
+ for (int j = 0; j < captures_length; j++) {
+ argv[j] = captures[j];
+ }
+
+ argv[captures_length] = handle(Smi::FromInt(position), isolate);
+ argv[captures_length + 1] = string;
+
+ Handle<Object> replacement_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, replacement_obj,
+ Execution::Call(isolate, replace_obj, factory->undefined_value(),
+ argc, argv.start()));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, replacement, Object::ToString(isolate, replacement_obj));
+ } else {
+ VectorBackedMatch m(isolate, string, match, position, &captures);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, replacement, String::GetSubstitution(isolate, &m, replace));
+ }
+
+ if (position >= next_source_position) {
+ builder.AppendString(
+ factory->NewSubString(string, next_source_position, position));
+ builder.AppendString(replacement);
+
+ next_source_position = position + match_length;
+ }
+ }
+
+ if (next_source_position < length) {
+ builder.AppendString(
+ factory->NewSubString(string, next_source_position, length));
+ }
+
+ RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
+}
RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
SealHandleScope shs(isolate);
@@ -1018,5 +1455,6 @@ RUNTIME_FUNCTION(Runtime_IsRegExp) {
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSRegExp());
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 0c037db307..377799fe04 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -903,7 +903,7 @@ MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
// The property was found in a context slot.
if (index != Context::kNotFound) {
if (flag == kNeedsInitialization &&
- Handle<Context>::cast(holder)->is_the_hole(index)) {
+ Handle<Context>::cast(holder)->is_the_hole(isolate, index)) {
THROW_NEW_ERROR(isolate,
NewReferenceError(MessageTemplate::kNotDefined, name),
Object);
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index f5bda59b26..328bdceb37 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -90,17 +90,8 @@ RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
RUNTIME_FUNCTION(Runtime_StringIndexOf) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, index, 2);
-
- uint32_t start_index = 0;
- if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
-
- CHECK(start_index <= static_cast<uint32_t>(sub->length()));
- int position = String::IndexOf(isolate, sub, pat, start_index);
- return Smi::FromInt(position);
+ return String::IndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
+ args.at<Object>(2));
}
RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
@@ -166,59 +157,6 @@ RUNTIME_FUNCTION(Runtime_InternalizeString) {
}
-RUNTIME_FUNCTION(Runtime_StringMatch) {
- HandleScope handles(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
-
- CHECK(regexp_info->HasFastObjectElements());
-
- RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- int capture_count = regexp->CaptureCount();
-
- ZoneScope zone_scope(isolate->runtime_zone());
- ZoneList<int> offsets(8, zone_scope.zone());
-
- while (true) {
- int32_t* match = global_cache.FetchNext();
- if (match == NULL) break;
- offsets.Add(match[0], zone_scope.zone()); // start
- offsets.Add(match[1], zone_scope.zone()); // end
- }
-
- if (global_cache.HasException()) return isolate->heap()->exception();
-
- if (offsets.length() == 0) {
- // Not a single match.
- return isolate->heap()->null_value();
- }
-
- RegExpImpl::SetLastMatchInfo(regexp_info, subject, capture_count,
- global_cache.LastSuccessfulMatch());
-
- int matches = offsets.length() / 2;
- Handle<FixedArray> elements = isolate->factory()->NewFixedArray(matches);
- Handle<String> substring =
- isolate->factory()->NewSubString(subject, offsets.at(0), offsets.at(1));
- elements->set(0, *substring);
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 1, i, i < matches, i++, {
- int from = offsets.at(i * 2);
- int to = offsets.at(i * 2 + 1);
- Handle<String> substring =
- isolate->factory()->NewProperSubString(subject, from, to);
- elements->set(i, *substring);
- });
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(matches));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
@@ -256,7 +194,7 @@ RUNTIME_FUNCTION(Runtime_StringCompare) {
break;
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
@@ -573,13 +511,13 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
elements->set(i, value, mode);
}
if (i < length) {
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
memset(elements->data_start() + i, 0, kPointerSize * (length - i));
}
#ifdef DEBUG
for (int j = 0; j < length; ++j) {
Object* element = elements->get(j);
- DCHECK(element == Smi::FromInt(0) ||
+ DCHECK(element == Smi::kZero ||
(element->IsString() && String::cast(element)->LooksValid()));
}
#endif
@@ -942,7 +880,7 @@ RUNTIME_FUNCTION(Runtime_StringLessThan) {
break;
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
@@ -960,7 +898,7 @@ RUNTIME_FUNCTION(Runtime_StringLessThanOrEqual) {
break;
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
@@ -978,7 +916,7 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThan) {
break;
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
@@ -996,7 +934,7 @@ RUNTIME_FUNCTION(Runtime_StringGreaterThanOrEqual) {
break;
}
UNREACHABLE();
- return Smi::FromInt(0);
+ return Smi::kZero;
}
RUNTIME_FUNCTION(Runtime_StringEqual) {
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 8100d2c759..7054192a0f 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -17,6 +17,7 @@
#include "src/snapshot/code-serializer.h"
#include "src/snapshot/natives.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -267,6 +268,9 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function->IsOptimized() && function->code()->is_turbofanned()) {
return Smi::FromInt(7); // 7 == "TurboFan compiler".
}
+ if (function->IsInterpreted()) {
+ return Smi::FromInt(8); // 8 == "Interpreted".
+ }
return function->IsOptimized() ? Smi::FromInt(1) // 1 == "yes".
: Smi::FromInt(2); // 2 == "no".
}
@@ -444,7 +448,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
OFStream os(stdout);
#ifdef DEBUG
- if (args[0]->IsString()) {
+ if (args[0]->IsString() && isolate->context() != nullptr) {
// If we have a string, assume it's a code "marker"
// and print some interesting cpu debugging info.
JavaScriptFrameIterator it(isolate);
@@ -546,8 +550,7 @@ RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
return Smi::FromInt(Natives::GetBuiltinsCount());
}
-
-// Returns V8 version as a string.
+// TODO(5510): remove this.
RUNTIME_FUNCTION(Runtime_GetV8Version) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
@@ -755,21 +758,37 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
// Return undefined if unsuccessful.
RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, wire_bytes, 1);
Address mem_start = static_cast<Address>(buffer->backing_store());
int mem_size = static_cast<int>(buffer->byte_length()->Number());
+ // DeserializeWasmModule will allocate. We assume JSArrayBuffer doesn't
+ // get relocated.
ScriptData sc(mem_start, mem_size);
+ bool already_external = wire_bytes->is_external();
+ if (!already_external) {
+ wire_bytes->set_is_external(true);
+ isolate->heap()->UnregisterArrayBuffer(*wire_bytes);
+ }
MaybeHandle<FixedArray> maybe_compiled_module =
- WasmCompiledModuleSerializer::DeserializeWasmModule(isolate, &sc);
+ WasmCompiledModuleSerializer::DeserializeWasmModule(
+ isolate, &sc,
+ Vector<const uint8_t>(
+ reinterpret_cast<uint8_t*>(wire_bytes->backing_store()),
+ static_cast<int>(wire_bytes->byte_length()->Number())));
+ if (!already_external) {
+ wire_bytes->set_is_external(false);
+ isolate->heap()->RegisterNewArrayBuffer(*wire_bytes);
+ }
Handle<FixedArray> compiled_module;
if (!maybe_compiled_module.ToHandle(&compiled_module)) {
return isolate->heap()->undefined_value();
}
- return *wasm::CreateCompiledModuleObject(isolate, compiled_module,
- wasm::ModuleOrigin::kWasmOrigin);
+ return *WasmModuleObject::New(
+ isolate, Handle<WasmCompiledModule>::cast(compiled_module));
}
RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index ba422bf01e..cb0e062d14 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -59,7 +59,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
if (array_buffer->backing_store() == NULL) {
- CHECK(Smi::FromInt(0) == array_buffer->byte_length());
+ CHECK(Smi::kZero == array_buffer->byte_length());
return isolate->heap()->undefined_value();
}
// Shared array buffers should never be neutered.
@@ -142,7 +142,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
+ holder->SetInternalField(i, Smi::kZero);
}
Handle<Object> length_obj = isolate->factory()->NewNumberFromSize(length);
holder->set_length(*length_obj);
@@ -215,7 +215,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
holder->GetInternalFieldCount());
for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
+ holder->SetInternalField(i, Smi::kZero);
}
// NOTE: not initializing backing store.
@@ -241,7 +241,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
}
holder->set_buffer(*buffer);
- holder->set_byte_offset(Smi::FromInt(0));
+ holder->set_byte_offset(Smi::kZero);
Handle<Object> byte_length_obj(
isolate->factory()->NewNumberFromSize(byte_length));
holder->set_byte_length(*byte_length_obj);
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index cbdaf0f033..8e2e83c37e 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -9,6 +9,7 @@
#include "src/allocation.h"
#include "src/base/platform/time.h"
+#include "src/globals.h"
#include "src/objects.h"
#include "src/unicode.h"
#include "src/zone/zone.h"
@@ -51,13 +52,12 @@ namespace internal {
F(HasComplexElements, 1, 1) \
F(IsArray, 1, 1) \
F(ArrayIsArray, 1, 1) \
- F(HasCachedArrayIndex, 1, 1) \
- F(GetCachedArrayIndex, 1, 1) \
F(FixedArrayGet, 2, 1) \
F(FixedArraySet, 3, 1) \
F(ArraySpeciesConstructor, 1, 1) \
F(ArrayIncludes_Slow, 3, 1) \
- F(ArrayIndexOf, 3, 1)
+ F(ArrayIndexOf, 3, 1) \
+ F(SpreadIterablePrepare, 1, 1)
#define FOR_EACH_INTRINSIC_ATOMICS(F) \
F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
@@ -188,6 +188,7 @@ namespace internal {
F(ScriptLineStartPosition, 2, 1) \
F(ScriptLineEndPosition, 2, 1) \
F(ScriptLocationFromLine, 4, 1) \
+ F(ScriptLocationFromLine2, 4, 1) \
F(ScriptPositionInfo, 3, 1) \
F(ScriptSourceLine, 2, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
@@ -195,11 +196,10 @@ namespace internal {
F(DebugRecordAsyncFunction, 1, 1) \
F(DebugPushPromise, 1, 1) \
F(DebugPopPromise, 0, 1) \
- F(DebugAsyncTaskEvent, 1, 1) \
+ F(DebugNextMicrotaskId, 0, 1) \
+ F(DebugAsyncTaskEvent, 3, 1) \
F(DebugIsActive, 0, 1) \
- F(DebugBreakInOptimizedCode, 0, 1) \
- F(GetWasmFunctionOffsetTable, 1, 1) \
- F(DisassembleWasmFunction, 1, 1)
+ F(DebugBreakInOptimizedCode, 0, 1)
#define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
@@ -214,13 +214,15 @@ namespace internal {
F(InterpreterTraceBytecodeEntry, 3, 1) \
F(InterpreterTraceBytecodeExit, 3, 1) \
F(InterpreterClearPendingMessage, 0, 1) \
- F(InterpreterSetPendingMessage, 1, 1)
+ F(InterpreterSetPendingMessage, 1, 1) \
+ F(InterpreterAdvanceBytecodeOffset, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
F(FunctionRemovePrototype, 1, 1) \
F(FunctionGetScript, 1, 1) \
+ F(FunctionGetScriptId, 1, 1) \
F(FunctionGetSourceCode, 1, 1) \
F(FunctionGetScriptSourcePosition, 1, 1) \
F(FunctionGetContextData, 1, 1) \
@@ -290,7 +292,8 @@ namespace internal {
F(CheckIsBootstrapping, 0, 1) \
F(CreateListFromArrayLike, 1, 1) \
F(EnqueueMicrotask, 1, 1) \
- F(EnqueuePromiseResolveThenableJob, 6, 1) \
+ F(EnqueuePromiseReactionJob, 4, 1) \
+ F(EnqueuePromiseResolveThenableJob, 3, 1) \
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(ExportExperimentalFromRuntime, 1, 1) \
F(ExportFromRuntime, 1, 1) \
@@ -298,12 +301,13 @@ namespace internal {
F(InstallToContext, 1, 1) \
F(Interrupt, 0, 1) \
F(IS_VAR, 1, 1) \
- F(IsWasmObject, 1, 1) \
+ F(IsWasmInstance, 1, 1) \
F(NewReferenceError, 2, 1) \
F(NewSyntaxError, 2, 1) \
F(NewTypeError, 2, 1) \
F(OrdinaryHasInstance, 2, 1) \
- F(PromiseRejectEvent, 3, 1) \
+ F(PromiseReject, 3, 1) \
+ F(PromiseFulfill, 4, 1) \
F(PromiseRejectEventFromStack, 2, 1) \
F(PromiseRevokeReject, 1, 1) \
F(PromoteScheduledException, 0, 1) \
@@ -325,6 +329,7 @@ namespace internal {
F(ThrowNotGeneric, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
F(ThrowWasmError, 2, 1) \
F(ThrowUndefinedOrNullToObject, 1, 1) \
F(Typeof, 1, 1) \
@@ -350,7 +355,12 @@ namespace internal {
F(LiveEditCompareStrings, 2, 1) \
F(LiveEditRestartFrame, 2, 1)
-#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 1, 1)
+#define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
+
+#define FOR_EACH_INTRINSIC_MODULE(F) \
+ F(GetModuleNamespace, 1, 1) \
+ F(LoadModuleVariable, 1, 1) \
+ F(StoreModuleVariable, 2, 1)
#define FOR_EACH_INTRINSIC_NUMBERS(F) \
F(IsValidSmi, 1, 1) \
@@ -370,13 +380,11 @@ namespace internal {
#define FOR_EACH_INTRINSIC_OBJECT(F) \
F(GetPrototype, 1, 1) \
F(ObjectHasOwnProperty, 2, 1) \
+ F(ObjectCreate, 2, 1) \
F(InternalSetPrototype, 2, 1) \
- F(SetPrototype, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
F(GetProperty, 2, 1) \
F(KeyedGetProperty, 2, 1) \
- F(StoreGlobalViaContext_Sloppy, 2, 1) \
- F(StoreGlobalViaContext_Strict, 2, 1) \
F(AddNamedProperty, 4, 1) \
F(SetProperty, 4, 1) \
F(AddElement, 3, 1) \
@@ -417,11 +425,9 @@ namespace internal {
F(Compare, 3, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
+ F(CreateKeyValueArray, 2, 1) \
F(IsAccessCheckNeeded, 1, 1) \
- F(CreateDataProperty, 3, 1) \
- F(LoadModuleExport, 1, 1) \
- F(LoadModuleImport, 2, 1) \
- F(StoreModuleExport, 2, 1)
+ F(CreateDataProperty, 3, 1)
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
F(Multiply, 2, 1) \
@@ -453,17 +459,17 @@ namespace internal {
F(JSProxyGetHandler, 1, 1) \
F(JSProxyRevoke, 1, 1)
-#define FOR_EACH_INTRINSIC_REGEXP(F) \
- F(StringReplaceGlobalRegExpWithString, 4, 1) \
- F(StringSplit, 3, 1) \
- F(RegExpExec, 4, 1) \
- F(RegExpFlags, 1, 1) \
- F(RegExpSource, 1, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(RegExpInitializeAndCompile, 3, 1) \
- F(RegExpExecMultiple, 4, 1) \
- F(RegExpExecReThrow, 4, 1) \
- F(IsRegExp, 1, 1)
+#define FOR_EACH_INTRINSIC_REGEXP(F) \
+ F(IsRegExp, 1, 1) \
+ F(RegExpCreate, 1, 1) \
+ F(RegExpExec, 4, 1) \
+ F(RegExpExecMultiple, 4, 1) \
+ F(RegExpExecReThrow, 4, 1) \
+ F(RegExpInternalReplace, 3, 1) \
+ F(RegExpReplace, 3, 1) \
+ F(StringReplaceGlobalRegExpWithString, 4, 1) \
+ F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
+ F(StringSplit, 3, 1)
#define FOR_EACH_INTRINSIC_SCOPES(F) \
F(ThrowConstAssignError, 0, 1) \
@@ -807,7 +813,6 @@ namespace internal {
F(SubString, 3, 1) \
F(StringAdd, 2, 1) \
F(InternalizeString, 1, 1) \
- F(StringMatch, 3, 1) \
F(StringCharCodeAtRT, 2, 1) \
F(StringCompare, 2, 1) \
F(StringBuilderConcat, 3, 1) \
@@ -891,7 +896,7 @@ namespace internal {
F(HasFixedUint8ClampedElements, 1, 1) \
F(SpeciesProtector, 0, 1) \
F(SerializeWasmModule, 1, 1) \
- F(DeserializeWasmModule, 1, 1) \
+ F(DeserializeWasmModule, 2, 1) \
F(IsAsmWasmCode, 1, 1) \
F(IsNotAsmWasmCode, 1, 1) \
F(ValidateWasmInstancesChain, 2, 1) \
@@ -971,6 +976,7 @@ namespace internal {
FOR_EACH_INTRINSIC_LITERALS(F) \
FOR_EACH_INTRINSIC_LIVEEDIT(F) \
FOR_EACH_INTRINSIC_MATHS(F) \
+ FOR_EACH_INTRINSIC_MODULE(F) \
FOR_EACH_INTRINSIC_NUMBERS(F) \
FOR_EACH_INTRINSIC_OBJECT(F) \
FOR_EACH_INTRINSIC_OPERATORS(F) \
@@ -1039,7 +1045,7 @@ class Runtime : public AllStatic {
static const Function* FunctionForName(const unsigned char* name, int length);
// Get the intrinsic function with the given FunctionId.
- static const Function* FunctionForId(FunctionId id);
+ V8_EXPORT_PRIVATE static const Function* FunctionForId(FunctionId id);
// Get the intrinsic function with the given function entry address.
static const Function* FunctionForEntry(Address ref);
@@ -1114,8 +1120,7 @@ class RuntimeState {
DISALLOW_COPY_AND_ASSIGN(RuntimeState);
};
-
-std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
//---------------------------------------------------------------------------
// Constants used by interface to runtime functions.
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index ffe0ac4621..65f0126580 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -1254,7 +1254,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index b1bf02d196..553d6d8ce4 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -553,7 +553,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ AndP(r4, lhs, rhs);
__ JumpIfNotSmi(r4, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1586,13 +1586,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ SmiToShortArrayOffset(r3, r3);
__ AddP(r3, Operand(2));
- __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r2, &runtime);
- __ CompareObjectType(r2, r4, r4, JS_OBJECT_TYPE);
- __ bne(&runtime);
+ // Check that the last match info is a FixedArray.
+ __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(last_match_info_elements, &runtime);
// Check that the object has fast elements.
- __ LoadP(last_match_info_elements,
- FieldMemOperand(r2, JSArray::kElementsOffset));
__ LoadP(r2,
FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
__ CompareRoot(r2, Heap::kFixedArrayMapRootIndex);
@@ -1601,7 +1598,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ LoadP(
r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ AddP(r4, r3, Operand(RegExpImpl::kLastMatchOverhead));
+ __ AddP(r4, r3, Operand(RegExpMatchInfo::kLastMatchOverhead));
__ SmiUntag(r0, r2);
__ CmpP(r4, r0);
__ bgt(&runtime);
@@ -1611,18 +1608,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store the capture count.
__ SmiTag(r4, r3);
__ StoreP(r4, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastCaptureCountOffset));
+ RegExpMatchInfo::kNumberOfCapturesOffset));
// Store last subject and last input.
__ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastSubjectOffset));
+ RegExpMatchInfo::kLastSubjectOffset));
__ LoadRR(r4, subject);
- __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
- subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteField(last_match_info_elements,
+ RegExpMatchInfo::kLastSubjectOffset, subject, r9,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
__ LoadRR(subject, r4);
__ StoreP(subject, FieldMemOperand(last_match_info_elements,
- RegExpImpl::kLastInputOffset));
- __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
- subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ RegExpMatchInfo::kLastInputOffset));
+ __ RecordWriteField(last_match_info_elements,
+ RegExpMatchInfo::kLastInputOffset, subject, r9,
+ kLRHasNotBeenSaved, kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
ExternalReference address_of_static_offsets_vector =
@@ -1633,10 +1632,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r4: offsets vector
Label next_capture;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
- __ AddP(
- r2, last_match_info_elements,
- Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+ // counts down until wrapping after zero.
+ __ AddP(r2, last_match_info_elements,
+ Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
+ kPointerSize));
__ AddP(r4, Operand(-kIntSize)); // bias down for lwzu
__ bind(&next_capture);
// Read the value from the static offsets vector buffer.
@@ -1649,7 +1648,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ BranchOnCount(r3, &next_capture);
// Return last match info.
- __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset));
+ __ LoadRR(r2, last_match_info_elements);
__ la(sp, MemOperand(sp, (4 * kPointerSize)));
__ Ret();
@@ -1874,6 +1873,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // r2 - number of arguments
// r3 - function
// r5 - slot id
// r4 - vector
@@ -1882,24 +1882,21 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ CmpP(r3, r7);
__ bne(miss);
- __ mov(r2, Operand(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r4, r5, r1);
__ LoadRR(r4, r6);
__ LoadRR(r5, r3);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
+ // r2 - number of arguments
// r3 - function
// r5 - slot id (Smi)
// r4 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does r3 match the recorded monomorphic target?
__ SmiToPtrArrayOffset(r8, r5);
@@ -1933,7 +1930,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r4, r5, r1);
- __ mov(r2, Operand(argc));
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1977,7 +1973,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
IncrementCallCount(masm, r4, r5, r1);
__ bind(&call_count_incremented);
- __ mov(r2, Operand(argc));
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2010,13 +2005,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
- __ Push(r4);
- __ Push(r5);
- __ Push(cp, r3);
+ __ SmiTag(r2);
+ __ Push(r2, r4, r5, cp, r3);
__ CallStub(&create_stub);
- __ Pop(cp, r3);
- __ Pop(r5);
- __ Pop(r4);
+ __ Pop(r4, r5, cp, r3);
+ __ Pop(r2);
+ __ SmiUntag(r2);
}
__ b(&call_function);
@@ -2032,14 +2026,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the function and feedback info.
- __ Push(r3, r4, r5);
+ // Preserve the number of arguments as Smi.
+ __ SmiTag(r2);
+
+ // Push the receiver and the function and feedback info.
+ __ Push(r2, r3, r4, r5);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r3 and exit the internal frame.
__ LoadRR(r3, r2);
+
+ // Restore number of arguments.
+ __ Pop(r2);
+ __ SmiUntag(r2);
}
// StringCharCodeAtGenerator
@@ -3138,30 +3139,12 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(r4);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
@@ -3252,177 +3235,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Jump(ip);
}
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
- Register name = LoadWithVectorDescriptor::NameRegister(); // r4
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
- Register feedback = r6;
- Register receiver_map = r7;
- Register scratch1 = r8;
-
- __ SmiToPtrArrayOffset(r1, slot);
- __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array, Label::kNear);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&miss);
- masm->isolate()->load_stub_cache()->GenerateProbe(
- masm, receiver, name, feedback, receiver_map, scratch1, r9);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // r3
- Register key = LoadWithVectorDescriptor::NameRegister(); // r4
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // r5
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // r2
- Register feedback = r6;
- Register receiver_map = r7;
- Register scratch1 = r8;
-
- __ SmiToPtrArrayOffset(r1, slot);
- __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ CmpP(key, feedback);
- __ bne(&miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiToPtrArrayOffset(r1, slot);
- __ LoadP(feedback,
- FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r3
- Register key = StoreWithVectorDescriptor::NameRegister(); // r4
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r5
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r6
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2)); // r2
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ AddP(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- Register scratch2 = ip;
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&miss);
- masm->isolate()->store_stub_cache()->GenerateProbe(
- masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3795,30 +3613,19 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ CmpP(r2, Operand::Zero());
- __ bne(&not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+ Label not_zero_case, not_one_case;
+ __ CmpP(r2, Operand::Zero());
+ __ bne(&not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- __ bind(&not_zero_case);
- __ CmpP(r2, Operand(1));
- __ bgt(&not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
+ __ bind(&not_zero_case);
+ __ CmpP(r2, Operand(1));
+ __ bgt(&not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
@@ -3869,23 +3676,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
- __ StoreP(r3, MemOperand(sp, r1));
- __ AddP(r2, r2, Operand(3));
- break;
- case NONE:
- __ StoreP(r3, MemOperand(sp, 0 * kPointerSize));
- __ LoadImmP(r2, Operand(3));
- break;
- case ONE:
- __ StoreP(r3, MemOperand(sp, 1 * kPointerSize));
- __ LoadImmP(r2, Operand(4));
- break;
- }
-
+ __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2));
+ __ StoreP(r3, MemOperand(sp, r1));
+ __ AddP(r2, r2, Operand(3));
__ Push(r5, r4);
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
@@ -4315,7 +4108,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r8, Smi::kZero, r0);
Label skip2, skip3;
__ bne(&skip2);
__ LoadImmP(r1, Operand::Zero());
@@ -4385,7 +4178,7 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// r8 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ CmpSmiLiteral(r8, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r8, Smi::kZero, r0);
Label skip6;
__ bne(&skip6);
// Move backing store address to r3, because it is
@@ -4609,133 +4402,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register value = r2;
- Register slot = r4;
-
- Register cell = r3;
- Register cell_details = r5;
- Register cell_value = r6;
- Register cell_value_map = r7;
- Register scratch = r8;
-
- Register context = cp;
- Register context_temp = cell;
-
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
- __ Check(ne, kUnexpectedValue);
- }
-
- // Go up the context chain to the script context.
- for (int i = 0; i < depth(); i++) {
- __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- context = context_temp;
- }
-
- // Load the PropertyCell at the specified slot.
- __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2));
- __ AddP(cell, context, r0);
- __ LoadP(cell, ContextMemOperand(cell));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details);
- __ AndP(cell_details, cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ bne(&not_mutable_data);
- __ JumpIfSmi(value, &fast_smi_case);
-
- __ bind(&fast_heapobject_case);
- __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
- // RecordWriteField clobbers the value register, so we copy it before the
- // call.
- __ LoadRR(r5, value);
- __ RecordWriteField(cell, PropertyCell::kValueOffset, r5, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(&not_mutable_data);
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
- __ CmpP(cell_value, value);
- __ bne(&not_same_value);
-
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ AndP(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
- __ bne(&slow_case);
-
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ CmpP(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ beq(&done);
- __ CmpP(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ beq(&done);
- __ CmpP(cell_details,
- Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(eq, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ bne(&slow_case);
-
- // Now either both old and new values must be smis or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value, &value_is_heap_object);
- __ JumpIfNotSmi(cell_value, &slow_case);
- // Old and new values are smis, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0);
- __ Ret();
-
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value, &slow_case);
-
- __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
- __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CmpP(cell_value_map, scratch);
- __ beq(&fast_heapobject_case);
-
- // Fallback to runtime.
- __ bind(&slow_case);
- __ SmiTag(slot);
- __ Push(slot, value);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@@ -5022,7 +4688,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
__ Push(scratch, holder);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(Smi::kZero); // should_throw_on_error -> false
__ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch);
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index ca40a0c9f1..7fdf99349e 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -31,6 +31,8 @@ const Register LoadDescriptor::SlotRegister() { return r2; }
const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r6; }
+
const Register StoreDescriptor::ReceiverRegister() { return r3; }
const Register StoreDescriptor::NameRegister() { return r4; }
const Register StoreDescriptor::ValueRegister() { return r2; }
@@ -42,9 +44,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r6; }
const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
const Register StoreTransitionDescriptor::MapRegister() { return r7; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r4; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r2; }
-
const Register StringCompareDescriptor::LeftRegister() { return r3; }
const Register StringCompareDescriptor::RightRegister() { return r2; }
@@ -143,7 +142,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r5, r4};
+ Register registers[] = {r3, r2, r5, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -187,12 +186,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r3};
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index 769d3dc1b0..fbf82ccbc5 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -1502,87 +1502,6 @@ void MacroAssembler::PopStackHandler() {
StoreP(r3, MemOperand(ip));
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch, Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!holder_reg.is(ip));
- DCHECK(!scratch.is(ip));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- DCHECK(!ip.is(scratch));
- LoadRR(ip, fp);
- bind(&load_context);
- LoadP(scratch,
- MemOperand(ip, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch, &has_context);
- LoadP(ip, MemOperand(ip, CommonFrameConstants::kCallerFPOffset));
- b(&load_context);
- bind(&has_context);
-
-// In debug mode, make sure the lexical context is set.
-#ifdef DEBUG
- CmpP(scratch, Operand::Zero());
- Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
-#endif
-
- // Load the native context of the current context.
- LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- // Read the first word and compare to the native_context_map.
- LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg); // Restore holder.
- }
-
- // Check if both contexts are the same.
- LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- CmpP(scratch, ip);
- beq(&same_contexts, Label::kNear);
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- // Cannot use ip as a temporary in this verification code. Due to the fact
- // that ip is clobbered as part of cmp with an object Operand.
- push(holder_reg); // Temporarily save holder on the stack.
- LoadRR(holder_reg, ip); // Move ip to its holding place.
- CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(ne, kJSGlobalProxyContextShouldNotBeNull);
-
- LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
- // Restore ip is not needed. ip is reloaded below.
- pop(holder_reg); // Restore holder.
- // Restore ip to holder's context.
- LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- }
-
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
-
- LoadP(scratch, FieldMemOperand(scratch, token_offset));
- LoadP(ip, FieldMemOperand(ip, token_offset));
- CmpP(scratch, ip);
- bne(miss);
-
- bind(&same_contexts);
-}
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -1624,85 +1543,6 @@ void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
ExtractBitRange(t0, t0, 29, 0);
}
-void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
- Register key, Register result,
- Register t0, Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- GetNumberHash(t0, t1);
-
- // Compute the capacity mask.
- LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
- SmiUntag(t1);
- SubP(t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- LoadRR(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
- }
- AndP(t2, t1);
-
- // Scale the index by multiplying by the element size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- LoadRR(ip, t2);
- sll(ip, Operand(1));
- AddP(t2, ip); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- sll(t2, Operand(kPointerSizeLog2));
- AddP(t2, elements);
- LoadP(ip,
- FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
- CmpP(key, ip);
- if (i != kNumberDictionaryProbes - 1) {
- beq(&done, Label::kNear);
- } else {
- bne(miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
- LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
- DCHECK_EQ(DATA, 0);
- AndP(r0, ip, t1);
- bne(miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- LoadP(result, FieldMemOperand(t2, kValueOffset));
-}
-
void MacroAssembler::Allocate(int object_size, Register result,
Register scratch1, Register scratch2,
Label* gc_required, AllocationFlags flags) {
@@ -2116,18 +1956,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}
-void MacroAssembler::CheckFastElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
- bgt(fail);
-}
-
void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
@@ -2302,16 +2130,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // If the hash field contains an array index pick it out. The assert checks
- // that the constants for the maximum number of digits for an array index
- // cached in the hash field and the number of bits reserved for it does not
- // conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
-}
-
void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
Register scratch1, Register scratch2,
DoubleRegister double_scratch) {
@@ -3022,51 +2840,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
- Register scratch) {
- Label big_loop, left_bytes, done, fake_call;
-
- DCHECK(!scratch.is(r0));
-
- // big loop moves 256 bytes at a time
- bind(&big_loop);
- CmpP(length, Operand(static_cast<intptr_t>(0x100)));
- blt(&left_bytes);
-
- mvc(MemOperand(dst), MemOperand(src), 0x100);
-
- AddP(src, Operand(static_cast<intptr_t>(0x100)));
- AddP(dst, Operand(static_cast<intptr_t>(0x100)));
- SubP(length, Operand(static_cast<intptr_t>(0x100)));
- b(&big_loop);
-
- bind(&left_bytes);
- CmpP(length, Operand::Zero());
- beq(&done);
-
- // TODO(john.yan): More optimal version is to use MVC
- // Sequence below has some undiagnosed issue.
- /*
- b(scratch, &fake_call); // use brasl to Save mvc addr to scratch
- mvc(MemOperand(dst), MemOperand(src), 1);
- bind(&fake_call);
- SubP(length, Operand(static_cast<intptr_t>(-1)));
- ex(length, MemOperand(scratch)); // execute mvc instr above
- AddP(src, length);
- AddP(dst, length);
- AddP(src, Operand(static_cast<intptr_t>(0x1)));
- AddP(dst, Operand(static_cast<intptr_t>(0x1)));
- */
-
- mvc(MemOperand(dst), MemOperand(src), 1);
- AddP(src, Operand(static_cast<intptr_t>(0x1)));
- AddP(dst, Operand(static_cast<intptr_t>(0x1)));
- SubP(length, Operand(static_cast<intptr_t>(0x1)));
-
- b(&left_bytes);
- bind(&done);
-}
-
void MacroAssembler::InitializeNFieldsWithFiller(Register current_address,
Register count,
Register filler) {
@@ -3171,7 +2944,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
CmpP(index, ip);
Check(lt, kIndexIsTooLarge);
- DCHECK(Smi::FromInt(0) == 0);
+ DCHECK(Smi::kZero == 0);
CmpP(index, Operand::Zero());
Check(ge, kIndexIsNegative);
@@ -3496,7 +3269,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(r5, r3);
- CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+ CmpSmiLiteral(r5, Smi::kZero, r0);
bne(call_runtime);
bind(&start);
@@ -3581,7 +3354,8 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
ExternalReference new_space_allocation_top_adr =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
DCHECK(!AreAliased(receiver_reg, scratch_reg));
@@ -3592,7 +3366,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- AddP(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
+ AddP(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
mov(ip, Operand(new_space_allocation_top_adr));
LoadP(ip, MemOperand(ip));
XorP(r0, scratch_reg, ip);
@@ -3610,7 +3384,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// we are below top.
bind(&top_check);
CmpP(scratch_reg, ip);
- bgt(no_memento_found);
+ bge(no_memento_found);
// Memento map check.
bind(&map_check);
LoadP(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 7f2d0421bf..06fcaf0519 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -932,18 +932,8 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, whereas both scratch registers are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
- Label* miss);
-
void GetNumberHash(Register t0, Register scratch);
- void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
- Register result, Register t0, Register t1,
- Register t2);
-
inline void MarkCode(NopMarkerTypes type) { nop(type); }
// Check if the given instruction is a 'type' marker.
@@ -1036,11 +1026,6 @@ class MacroAssembler : public Assembler {
Register scratch1, Register scratch2,
Label* gc_required);
- // Copies a number of bytes from src to dst. All registers are clobbered. On
- // exit src and dst will point to the place just after where the last byte was
- // read or written and length will be zero.
- void CopyBytes(Register src, Register dst, Register length, Register scratch);
-
// Initialize fields with filler values. |count| fields starting at
// |current_address| are overwritten with the value in |filler|. At the end
// the loop, |current_address| points at the next uninitialized field.
@@ -1086,10 +1071,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map, Register scratch, Label* fail);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
@@ -1177,12 +1158,6 @@ class MacroAssembler : public Assembler {
return eq;
}
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Get the number of least significant bits from a register
void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 78bc939842..74d37bc20a 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -5682,7 +5682,7 @@ void Simulator::CallInternal(byte* entry, int reg_arg_count) {
// Set up the non-volatile registers with a known value. To be able to check
// that they are preserved properly across JS execution.
- intptr_t callee_saved_value = icount_;
+ uintptr_t callee_saved_value = icount_;
if (reg_arg_count < 5) {
set_register(r6, callee_saved_value + 6);
}
@@ -5700,15 +5700,15 @@ void Simulator::CallInternal(byte* entry, int reg_arg_count) {
// Check that the non-volatile registers have been preserved.
#ifndef V8_TARGET_ARCH_S390X
if (reg_arg_count < 5) {
- DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+ DCHECK_EQ(callee_saved_value + 6, get_low_register<uint32_t>(r6));
}
- DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
- DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
- DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
- DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
- DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
- DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
- DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+ DCHECK_EQ(callee_saved_value + 7, get_low_register<uint32_t>(r7));
+ DCHECK_EQ(callee_saved_value + 8, get_low_register<uint32_t>(r8));
+ DCHECK_EQ(callee_saved_value + 9, get_low_register<uint32_t>(r9));
+ DCHECK_EQ(callee_saved_value + 10, get_low_register<uint32_t>(r10));
+ DCHECK_EQ(callee_saved_value + 11, get_low_register<uint32_t>(r11));
+ DCHECK_EQ(callee_saved_value + 12, get_low_register<uint32_t>(r12));
+ DCHECK_EQ(callee_saved_value + 13, get_low_register<uint32_t>(r13));
#else
if (reg_arg_count < 5) {
DCHECK_EQ(callee_saved_value + 6, get_register(r6));
@@ -5762,7 +5762,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// Remaining arguments passed on stack.
int64_t original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
- intptr_t entry_stack =
+ uintptr_t entry_stack =
(original_stack -
(kCalleeRegisterSaveAreaSize + stack_arg_count * sizeof(intptr_t)));
if (base::OS::ActivationFrameAlignment() != 0) {
@@ -5798,7 +5798,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up the non-volatile registers with a known value. To be able to check
// that they are preserved properly across JS execution.
- intptr_t callee_saved_value = icount_;
+ uintptr_t callee_saved_value = icount_;
if (reg_arg_count < 5) {
set_register(r6, callee_saved_value + 6);
}
@@ -5816,15 +5816,15 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// Check that the non-volatile registers have been preserved.
#ifndef V8_TARGET_ARCH_S390X
if (reg_arg_count < 5) {
- DCHECK_EQ(callee_saved_value + 6, get_low_register<int32_t>(r6));
+ DCHECK_EQ(callee_saved_value + 6, get_low_register<uint32_t>(r6));
}
- DCHECK_EQ(callee_saved_value + 7, get_low_register<int32_t>(r7));
- DCHECK_EQ(callee_saved_value + 8, get_low_register<int32_t>(r8));
- DCHECK_EQ(callee_saved_value + 9, get_low_register<int32_t>(r9));
- DCHECK_EQ(callee_saved_value + 10, get_low_register<int32_t>(r10));
- DCHECK_EQ(callee_saved_value + 11, get_low_register<int32_t>(r11));
- DCHECK_EQ(callee_saved_value + 12, get_low_register<int32_t>(r12));
- DCHECK_EQ(callee_saved_value + 13, get_low_register<int32_t>(r13));
+ DCHECK_EQ(callee_saved_value + 7, get_low_register<uint32_t>(r7));
+ DCHECK_EQ(callee_saved_value + 8, get_low_register<uint32_t>(r8));
+ DCHECK_EQ(callee_saved_value + 9, get_low_register<uint32_t>(r9));
+ DCHECK_EQ(callee_saved_value + 10, get_low_register<uint32_t>(r10));
+ DCHECK_EQ(callee_saved_value + 11, get_low_register<uint32_t>(r11));
+ DCHECK_EQ(callee_saved_value + 12, get_low_register<uint32_t>(r12));
+ DCHECK_EQ(callee_saved_value + 13, get_low_register<uint32_t>(r13));
#else
if (reg_arg_count < 5) {
DCHECK_EQ(callee_saved_value + 6, get_register(r6));
@@ -5850,7 +5850,7 @@ intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
// Pop stack passed arguments.
#ifndef V8_TARGET_ARCH_S390X
- DCHECK_EQ(entry_stack, get_low_register<int32_t>(sp));
+ DCHECK_EQ(entry_stack, get_low_register<uint32_t>(sp));
#else
DCHECK_EQ(entry_stack, get_register(sp));
#endif
@@ -6504,7 +6504,6 @@ EVALUATE(LCR) {
DCHECK_OPCODE(LCR);
DECODE_RR_INSTRUCTION(r1, r2);
int32_t r2_val = get_low_register<int32_t>(r2);
- int32_t original_r2_val = r2_val;
r2_val = ~r2_val;
r2_val = r2_val + 1;
set_low_register(r1, r2_val);
@@ -6513,7 +6512,7 @@ EVALUATE(LCR) {
// Cannot do int comparison due to GCC 4.8 bug on x86.
// Detect INT_MIN alternatively, as it is the only value where both
// original and result are negative due to overflow.
- if (r2_val < 0 && original_r2_val < 0) {
+ if (r2_val == (static_cast<int32_t>(1) << 31)) {
SetS390OverflowCode(true);
}
return length;
@@ -9837,7 +9836,7 @@ EVALUATE(LCGR) {
set_register(r1, r2_val);
SetS390ConditionCode<int64_t>(r2_val, 0);
// if the input is INT_MIN, loading its compliment would be overflowing
- if (r2_val < 0 && (r2_val + 1) > 0) {
+ if (r2_val == (static_cast<int64_t>(1) << 63)) {
SetS390OverflowCode(true);
}
return length;
diff --git a/deps/v8/src/signature.h b/deps/v8/src/signature.h
index 97238b6749..32050fe4b0 100644
--- a/deps/v8/src/signature.h
+++ b/deps/v8/src/signature.h
@@ -32,7 +32,16 @@ class Signature : public ZoneObject {
return reps_[index];
}
- const T* raw_data() const { return reps_; }
+ bool Equals(Signature* that) {
+ if (this == that) return true;
+ if (this->parameter_count() != that->parameter_count()) return false;
+ if (this->return_count() != that->return_count()) return false;
+ size_t size = this->return_count() + this->parameter_count();
+ for (size_t i = 0; i < size; i++) {
+ if (this->reps_[i] != that->reps_[i]) return false;
+ }
+ return true;
+ }
// For incrementally building signatures.
class Builder {
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 16044a5059..86a91643d2 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -12,6 +12,8 @@
#include "src/snapshot/deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/version.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
@@ -99,8 +101,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
if (ElideObject(obj)) {
- return SerializeObject(*isolate()->factory()->undefined_value(),
- how_to_code, where_to_point, skip);
+ return SerializeObject(isolate()->heap()->undefined_value(), how_to_code,
+ where_to_point, skip);
}
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
@@ -217,15 +219,19 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
}
std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
- Isolate* isolate, Handle<FixedArray> compiled_module) {
+ Isolate* isolate, Handle<FixedArray> input) {
+ Handle<WasmCompiledModule> compiled_module =
+ Handle<WasmCompiledModule>::cast(input);
WasmCompiledModuleSerializer wasm_cs(isolate, 0);
wasm_cs.reference_map()->AddAttachedReference(*isolate->native_context());
+ wasm_cs.reference_map()->AddAttachedReference(
+ *compiled_module->module_bytes());
ScriptData* data = wasm_cs.Serialize(compiled_module);
return std::unique_ptr<ScriptData>(data);
}
MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
- Isolate* isolate, ScriptData* data) {
+ Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes) {
SerializedCodeData::SanityCheckResult sanity_check_result =
SerializedCodeData::CHECK_SUCCESS;
MaybeHandle<FixedArray> nothing;
@@ -239,6 +245,15 @@ MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
Deserializer deserializer(&scd, true);
deserializer.AddAttachedObject(isolate->native_context());
+ MaybeHandle<String> maybe_wire_bytes_as_string =
+ isolate->factory()->NewStringFromOneByte(wire_bytes, TENURED);
+ Handle<String> wire_bytes_as_string;
+ if (!maybe_wire_bytes_as_string.ToHandle(&wire_bytes_as_string)) {
+ return nothing;
+ }
+ deserializer.AddAttachedObject(
+ handle(SeqOneByteString::cast(*wire_bytes_as_string)));
+
Vector<const uint32_t> stub_keys = scd.CodeStubKeys();
for (int i = 0; i < stub_keys.length(); ++i) {
deserializer.AddAttachedObject(
@@ -247,7 +262,11 @@ MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
MaybeHandle<HeapObject> obj = deserializer.DeserializeObject(isolate);
if (obj.is_null() || !obj.ToHandleChecked()->IsFixedArray()) return nothing;
- return Handle<FixedArray>::cast(obj.ToHandleChecked());
+ Handle<WasmCompiledModule> compiled_module =
+ Handle<WasmCompiledModule>::cast(obj.ToHandleChecked());
+
+ WasmCompiledModule::RecreateModuleWrapper(isolate, compiled_module);
+ return compiled_module;
}
class Checksum {
@@ -340,6 +359,7 @@ SerializedCodeData::SerializedCodeData(const List<byte>* payload,
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
Isolate* isolate, uint32_t expected_source_hash) const {
+ if (this->size_ < kHeaderSize) return INVALID_HEADER;
uint32_t magic_number = GetMagicNumber();
if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
diff --git a/deps/v8/src/snapshot/code-serializer.h b/deps/v8/src/snapshot/code-serializer.h
index b3c54d1c84..15757379f0 100644
--- a/deps/v8/src/snapshot/code-serializer.h
+++ b/deps/v8/src/snapshot/code-serializer.h
@@ -59,8 +59,8 @@ class WasmCompiledModuleSerializer : public CodeSerializer {
public:
static std::unique_ptr<ScriptData> SerializeWasmModule(
Isolate* isolate, Handle<FixedArray> compiled_module);
- static MaybeHandle<FixedArray> DeserializeWasmModule(Isolate* isolate,
- ScriptData* data);
+ static MaybeHandle<FixedArray> DeserializeWasmModule(
+ Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes);
protected:
void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
@@ -74,7 +74,9 @@ class WasmCompiledModuleSerializer : public CodeSerializer {
}
}
- bool ElideObject(Object* obj) override { return obj->IsWeakCell(); };
+ bool ElideObject(Object* obj) override {
+ return obj->IsWeakCell() || obj->IsForeign();
+ };
private:
WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
@@ -92,9 +94,35 @@ class SerializedCodeData : public SerializedData {
SOURCE_MISMATCH = 3,
CPU_FEATURES_MISMATCH = 4,
FLAGS_MISMATCH = 5,
- CHECKSUM_MISMATCH = 6
+ CHECKSUM_MISMATCH = 6,
+ INVALID_HEADER = 7
};
+ // The data header consists of uint32_t-sized entries:
+ // [0] magic number and external reference count
+ // [1] version hash
+ // [2] source hash
+ // [3] cpu features
+ // [4] flag hash
+ // [5] number of code stub keys
+ // [6] number of reservation size entries
+ // [7] payload length
+ // [8] payload checksum part 1
+ // [9] payload checksum part 2
+ // ... reservations
+ // ... code stub keys
+ // ... serialized payload
+ static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
+ static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
+ static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
+ static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
+ static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
+ static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
+ static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
+ static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
+ static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
+ static const int kHeaderSize = kChecksum2Offset + kInt32Size;
+
// Used when consuming.
static const SerializedCodeData FromCachedData(
Isolate* isolate, ScriptData* cached_data, uint32_t expected_source_hash,
@@ -124,30 +152,6 @@ class SerializedCodeData : public SerializedData {
SanityCheckResult SanityCheck(Isolate* isolate,
uint32_t expected_source_hash) const;
- // The data header consists of uint32_t-sized entries:
- // [0] magic number and external reference count
- // [1] version hash
- // [2] source hash
- // [3] cpu features
- // [4] flag hash
- // [5] number of code stub keys
- // [6] number of reservation size entries
- // [7] payload length
- // [8] payload checksum part 1
- // [9] payload checksum part 2
- // ... reservations
- // ... code stub keys
- // ... serialized payload
- static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
- static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
- static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
- static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
- static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
- static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
- static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
- static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
- static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
- static const int kHeaderSize = kChecksum2Offset + kInt32Size;
};
} // namespace internal
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index b90a2c5b10..aabd806b7a 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -99,7 +99,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
// The allocation site list is build during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
- if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
isolate_->heap()->set_allocation_sites_list(
isolate_->heap()->undefined_value());
}
@@ -128,6 +128,7 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
Object* root;
VisitPointer(&root);
DeserializeDeferredObjects();
+ DeserializeInternalFields();
isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
@@ -212,6 +213,31 @@ void Deserializer::DeserializeDeferredObjects() {
}
}
+void Deserializer::DeserializeInternalFields() {
+ if (!source_.HasMore() || source_.Get() != kInternalFieldsData) return;
+ DisallowHeapAllocation no_gc;
+ DisallowJavascriptExecution no_js(isolate_);
+ DisallowCompilation no_compile(isolate_);
+ v8::DeserializeInternalFieldsCallback callback =
+ isolate_->deserialize_internal_fields_callback();
+ DCHECK_NOT_NULL(callback);
+ for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
+ HandleScope scope(isolate_);
+ int space = code & kSpaceMask;
+ DCHECK(space <= kNumberOfSpaces);
+ DCHECK(code - space == kNewObject);
+ Handle<JSObject> obj(JSObject::cast(GetBackReferencedObject(space)),
+ isolate_);
+ int index = source_.GetInt();
+ int size = source_.GetInt();
+ byte* data = new byte[size];
+ source_.CopyRaw(data, size);
+ callback(v8::Utils::ToLocal(obj), index,
+ {reinterpret_cast<char*>(data), size});
+ delete[] data;
+ }
+}
+
// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
public:
@@ -277,7 +303,7 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
// TODO(mvstanton): consider treating the heap()->allocation_sites_list()
// as a (weak) root. If this root is relocated correctly, this becomes
// unnecessary.
- if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
+ if (isolate_->heap()->allocation_sites_list() == Smi::kZero) {
site->set_weak_next(isolate_->heap()->undefined_value());
} else {
site->set_weak_next(isolate_->heap()->allocation_sites_list());
@@ -502,7 +528,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
int skip = source_.GetInt(); \
current = reinterpret_cast<Object**>( \
reinterpret_cast<Address>(current) + skip); \
- int reference_id = source_.GetInt(); \
+ uint32_t reference_id = static_cast<uint32_t>(source_.GetInt()); \
Address address = external_reference_table_->address(reference_id); \
new_object = reinterpret_cast<Object*>(address); \
} else if (where == kAttachedReference) { \
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index 634d80e3ab..db7996297d 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -88,6 +88,7 @@ class Deserializer : public SerializerDeserializer {
}
void DeserializeDeferredObjects();
+ void DeserializeInternalFields();
void FlushICacheForNewIsolate();
void FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index b46f6755f0..e89f44f6e2 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -10,9 +10,12 @@
namespace v8 {
namespace internal {
-PartialSerializer::PartialSerializer(Isolate* isolate,
- StartupSerializer* startup_serializer)
- : Serializer(isolate), startup_serializer_(startup_serializer) {
+PartialSerializer::PartialSerializer(
+ Isolate* isolate, StartupSerializer* startup_serializer,
+ v8::SerializeInternalFieldsCallback callback)
+ : Serializer(isolate),
+ startup_serializer_(startup_serializer),
+ serialize_internal_fields_(callback) {
InitializeCodeAddressMap();
}
@@ -33,10 +36,14 @@ void PartialSerializer::Serialize(Object** o) {
context->set(Context::NEXT_CONTEXT_LINK,
isolate_->heap()->undefined_value());
DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
+ // Reset math random cache to get fresh random numbers.
+ context->set_math_random_index(Smi::kZero);
+ context->set_math_random_cache(isolate_->heap()->undefined_value());
}
}
VisitPointer(o);
SerializeDeferredObjects();
+ SerializeInternalFields();
Pad();
}
@@ -93,6 +100,11 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
function->ClearTypeFeedbackInfo();
}
+ if (obj->IsJSObject()) {
+ JSObject* jsobj = JSObject::cast(obj);
+ if (jsobj->GetInternalFieldCount() > 0) internal_field_holders_.Add(jsobj);
+ }
+
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
serializer.Serialize();
@@ -106,9 +118,39 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
DCHECK(!o->IsScript());
return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
+ o->IsTemplateInfo() ||
o->map() ==
startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
+void PartialSerializer::SerializeInternalFields() {
+ int count = internal_field_holders_.length();
+ if (count == 0) return;
+ DisallowHeapAllocation no_gc;
+ DisallowJavascriptExecution no_js(isolate());
+ DisallowCompilation no_compile(isolate());
+ DCHECK_NOT_NULL(serialize_internal_fields_);
+ sink_.Put(kInternalFieldsData, "internal fields data");
+ while (internal_field_holders_.length() > 0) {
+ HandleScope scope(isolate());
+ Handle<JSObject> obj(internal_field_holders_.RemoveLast(), isolate());
+ SerializerReference reference = reference_map_.Lookup(*obj);
+ DCHECK(reference.is_back_reference());
+ int internal_fields_count = obj->GetInternalFieldCount();
+ for (int i = 0; i < internal_fields_count; i++) {
+ if (obj->GetInternalField(i)->IsHeapObject()) continue;
+ StartupData data = serialize_internal_fields_(v8::Utils::ToLocal(obj), i);
+ sink_.Put(kNewObject + reference.space(), "internal field holder");
+ PutBackReference(*obj, reference);
+ sink_.PutInt(i, "internal field index");
+ sink_.PutInt(data.raw_size, "internal fields data size");
+ sink_.PutRaw(reinterpret_cast<const byte*>(data.data), data.raw_size,
+ "internal fields data");
+ delete[] data.data;
+ }
+ }
+ sink_.Put(kSynchronize, "Finished with internal fields data");
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 282f76e78f..45d64e431e 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -15,7 +15,8 @@ class StartupSerializer;
class PartialSerializer : public Serializer {
public:
- PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
+ PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer,
+ v8::SerializeInternalFieldsCallback callback);
~PartialSerializer() override;
@@ -28,7 +29,11 @@ class PartialSerializer : public Serializer {
bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
+ void SerializeInternalFields();
+
StartupSerializer* startup_serializer_;
+ List<JSObject*> internal_field_holders_;
+ v8::SerializeInternalFieldsCallback serialize_internal_fields_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index adfd6e4f64..f188793419 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -13,37 +13,42 @@ namespace internal {
ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
map_ = isolate->external_reference_map();
- if (map_ != NULL) return;
- map_ = new base::HashMap();
+#ifdef DEBUG
+ table_ = ExternalReferenceTable::instance(isolate);
+#endif // DEBUG
+ if (map_ != nullptr) return;
+ map_ = new AddressToIndexHashMap();
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
- for (int i = 0; i < table->size(); ++i) {
+ for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
- if (addr == ExternalReferenceTable::NotAvailable()) continue;
- // We expect no duplicate external references entries in the table.
- // AccessorRefTable getter may have duplicates, indicated by an empty string
- // as name.
- DCHECK(table->name(i)[0] == '\0' ||
- map_->Lookup(addr, Hash(addr)) == nullptr);
- map_->LookupOrInsert(addr, Hash(addr))->value = reinterpret_cast<void*>(i);
+ DCHECK(map_->Get(addr).IsNothing() ||
+ strncmp(table->name(i), "Redirect to ", 12) == 0);
+ map_->Set(addr, i);
+ DCHECK(map_->Get(addr).IsJust());
}
isolate->set_external_reference_map(map_);
}
uint32_t ExternalReferenceEncoder::Encode(Address address) const {
- DCHECK_NOT_NULL(address);
- base::HashMap::Entry* entry =
- const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
- DCHECK_NOT_NULL(entry);
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+ Maybe<uint32_t> maybe_index = map_->Get(address);
+ if (maybe_index.IsNothing()) {
+ void* addr = address;
+ v8::base::OS::PrintError("Unknown external reference %p.\n", addr);
+ v8::base::OS::PrintError("%s", ExternalReferenceTable::ResolveSymbol(addr));
+ v8::base::OS::Abort();
+ }
+#ifdef DEBUG
+ table_->increment_count(maybe_index.FromJust());
+#endif // DEBUG
+ return maybe_index.FromJust();
}
const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
Address address) const {
- base::HashMap::Entry* entry =
- const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
- if (entry == NULL) return "<unknown>";
- uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
- return ExternalReferenceTable::instance(isolate)->name(i);
+ Maybe<uint32_t> maybe_index = map_->Get(address);
+ if (maybe_index.IsNothing()) return "<unknown>";
+ return ExternalReferenceTable::instance(isolate)->name(
+ maybe_index.FromJust());
}
void SerializedData::AllocateData(int size) {
@@ -64,7 +69,7 @@ void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) {
List<Object*>* cache = isolate->partial_snapshot_cache();
for (int i = 0;; ++i) {
// Extend the array ready to get a value when deserializing.
- if (cache->length() <= i) cache->Add(Smi::FromInt(0));
+ if (cache->length() <= i) cache->Add(Smi::kZero);
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
visitor->VisitPointer(&cache->at(i));
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 74b0218073..201ac4e039 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -23,12 +23,10 @@ class ExternalReferenceEncoder {
const char* NameOfAddress(Isolate* isolate, Address address) const;
private:
- static uint32_t Hash(Address key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
- kPointerSizeLog2);
- }
-
- base::HashMap* map_;
+ AddressToIndexHashMap* map_;
+#ifdef DEBUG
+ ExternalReferenceTable* table_;
+#endif // DEBUG
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
};
@@ -172,6 +170,8 @@ class SerializerDeserializer : public ObjectVisitor {
// Used for the source code for compiled stubs, which is in the executable,
// but is referred to from external strings in the snapshot.
static const int kExtraNativesStringResource = 0x1e;
+ // Used for embedder-provided serialization data for internal fields.
+ static const int kInternalFieldsData = 0x1f;
// 8 hot (recently seen or back-referenced) objects with optional skip.
static const int kNumberOfHotObjects = 8;
@@ -182,7 +182,7 @@ class SerializerDeserializer : public ObjectVisitor {
static const int kHotObjectWithSkip = 0x58;
static const int kHotObjectMask = 0x07;
- // 0x1f, 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
+ // 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
// ---------- byte code range 0x80..0xff ----------
// First 32 root array items.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index f622a5b8d2..2e971e3407 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -212,6 +212,11 @@ void Serializer::PutRoot(int root_index, HeapObject* object,
PrintF("\n");
}
+ // Assert that the first 32 root array items are a conscious choice. They are
+ // chosen so that the most common ones can be encoded more efficiently.
+ STATIC_ASSERT(Heap::kEmptyDescriptorArrayRootIndex ==
+ kNumberOfRootArrayConstants - 1);
+
if (how_to_code == kPlain && where_to_point == kStartOfObject &&
root_index < kNumberOfRootArrayConstants &&
!isolate()->heap()->InNewSpace(object)) {
@@ -618,6 +623,7 @@ void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
Address target = rinfo->target_external_reference();
+ DCHECK_NOT_NULL(target); // Code does not reference null.
sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 9c1c3b904c..ac75c5d163 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -30,25 +30,26 @@ class StartupSerializer : public Serializer {
int PartialSnapshotCacheIndex(HeapObject* o);
private:
- class PartialCacheIndexMap : public AddressMapBase {
+ class PartialCacheIndexMap {
public:
PartialCacheIndexMap() : map_(), next_index_(0) {}
// Lookup object in the map. Return its index if found, or create
// a new entry with new_index as value, and return kInvalidIndex.
bool LookupOrInsert(HeapObject* obj, int* index_out) {
- base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
- if (entry != NULL) {
- *index_out = GetValue(entry);
+ Maybe<uint32_t> maybe_index = map_.Get(obj);
+ if (maybe_index.IsJust()) {
+ *index_out = maybe_index.FromJust();
return true;
}
*index_out = next_index_;
- SetValue(LookupEntry(&map_, obj, true), next_index_++);
+ map_.Set(obj, next_index_++);
return false;
}
private:
- base::HashMap map_;
+ DisallowHeapAllocation no_allocation_;
+ HeapObjectToIndexHashMap map_;
int next_index_;
DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index ef6d0773f9..35d8e7c2f6 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -47,17 +47,19 @@ void SubtractFromEntry(PositionTableEntry& value,
}
// Helper: Encode an integer.
-void EncodeInt(ZoneVector<byte>& bytes, int value) {
+template <typename T>
+void EncodeInt(ZoneVector<byte>& bytes, T value) {
// Zig-zag encoding.
- static const int kShift = kIntSize * kBitsPerByte - 1;
+ static const int kShift = sizeof(T) * kBitsPerByte - 1;
value = ((value << 1) ^ (value >> kShift));
DCHECK_GE(value, 0);
- unsigned int encoded = static_cast<unsigned int>(value);
+ auto encoded = static_cast<typename std::make_unsigned<T>::type>(value);
bool more;
do {
more = encoded > ValueBits::kMax;
- bytes.push_back(MoreBit::encode(more) |
- ValueBits::encode(encoded & ValueBits::kMask));
+ byte current =
+ MoreBit::encode(more) | ValueBits::encode(encoded & ValueBits::kMask);
+ bytes.push_back(current);
encoded >>= ValueBits::kSize;
} while (more);
}
@@ -73,25 +75,27 @@ void EncodeEntry(ZoneVector<byte>& bytes, const PositionTableEntry& entry) {
}
// Helper: Decode an integer.
-void DecodeInt(ByteArray* bytes, int* index, int* v) {
+template <typename T>
+T DecodeInt(ByteArray* bytes, int* index) {
byte current;
int shift = 0;
- int decoded = 0;
+ T decoded = 0;
bool more;
do {
current = bytes->get((*index)++);
- decoded |= ValueBits::decode(current) << shift;
+ decoded |= static_cast<typename std::make_unsigned<T>::type>(
+ ValueBits::decode(current))
+ << shift;
more = MoreBit::decode(current);
shift += ValueBits::kSize;
} while (more);
DCHECK_GE(decoded, 0);
decoded = (decoded >> 1) ^ (-(decoded & 1));
- *v = decoded;
+ return decoded;
}
void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
- int tmp;
- DecodeInt(bytes, index, &tmp);
+ int tmp = DecodeInt<int>(bytes, index);
if (tmp >= 0) {
entry->is_statement = true;
entry->code_offset = tmp;
@@ -99,7 +103,7 @@ void DecodeEntry(ByteArray* bytes, int* index, PositionTableEntry* entry) {
entry->is_statement = false;
entry->code_offset = -(tmp + 1);
}
- DecodeInt(bytes, index, &entry->source_position);
+ entry->source_position = DecodeInt<int64_t>(bytes, index);
}
} // namespace
@@ -115,11 +119,12 @@ SourcePositionTableBuilder::SourcePositionTableBuilder(
}
void SourcePositionTableBuilder::AddPosition(size_t code_offset,
- int source_position,
+ SourcePosition source_position,
bool is_statement) {
if (Omit()) return;
+ DCHECK(source_position.IsKnown());
int offset = static_cast<int>(code_offset);
- AddEntry({offset, source_position, is_statement});
+ AddEntry({offset, source_position.raw(), is_statement});
}
void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
@@ -152,7 +157,7 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
encoded.Advance(), raw++) {
DCHECK(raw != raw_entries_.end());
DCHECK_EQ(encoded.code_offset(), raw->code_offset);
- DCHECK_EQ(encoded.source_position(), raw->source_position);
+ DCHECK_EQ(encoded.source_position().raw(), raw->source_position);
DCHECK_EQ(encoded.is_statement(), raw->is_statement);
}
DCHECK(raw == raw_entries_.end());
@@ -170,7 +175,7 @@ SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
void SourcePositionTableIterator::Advance() {
DCHECK(!done());
DCHECK(index_ >= 0 && index_ <= table_->length());
- if (index_ == table_->length()) {
+ if (index_ >= table_->length()) {
index_ = kDone;
} else {
PositionTableEntry tmp;
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 74c3b9e45f..f569ac9819 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -7,7 +7,9 @@
#include "src/assert-scope.h"
#include "src/checks.h"
+#include "src/globals.h"
#include "src/handles.h"
+#include "src/source-position.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -22,22 +24,23 @@ class Zone;
struct PositionTableEntry {
PositionTableEntry()
: code_offset(0), source_position(0), is_statement(false) {}
- PositionTableEntry(int offset, int source, bool statement)
+ PositionTableEntry(int offset, int64_t source, bool statement)
: code_offset(offset), source_position(source), is_statement(statement) {}
int code_offset;
- int source_position;
+ int64_t source_position;
bool is_statement;
};
-class SourcePositionTableBuilder {
+class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
public:
enum RecordingMode { OMIT_SOURCE_POSITIONS, RECORD_SOURCE_POSITIONS };
SourcePositionTableBuilder(Zone* zone,
RecordingMode mode = RECORD_SOURCE_POSITIONS);
- void AddPosition(size_t code_offset, int source_position, bool is_statement);
+ void AddPosition(size_t code_offset, SourcePosition source_position,
+ bool is_statement);
Handle<ByteArray> ToSourcePositionTable(Isolate* isolate,
Handle<AbstractCode> code);
@@ -55,7 +58,7 @@ class SourcePositionTableBuilder {
PositionTableEntry previous_; // Previously written entry, to compute delta.
};
-class SourcePositionTableIterator {
+class V8_EXPORT_PRIVATE SourcePositionTableIterator {
public:
explicit SourcePositionTableIterator(ByteArray* byte_array);
@@ -65,9 +68,9 @@ class SourcePositionTableIterator {
DCHECK(!done());
return current_.code_offset;
}
- int source_position() const {
+ SourcePosition source_position() const {
DCHECK(!done());
- return current_.source_position;
+ return SourcePosition::FromRaw(current_.source_position);
}
bool is_statement() const {
DCHECK(!done());
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
new file mode 100644
index 0000000000..e9f86db01b
--- /dev/null
+++ b/deps/v8/src/source-position.cc
@@ -0,0 +1,131 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/source-position.h"
+#include "src/compilation-info.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos) {
+ Handle<SharedFunctionInfo> function(pos.function);
+ Handle<Script> script(Script::cast(function->script()));
+ out << "<";
+ if (script->name()->IsString()) {
+ out << String::cast(script->name())->ToCString(DISALLOW_NULLS).get();
+ } else {
+ out << "unknown";
+ }
+ out << ":" << pos.line + 1 << ":" << pos.column + 1 << ">";
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out,
+ const std::vector<SourcePositionInfo>& stack) {
+ bool first = true;
+ for (const SourcePositionInfo& pos : stack) {
+ if (!first) out << " inlined at ";
+ out << pos;
+ first = false;
+ }
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const SourcePosition& pos) {
+ if (pos.isInlined()) {
+ out << "<inlined(" << pos.InliningId() << "):";
+ } else {
+ out << "<not inlined:";
+ }
+ out << pos.ScriptOffset() << ">";
+ return out;
+}
+
+SourcePositionInfo SourcePosition::Info(
+ Handle<SharedFunctionInfo> function) const {
+ SourcePositionInfo result(*this, function);
+ Handle<Script> script(Script::cast(function->script()));
+ Script::PositionInfo pos;
+ if (Script::GetPositionInfo(script, ScriptOffset(), &pos,
+ Script::WITH_OFFSET)) {
+ result.line = pos.line;
+ result.column = pos.column;
+ }
+ return result;
+}
+
+std::vector<SourcePositionInfo> SourcePosition::InliningStack(
+ CompilationInfo* cinfo) const {
+ SourcePosition pos = *this;
+ std::vector<SourcePositionInfo> stack;
+ while (pos.isInlined()) {
+ const auto& inl = cinfo->inlined_functions()[pos.InliningId()];
+ stack.push_back(pos.Info(inl.shared_info));
+ pos = inl.position.position;
+ }
+ stack.push_back(pos.Info(cinfo->shared_info()));
+ return stack;
+}
+
+std::vector<SourcePositionInfo> SourcePosition::InliningStack(
+ Handle<Code> code) const {
+ Handle<DeoptimizationInputData> deopt_data(
+ DeoptimizationInputData::cast(code->deoptimization_data()));
+ SourcePosition pos = *this;
+ std::vector<SourcePositionInfo> stack;
+ while (pos.isInlined()) {
+ InliningPosition inl =
+ deopt_data->InliningPositions()->get(pos.InliningId());
+ Handle<SharedFunctionInfo> function(
+ deopt_data->GetInlinedFunction(inl.inlined_function_id));
+ stack.push_back(pos.Info(function));
+ pos = inl.position;
+ }
+ Handle<SharedFunctionInfo> function(
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
+ stack.push_back(pos.Info(function));
+ return stack;
+}
+
+void SourcePosition::Print(std::ostream& out,
+ SharedFunctionInfo* function) const {
+ Script* script = Script::cast(function->script());
+ Object* source_name = script->name();
+ Script::PositionInfo pos;
+ script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+ out << "<";
+ if (source_name->IsString()) {
+ out << String::cast(source_name)
+ ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+ .get();
+ } else {
+ out << "unknown";
+ }
+ out << ":" << pos.line + 1 << ":" << pos.column + 1 << ">";
+}
+
+void SourcePosition::Print(std::ostream& out, Code* code) const {
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ if (!isInlined()) {
+ SharedFunctionInfo* function(
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
+ Print(out, function);
+ } else {
+ InliningPosition inl = deopt_data->InliningPositions()->get(InliningId());
+ if (inl.inlined_function_id == -1) {
+ out << *this;
+ } else {
+ SharedFunctionInfo* function =
+ deopt_data->GetInlinedFunction(inl.inlined_function_id);
+ Print(out, function);
+ }
+ out << " inlined at ";
+ inl.position.Print(out, code);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
index 2d36e97521..aa7d31bae2 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/source-position.h
@@ -9,77 +9,114 @@
#include "src/flags.h"
#include "src/globals.h"
+#include "src/handles.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-// This class encapsulates encoding and decoding of sources positions from
-// which hydrogen values originated.
-// When FLAG_track_hydrogen_positions is set this object encodes the
-// identifier of the inlining and absolute offset from the start of the
-// inlined function.
-// When the flag is not set we simply track absolute offset from the
-// script start.
-class SourcePosition {
+class Code;
+class CompilationInfo;
+class Script;
+class SharedFunctionInfo;
+struct SourcePositionInfo;
+
+// SourcePosition stores
+// - script_offset (31 bit non-negative int or kNoSourcePosition)
+// - inlining_id (16 bit non-negative int or kNotInlined).
+//
+// A defined inlining_id refers to positions in
+// CompilationInfo::inlined_functions or
+// DeoptimizationInputData::InliningPositions, depending on the compilation
+// stage.
+class SourcePosition final {
public:
- static SourcePosition Unknown() {
- return SourcePosition::FromRaw(kNoPosition);
+ explicit SourcePosition(int script_offset, int inlining_id = kNotInlined)
+ : value_(0) {
+ SetScriptOffset(script_offset);
+ SetInliningId(inlining_id);
}
- bool IsUnknown() const { return value_ == kNoPosition; }
-
- uint32_t position() const { return PositionField::decode(value_); }
- void set_position(uint32_t position) {
- if (FLAG_hydrogen_track_positions) {
- value_ = static_cast<uint32_t>(PositionField::update(value_, position));
- } else {
- value_ = position;
- }
+ static SourcePosition Unknown() { return SourcePosition(kNoSourcePosition); }
+ bool IsKnown() const {
+ return ScriptOffset() != kNoSourcePosition || InliningId() != kNotInlined;
}
+ bool isInlined() const { return InliningId() != kNotInlined; }
- uint32_t inlining_id() const { return InliningIdField::decode(value_); }
- void set_inlining_id(uint32_t inlining_id) {
- if (FLAG_hydrogen_track_positions) {
- value_ =
- static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
- }
- }
+ std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
+ std::vector<SourcePositionInfo> InliningStack(CompilationInfo* code) const;
- uint32_t raw() const { return value_; }
+ void Print(std::ostream& out, Code* function) const;
- private:
- static const uint32_t kNoPosition = static_cast<uint32_t>(kNoSourcePosition);
- typedef BitField<uint32_t, 0, 9> InliningIdField;
+ int ScriptOffset() const { return ScriptOffsetField::decode(value_) - 1; }
+ int InliningId() const { return InliningIdField::decode(value_) - 1; }
- // Offset from the start of the inlined function.
- typedef BitField<uint32_t, 9, 23> PositionField;
+ void SetScriptOffset(int script_offset) {
+ DCHECK(script_offset <= ScriptOffsetField::kMax - 2);
+ DCHECK(script_offset >= kNoSourcePosition);
+ value_ = ScriptOffsetField::update(value_, script_offset + 1);
+ }
+ void SetInliningId(int inlining_id) {
+ DCHECK(inlining_id <= InliningIdField::kMax - 2);
+ DCHECK(inlining_id >= kNotInlined);
+ value_ = InliningIdField::update(value_, inlining_id + 1);
+ }
- friend class HPositionInfo;
- friend class Deoptimizer;
+ static const int kNotInlined = -1;
+ STATIC_ASSERT(kNoSourcePosition == -1);
- static SourcePosition FromRaw(uint32_t raw_position) {
- SourcePosition position;
- position.value_ = raw_position;
+ int64_t raw() const { return static_cast<int64_t>(value_); }
+ static SourcePosition FromRaw(int64_t raw) {
+ SourcePosition position = Unknown();
+ DCHECK_GE(raw, 0);
+ position.value_ = static_cast<uint64_t>(raw);
return position;
}
- // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
- // and PositionField.
- // Otherwise contains absolute offset from the script start.
- uint32_t value_;
+ private:
+ void Print(std::ostream& out, SharedFunctionInfo* function) const;
+ SourcePositionInfo Info(Handle<SharedFunctionInfo> script) const;
+
+ // InliningId is in the high bits for better compression in
+ // SourcePositionTable.
+ typedef BitField64<int, 0, 31> ScriptOffsetField;
+ typedef BitField64<int, 31, 16> InliningIdField;
+ // Leaving the highest bit untouched to allow for signed conversion.
+ uint64_t value_;
};
-inline std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
- if (p.IsUnknown()) {
- return os << "<?>";
- } else if (FLAG_hydrogen_track_positions) {
- return os << "<" << p.inlining_id() << ":" << p.position() << ">";
- } else {
- return os << "<0:" << p.raw() << ">";
- }
+inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
+ return lhs.raw() == rhs.raw();
+}
+
+inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
+ return !(lhs == rhs);
}
+struct InliningPosition {
+ // position of the inlined call
+ SourcePosition position = SourcePosition::Unknown();
+
+ // references position in DeoptimizationInputData::literals()
+ int inlined_function_id;
+};
+
+struct SourcePositionInfo {
+ explicit SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f)
+ : position(pos), function(f) {}
+
+ SourcePosition position;
+ Handle<SharedFunctionInfo> function;
+ int line = -1;
+ int column = -1;
+};
+
+std::ostream& operator<<(std::ostream& out, const SourcePosition& pos);
+
+std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos);
+std::ostream& operator<<(std::ostream& out,
+ const std::vector<SourcePositionInfo>& stack);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/startup-data-util.cc
index 7c6d9ebf21..5f5472f0af 100644
--- a/deps/v8/src/startup-data-util.cc
+++ b/deps/v8/src/startup-data-util.cc
@@ -86,8 +86,9 @@ void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
char* natives;
char* snapshot;
- LoadFromFiles(RelativePath(&natives, directory_path, "natives_blob.bin"),
- RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
+ LoadFromFiles(
+ base::RelativePath(&natives, directory_path, "natives_blob.bin"),
+ base::RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
free(natives);
free(snapshot);
#endif // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/deps/v8/src/string-builder.h b/deps/v8/src/string-builder.h
index 192603f33f..edc6476a45 100644
--- a/deps/v8/src/string-builder.h
+++ b/deps/v8/src/string-builder.h
@@ -180,7 +180,6 @@ class FixedArrayBuilder {
return target_array;
}
-
private:
Handle<FixedArray> array_;
int length_;
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 3ae4580709..acfb917414 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -533,7 +533,7 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
print_name = true;
} else if (isolate->context() != nullptr) {
if (!receiver->IsJSObject()) {
- receiver = receiver->GetRootMap(isolate)->prototype();
+ receiver = receiver->GetPrototypeChainRootMap(isolate)->prototype();
}
for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
diff --git a/deps/v8/src/tracing/trace-event.cc b/deps/v8/src/tracing/trace-event.cc
index 440af193e9..97da1de056 100644
--- a/deps/v8/src/tracing/trace-event.cc
+++ b/deps/v8/src/tracing/trace-event.cc
@@ -8,30 +8,26 @@
#include "src/counters.h"
#include "src/isolate.h"
+#include "src/tracing/traced-value.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
namespace tracing {
-// A global flag used as a shortcut to check for the
-// v8.runtime-call-stats category due to its high frequency use.
-base::Atomic32 kRuntimeCallStatsTracingEnabled = false;
-
v8::Platform* TraceEventHelper::GetCurrentPlatform() {
return v8::internal::V8::GetCurrentPlatform();
}
void CallStatsScopedTracer::AddEndTraceEvent() {
if (!has_parent_scope_ && p_data_->isolate) {
+ auto value = v8::tracing::TracedValue::Create();
+ p_data_->isolate->counters()->runtime_call_stats()->Dump(value.get());
v8::internal::tracing::AddTraceEvent(
TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
v8::internal::tracing::kNoId, TRACE_EVENT_FLAG_NONE,
- "runtime-call-stats", TRACE_STR_COPY(p_data_->isolate->counters()
- ->runtime_call_stats()
- ->Dump()
- .c_str()));
+ "runtime-call-stats", std::move(value));
} else {
v8::internal::tracing::AddTraceEvent(
TRACE_EVENT_PHASE_END, p_data_->category_group_enabled, p_data_->name,
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index 35d2e1507d..a228608f1b 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -49,33 +49,6 @@ enum CategoryGroupEnabledFlags {
#define TRACE_ID_WITH_SCOPE(scope, id) \
trace_event_internal::TraceID::WithScope(scope, id)
-// Sets the current sample state to the given category and name (both must be
-// constant strings). These states are intended for a sampling profiler.
-// Implementation note: we store category and name together because we don't
-// want the inconsistency/expense of storing two pointers.
-// |thread_bucket| is [0..2] and is used to statically isolate samples in one
-// thread from others.
-#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
- name) \
- v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \
- category "\0" name)
-
-// Returns a current sampling state of the given bucket.
-#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
- v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current()
-
-// Creates a scope of a sampling state of the given bucket.
-//
-// { // The sampling state is set within this scope.
-// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
-// ...;
-// }
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \
- name) \
- v8::internal::TraceEventSamplingStateScope<bucket_number> \
- traceEventSamplingScope(category "\0" name);
-
-
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
(kEnabledForRecording_CategoryGroupEnabledFlags | \
@@ -138,12 +111,6 @@ enum CategoryGroupEnabledFlags {
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
v8::base::NoBarrier_Store(&(var), (value))
-// The thread buckets for the sampling profiler.
-extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
-#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
- g_trace_state[thread_bucket]
-
////////////////////////////////////////////////////////////////////////////////
// Implementation detail: trace event macros create temporary variables
@@ -282,20 +249,10 @@ extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
INTERNAL_TRACE_EVENT_UID(ScopedContext) \
INTERNAL_TRACE_EVENT_UID(scoped_context)(context);
-#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \
- base::NoBarrier_Load(&v8::internal::tracing::kRuntimeCallStatsTracingEnabled)
-
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
#define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
- { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO( \
- TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")); \
- base::NoBarrier_Store( \
- &v8::internal::tracing::kRuntimeCallStatsTracingEnabled, \
- INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()); \
- } \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
v8::internal::tracing::CallStatsScopedTracer INTERNAL_TRACE_EVENT_UID( \
tracer); \
@@ -613,48 +570,6 @@ class ScopedTracer {
Data data_;
};
-// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
-class ScopedTraceBinaryEfficient {
- public:
- ScopedTraceBinaryEfficient(const char* category_group, const char* name);
- ~ScopedTraceBinaryEfficient();
-
- private:
- const uint8_t* category_group_enabled_;
- const char* name_;
- uint64_t event_handle_;
-};
-
-// TraceEventSamplingStateScope records the current sampling state
-// and sets a new sampling state. When the scope exists, it restores
-// the sampling state having recorded.
-template <size_t BucketNumber>
-class TraceEventSamplingStateScope {
- public:
- explicit TraceEventSamplingStateScope(const char* category_and_name) {
- previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
- TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
- }
-
- ~TraceEventSamplingStateScope() {
- TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
- }
-
- static V8_INLINE const char* Current() {
- return reinterpret_cast<const char*>(
- TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber]));
- }
-
- static V8_INLINE void Set(const char* category_and_name) {
- TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber],
- reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
- const_cast<char*>(category_and_name)));
- }
-
- private:
- const char* previous_state_;
-};
-
// Do not use directly.
class CallStatsScopedTracer {
public:
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
new file mode 100644
index 0000000000..81be6237f9
--- /dev/null
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/traced-value.h"
+
+#include "src/base/platform/platform.h"
+#include "src/conversions.h"
+
+namespace v8 {
+namespace tracing {
+
+namespace {
+
+#define DCHECK_CURRENT_CONTAINER_IS(x) DCHECK_EQ(x, nesting_stack_.back())
+#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) DCHECK_EQ(x, nesting_stack_.size())
+#ifdef DEBUG
+const bool kStackTypeDict = false;
+const bool kStackTypeArray = true;
+#define DEBUG_PUSH_CONTAINER(x) nesting_stack_.push_back(x)
+#define DEBUG_POP_CONTAINER() nesting_stack_.pop_back()
+#else
+#define DEBUG_PUSH_CONTAINER(x) ((void)0)
+#define DEBUG_POP_CONTAINER() ((void)0)
+#endif
+
+std::string EscapeString(const std::string& value) {
+ std::string result;
+ result.reserve(value.length() + 2);
+ result += '"';
+ size_t length = value.length();
+ char number_buffer[10];
+ for (size_t src = 0; src < length; ++src) {
+ char c = value[src];
+ switch (c) {
+ case '\t':
+ result += "\\t";
+ break;
+ case '\n':
+ result += "\\n";
+ break;
+ case '\"':
+ result += "\\\"";
+ break;
+ case '\\':
+ result += "\\\\";
+ break;
+ default:
+ if (c < '\040') {
+ base::OS::SNPrintF(
+ number_buffer, arraysize(number_buffer), "\\u%04X",
+ static_cast<unsigned>(static_cast<unsigned char>(c)));
+ result += number_buffer;
+ } else {
+ result += c;
+ }
+ }
+ }
+ result += '"';
+ return result;
+}
+
+} // namespace
+
+std::unique_ptr<TracedValue> TracedValue::Create() {
+ return std::unique_ptr<TracedValue>(new TracedValue());
+}
+
+TracedValue::TracedValue() : first_item_(true) {
+ DEBUG_PUSH_CONTAINER(kStackTypeDict);
+}
+
+TracedValue::~TracedValue() {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ DEBUG_POP_CONTAINER();
+ DCHECK_CONTAINER_STACK_DEPTH_EQ(0u);
+}
+
+void TracedValue::SetInteger(const char* name, int value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ WriteName(name);
+ data_ += std::to_string(value);
+}
+
+void TracedValue::SetDouble(const char* name, double value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ WriteName(name);
+ i::EmbeddedVector<char, 100> buffer;
+ data_ += DoubleToCString(value, buffer);
+}
+
+void TracedValue::SetBoolean(const char* name, bool value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ WriteName(name);
+ data_ += value ? "true" : "false";
+}
+
+void TracedValue::SetString(const char* name, const std::string& value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ WriteName(name);
+ data_ += EscapeString(value);
+}
+
+void TracedValue::BeginDictionary(const char* name) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ DEBUG_PUSH_CONTAINER(kStackTypeDict);
+ WriteName(name);
+ data_ += '{';
+ first_item_ = true;
+}
+
+void TracedValue::BeginArray(const char* name) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ DEBUG_PUSH_CONTAINER(kStackTypeArray);
+ WriteName(name);
+ data_ += '[';
+ first_item_ = true;
+}
+
+void TracedValue::AppendInteger(int value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ WriteComma();
+ data_ += std::to_string(value);
+}
+
+void TracedValue::AppendLongInteger(int64_t value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ WriteComma();
+ data_ += std::to_string(value);
+}
+
+void TracedValue::AppendDouble(double value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ WriteComma();
+ i::EmbeddedVector<char, 100> buffer;
+ data_ += DoubleToCString(value, buffer);
+}
+
+void TracedValue::AppendBoolean(bool value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ WriteComma();
+ data_ += value ? "true" : "false";
+}
+
+void TracedValue::AppendString(const std::string& value) {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ WriteComma();
+ data_ += EscapeString(value);
+}
+
+void TracedValue::BeginDictionary() {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ DEBUG_PUSH_CONTAINER(kStackTypeDict);
+ WriteComma();
+ data_ += '{';
+ first_item_ = true;
+}
+
+void TracedValue::BeginArray() {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ DEBUG_PUSH_CONTAINER(kStackTypeArray);
+ WriteComma();
+ data_ += '[';
+ first_item_ = true;
+}
+
+void TracedValue::EndDictionary() {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
+ DEBUG_POP_CONTAINER();
+ data_ += '}';
+ first_item_ = false;
+}
+
+void TracedValue::EndArray() {
+ DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
+ DEBUG_POP_CONTAINER();
+ data_ += ']';
+ first_item_ = false;
+}
+
+void TracedValue::WriteComma() {
+ if (first_item_) {
+ first_item_ = false;
+ } else {
+ data_ += ',';
+ }
+}
+
+void TracedValue::WriteName(const char* name) {
+ WriteComma();
+ data_ += '"';
+ data_ += name;
+ data_ += "\":";
+}
+
+void TracedValue::AppendAsTraceFormat(std::string* out) const {
+ *out += '{';
+ *out += data_;
+ *out += '}';
+}
+
+} // namespace tracing
+} // namespace v8
diff --git a/deps/v8/src/tracing/traced-value.h b/deps/v8/src/tracing/traced-value.h
new file mode 100644
index 0000000000..b5c265cd48
--- /dev/null
+++ b/deps/v8/src/tracing/traced-value.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRACING_TRACED_VALUE_H_
+#define V8_TRACING_TRACED_VALUE_H_
+
+#include <stddef.h>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "include/v8-platform.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace tracing {
+
+class TracedValue : public ConvertableToTraceFormat {
+ public:
+ ~TracedValue() override;
+
+ static std::unique_ptr<TracedValue> Create();
+
+ void EndDictionary();
+ void EndArray();
+
+ // These methods assume that |name| is a long lived "quoted" string.
+ void SetInteger(const char* name, int value);
+ void SetDouble(const char* name, double value);
+ void SetBoolean(const char* name, bool value);
+ void SetString(const char* name, const std::string& value);
+ void BeginDictionary(const char* name);
+ void BeginArray(const char* name);
+
+ void AppendInteger(int);
+ void AppendLongInteger(int64_t);
+ void AppendDouble(double);
+ void AppendBoolean(bool);
+ void AppendString(const std::string&);
+ void BeginArray();
+ void BeginDictionary();
+
+ // ConvertableToTraceFormat implementation.
+ void AppendAsTraceFormat(std::string* out) const override;
+
+ private:
+ TracedValue();
+
+ void WriteComma();
+ void WriteName(const char* name);
+
+#ifdef DEBUG
+ // In debug builds checks the pairings of {Begin,End}{Dictionary,Array}
+ std::vector<bool> nesting_stack_;
+#endif
+
+ std::string data_;
+ bool first_item_;
+
+ DISALLOW_COPY_AND_ASSIGN(TracedValue);
+};
+
+} // namespace tracing
+} // namespace v8
+
+#endif // V8_TRACING_TRACED_VALUE_H_
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
new file mode 100644
index 0000000000..3fffd2f9ca
--- /dev/null
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/tracing-category-observer.h"
+
+#include "src/flags.h"
+#include "src/tracing/trace-event.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace tracing {
+
+TracingCategoryObserver* TracingCategoryObserver::instance_ = nullptr;
+
+void TracingCategoryObserver::SetUp() {
+ TracingCategoryObserver::instance_ = new TracingCategoryObserver();
+ v8::internal::V8::GetCurrentPlatform()->AddTraceStateObserver(
+ TracingCategoryObserver::instance_);
+ TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"));
+ TRACE_EVENT_WARMUP_CATEGORY(
+ TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"));
+ TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"));
+}
+
+void TracingCategoryObserver::TearDown() {
+ v8::internal::V8::GetCurrentPlatform()->RemoveTraceStateObserver(
+ TracingCategoryObserver::instance_);
+ delete TracingCategoryObserver::instance_;
+}
+
+void TracingCategoryObserver::OnTraceEnabled() {
+ bool enabled = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"), &enabled);
+ if (enabled) {
+ v8::internal::FLAG_runtime_stats |= ENABLED_BY_TRACING;
+ }
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+ TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"), &enabled);
+ if (enabled) {
+ v8::internal::FLAG_runtime_stats |= ENABLED_BY_SAMPLING;
+ }
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
+ &enabled);
+ if (enabled) {
+ v8::internal::FLAG_gc_stats |= ENABLED_BY_TRACING;
+ }
+}
+
+void TracingCategoryObserver::OnTraceDisabled() {
+ v8::internal::FLAG_runtime_stats &=
+ ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
+ v8::internal::FLAG_gc_stats &= ~ENABLED_BY_TRACING;
+}
+
+} // namespace tracing
+} // namespace v8
diff --git a/deps/v8/src/tracing/tracing-category-observer.h b/deps/v8/src/tracing/tracing-category-observer.h
new file mode 100644
index 0000000000..66dd2d78f1
--- /dev/null
+++ b/deps/v8/src/tracing/tracing-category-observer.h
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
+#define V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
+
+#include "include/v8-platform.h"
+
+namespace v8 {
+namespace tracing {
+
+class TracingCategoryObserver : public Platform::TraceStateObserver {
+ public:
+ enum Mode {
+ ENABLED_BY_NATIVE = 1 << 0,
+ ENABLED_BY_TRACING = 1 << 1,
+ ENABLED_BY_SAMPLING = 1 << 2,
+ };
+
+ static void SetUp();
+ static void TearDown();
+
+ // v8::Platform::TraceStateObserver
+ void OnTraceEnabled() final;
+ void OnTraceDisabled() final;
+
+ private:
+ static TracingCategoryObserver* instance_;
+};
+
+} // namespace tracing
+} // namespace v8
+
+#endif // V8_TRACING_TRACING_CATEGORY_OBSERVER_H_
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 828a673d7f..df28c2c991 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -26,7 +26,7 @@ void TransitionArray::set_next_link(Object* next, WriteBarrierMode mode) {
bool TransitionArray::HasPrototypeTransitions() {
- return get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
+ return get(kPrototypeTransitionsIndex) != Smi::kZero;
}
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 082ebc16b0..88c1549579 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -395,7 +395,7 @@ Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
int slack) {
Handle<FixedArray> array = isolate->factory()->NewTransitionArray(
LengthFor(number_of_transitions + slack));
- array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
+ array->set(kPrototypeTransitionsIndex, Smi::kZero);
array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
return Handle<TransitionArray>::cast(array);
}
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index f70f01888f..58dfe33ac7 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -128,6 +128,7 @@ BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
case BinaryOperationFeedback::kSignedSmall:
return BinaryOperationHint::kSignedSmall;
case BinaryOperationFeedback::kNumber:
+ case BinaryOperationFeedback::kNumberOrOddball:
return BinaryOperationHint::kNumberOrOddball;
case BinaryOperationFeedback::kString:
return BinaryOperationHint::kString;
@@ -158,8 +159,6 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
int* vector_ic_count,
bool code_is_interpreted) {
- Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(GetIsolate());
Object* megamorphic_sentinel =
*TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
int with = 0;
@@ -170,47 +169,58 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
- Object* obj = Get(slot);
- if (kind == FeedbackVectorSlotKind::GENERAL) {
- continue;
- }
- total++;
-
- if (obj != uninitialized_sentinel) {
- if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
- kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
- // If we are not running interpreted code, we need to ignore
- // the special ic slots for binaryop/compare used by the
- // interpreter.
- // TODO(mvstanton): Remove code_is_interpreted when full code
- // is retired from service.
- if (!code_is_interpreted) continue;
-
- DCHECK(obj->IsSmi());
- int op_feedback = static_cast<int>(Smi::cast(obj)->value());
- if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
- CompareOperationHint hint =
- CompareOperationHintFromFeedback(op_feedback);
- if (hint == CompareOperationHint::kAny) {
- gen++;
- } else if (hint != CompareOperationHint::kNone) {
- with++;
- }
- } else {
- DCHECK(kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
- BinaryOperationHint hint =
- BinaryOperationHintFromFeedback(op_feedback);
- if (hint == BinaryOperationHint::kAny) {
- gen++;
- } else if (hint != BinaryOperationHint::kNone) {
- with++;
+ Object* const obj = Get(slot);
+ switch (kind) {
+ case FeedbackVectorSlotKind::CALL_IC:
+ case FeedbackVectorSlotKind::LOAD_IC:
+ case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+ case FeedbackVectorSlotKind::STORE_IC:
+ case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+ with++;
+ } else if (obj == megamorphic_sentinel) {
+ gen++;
+ }
+ total++;
+ break;
+ }
+ case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+ case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+ // If we are not running interpreted code, we need to ignore the special
+ // IC slots for binaryop/compare used by the interpreter.
+ // TODO(mvstanton): Remove code_is_interpreted when full code is retired
+ // from service.
+ if (code_is_interpreted) {
+ int const feedback = Smi::cast(obj)->value();
+ if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+ CompareOperationHint hint =
+ CompareOperationHintFromFeedback(feedback);
+ if (hint == CompareOperationHint::kAny) {
+ gen++;
+ } else if (hint != CompareOperationHint::kNone) {
+ with++;
+ }
+ } else {
+ DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC, kind);
+ BinaryOperationHint hint =
+ BinaryOperationHintFromFeedback(feedback);
+ if (hint == BinaryOperationHint::kAny) {
+ gen++;
+ } else if (hint != BinaryOperationHint::kNone) {
+ with++;
+ }
}
+ total++;
}
- } else if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
- with++;
- } else if (obj == megamorphic_sentinel) {
- gen++;
+ break;
}
+ case FeedbackVectorSlotKind::GENERAL:
+ break;
+ case FeedbackVectorSlotKind::INVALID:
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ UNREACHABLE();
+ break;
}
}
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 30bc2d4153..2ba9690b9f 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -5,7 +5,7 @@
#include "src/type-feedback-vector.h"
#include "src/code-stubs.h"
-#include "src/ic/ic.h"
+#include "src/ic/ic-inl.h"
#include "src/ic/ic-state.h"
#include "src/objects.h"
#include "src/type-feedback-vector-inl.h"
@@ -91,7 +91,7 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
array->set(kSlotsCountIndex, Smi::FromInt(slot_count));
// Fill the bit-vector part with zeros.
for (int i = 0; i < slot_kinds_length; i++) {
- array->set(kReservedIndexCount + i, Smi::FromInt(0));
+ array->set(kReservedIndexCount + i, Smi::kZero);
}
Handle<TypeFeedbackMetadata> metadata =
@@ -121,7 +121,7 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
}
DCHECK_EQ(name_count, name_index);
metadata->set(kNamesTableIndex,
- name_count ? static_cast<Object*>(*names) : Smi::FromInt(0));
+ name_count ? static_cast<Object*>(*names) : Smi::kZero);
// It's important that the TypeFeedbackMetadata have a COW map, since it's
// pointed to by both a SharedFunctionInfo and indirectly by closures through
@@ -241,13 +241,13 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set(kMetadataIndex, *metadata);
- array->set(kInvocationCountIndex, Smi::FromInt(0));
+ array->set(kInvocationCountIndex, Smi::kZero);
DisallowHeapAllocation no_gc;
// Ensure we can skip the write barrier
Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
- DCHECK_EQ(*factory->uninitialized_symbol(), *uninitialized_sentinel);
+ DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
for (int i = 0; i < slot_count;) {
FeedbackVectorSlot slot(i);
FeedbackVectorSlotKind kind = metadata->GetKind(slot);
@@ -256,16 +256,16 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
Object* value;
if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
- value = *factory->empty_weak_cell();
+ value = isolate->heap()->empty_weak_cell();
} else if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
- value = Smi::FromInt(0);
+ value = Smi::kZero;
} else {
value = *uninitialized_sentinel;
}
array->set(index, value, SKIP_WRITE_BARRIER);
- value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::FromInt(0)
+ value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::kZero
: *uninitialized_sentinel;
for (int j = 1; j < entry_size; j++) {
array->set(index + j, value, SKIP_WRITE_BARRIER);
@@ -351,7 +351,7 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
DCHECK(Get(slot)->IsSmi());
// don't clear these smi slots.
- // Set(slot, Smi::FromInt(0));
+ // Set(slot, Smi::kZero);
break;
}
case FeedbackVectorSlotKind::GENERAL: {
@@ -657,7 +657,7 @@ void CallICNexus::ConfigureUninitialized() {
Isolate* isolate = GetIsolate();
SetFeedback(*TypeFeedbackVector::UninitializedSentinel(isolate),
SKIP_WRITE_BARRIER);
- SetFeedbackExtra(Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
}
void CallICNexus::ConfigureMonomorphicArray() {
@@ -733,18 +733,16 @@ void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
}
}
-
void StoreICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
- Handle<Code> handler) {
+ Handle<Object> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
SetFeedback(*cell);
SetFeedbackExtra(*handler);
}
-
void KeyedStoreICNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
- Handle<Code> handler) {
+ Handle<Object> handler) {
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
if (name.is_null()) {
SetFeedback(*cell);
@@ -851,17 +849,10 @@ int GetStepSize(FixedArray* array, Isolate* isolate) {
DCHECK(array->length() >= 2);
Object* second = array->get(1);
if (second->IsWeakCell() || second->IsUndefined(isolate)) return 3;
- DCHECK(second->IsCode() || second->IsSmi());
+ DCHECK(IC::IsHandler(second));
return 2;
}
-#ifdef DEBUG // Only used by DCHECKs below.
-bool IsHandler(Object* object) {
- return object->IsSmi() ||
- (object->IsCode() && Code::cast(object)->is_handler());
-}
-#endif
-
} // namespace
int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
@@ -914,7 +905,7 @@ MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Map* array_map = Map::cast(cell->value());
if (array_map == *map) {
Object* code = array->get(i + increment - 1);
- DCHECK(IsHandler(code));
+ DCHECK(IC::IsHandler(code));
return handle(code, isolate);
}
}
@@ -925,7 +916,7 @@ MaybeHandle<Object> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Map* cell_map = Map::cast(cell->value());
if (cell_map == *map) {
Object* code = GetFeedbackExtra();
- DCHECK(IsHandler(code));
+ DCHECK(IC::IsHandler(code));
return handle(code, isolate);
}
}
@@ -952,7 +943,7 @@ bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
// Be sure to skip handlers whose maps have been cleared.
if (!cell->cleared()) {
Object* code = array->get(i + increment - 1);
- DCHECK(IsHandler(code));
+ DCHECK(IC::IsHandler(code));
code_list->Add(handle(code, isolate));
count++;
}
@@ -961,7 +952,7 @@ bool FeedbackNexus::FindHandlers(List<Handle<Object>>* code_list,
WeakCell* cell = WeakCell::cast(feedback);
if (!cell->cleared()) {
Object* code = GetFeedbackExtra();
- DCHECK(IsHandler(code));
+ DCHECK(IC::IsHandler(code));
code_list->Add(handle(code, isolate));
count++;
}
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index af69499b04..3bb51c1d34 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -609,7 +609,7 @@ class StoreICNexus : public FeedbackNexus {
void Clear(Code* host);
- void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Code> handler);
+ void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
void ConfigurePolymorphic(MapHandleList* maps,
List<Handle<Object>>* handlers);
@@ -637,7 +637,7 @@ class KeyedStoreICNexus : public FeedbackNexus {
// name can be a null handle for element loads.
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
- Handle<Code> handler);
+ Handle<Object> handler);
// name can be null.
void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
List<Handle<Object>>* handlers);
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index ff00eeff8b..1c40c59b63 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -67,6 +67,8 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
return os << "SimdValue";
case ToBooleanHint::kAny:
return os << "Any";
+ case ToBooleanHint::kNeedsMap:
+ return os << "NeedsMap";
}
UNREACHABLE();
return os;
@@ -87,5 +89,26 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
return os;
}
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
+ switch (flags) {
+ case STRING_ADD_CHECK_NONE:
+ return os << "CheckNone";
+ case STRING_ADD_CHECK_LEFT:
+ return os << "CheckLeft";
+ case STRING_ADD_CHECK_RIGHT:
+ return os << "CheckRight";
+ case STRING_ADD_CHECK_BOTH:
+ return os << "CheckBoth";
+ case STRING_ADD_CONVERT_LEFT:
+ return os << "ConvertLeft";
+ case STRING_ADD_CONVERT_RIGHT:
+ return os << "ConvertRight";
+ case STRING_ADD_CONVERT:
+ break;
+ }
+ UNREACHABLE();
+ return os;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index cdf470956f..e6138c771d 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -55,7 +55,9 @@ enum class ToBooleanHint : uint16_t {
kHeapNumber = 1u << 7,
kSimdValue = 1u << 8,
kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
- kSymbol | kHeapNumber | kSimdValue
+ kSymbol | kHeapNumber | kSimdValue,
+ kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber | kSimdValue,
+ kCanBeUndetectable = kReceiver,
};
std::ostream& operator<<(std::ostream&, ToBooleanHint);
@@ -66,6 +68,23 @@ std::ostream& operator<<(std::ostream&, ToBooleanHints);
DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
+enum StringAddFlags {
+ // Omit both parameter checks.
+ STRING_ADD_CHECK_NONE = 0,
+ // Check left parameter.
+ STRING_ADD_CHECK_LEFT = 1 << 0,
+ // Check right parameter.
+ STRING_ADD_CHECK_RIGHT = 1 << 1,
+ // Check both parameters.
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
+ // Convert parameters when check fails (instead of throwing an exception).
+ STRING_ADD_CONVERT = 1 << 2,
+ STRING_ADD_CONVERT_LEFT = STRING_ADD_CHECK_LEFT | STRING_ADD_CONVERT,
+ STRING_ADD_CONVERT_RIGHT = STRING_ADD_CHECK_RIGHT | STRING_ADD_CONVERT
+};
+
+std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index ce0ab6ca6a..fd3a2dc01e 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -210,19 +210,20 @@ AstType* CompareOpHintToType(CompareOperationHint hint) {
return AstType::None();
}
-AstType* BinaryOpHintToType(BinaryOperationHint hint) {
+AstType* BinaryOpFeedbackToType(int hint) {
switch (hint) {
- case BinaryOperationHint::kNone:
+ case BinaryOperationFeedback::kNone:
return AstType::None();
- case BinaryOperationHint::kSignedSmall:
+ case BinaryOperationFeedback::kSignedSmall:
return AstType::SignedSmall();
- case BinaryOperationHint::kSigned32:
- return AstType::Signed32();
- case BinaryOperationHint::kNumberOrOddball:
+ case BinaryOperationFeedback::kNumber:
return AstType::Number();
- case BinaryOperationHint::kString:
+ case BinaryOperationFeedback::kString:
return AstType::String();
- case BinaryOperationHint::kAny:
+ case BinaryOperationFeedback::kNumberOrOddball:
+ return AstType::NumberOrOddball();
+ case BinaryOperationFeedback::kAny:
+ default:
return AstType::Any();
}
UNREACHABLE();
@@ -262,14 +263,33 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackVectorSlot slot,
CompareICStub stub(code->stub_key(), isolate());
AstType* left_type_from_ic =
CompareICState::StateToType(zone(), stub.left());
- *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
AstType* right_type_from_ic =
CompareICState::StateToType(zone(), stub.right());
- *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
AstType* combined_type_from_ic =
CompareICState::StateToType(zone(), stub.state(), map);
- *combined_type =
- AstType::Union(*combined_type, combined_type_from_ic, zone());
+ // Full-codegen collects lhs and rhs feedback seperately and Crankshaft
+ // could use this information to optimize better. So if combining the
+ // feedback has made the feedback less precise, we should use the feedback
+ // only from Full-codegen. If the union of the feedback from Full-codegen
+ // is same as that of Ignition, there is no need to combine feedback from
+ // from Ignition.
+ AstType* combined_type_from_fcg = AstType::Union(
+ left_type_from_ic,
+ AstType::Union(right_type_from_ic, combined_type_from_ic, zone()),
+ zone());
+ if (combined_type_from_fcg == *left_type) {
+ // Full-codegen collects information about lhs, rhs and result types
+ // seperately. So just retain that information.
+ *left_type = left_type_from_ic;
+ *right_type = right_type_from_ic;
+ *combined_type = combined_type_from_ic;
+ } else {
+ // Combine Ignition and Full-codegen feedbacks.
+ *left_type = AstType::Union(*left_type, left_type_from_ic, zone());
+ *right_type = AstType::Union(*right_type, right_type_from_ic, zone());
+ *combined_type =
+ AstType::Union(*combined_type, combined_type_from_ic, zone());
+ }
}
}
@@ -299,7 +319,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
DCHECK(!slot.IsInvalid());
BinaryOpICNexus nexus(feedback_vector_, slot);
*left = *right = *result =
- BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+ BinaryOpFeedbackToType(Smi::cast(nexus.GetFeedback())->value());
*fixed_right_arg = Nothing<int>();
*allocation_site = Handle<AllocationSite>::null();
@@ -311,9 +331,29 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
BinaryOpICState state(isolate(), code->extra_ic_state());
DCHECK_EQ(op, state.op());
- *left = AstType::Union(*left, state.GetLeftType(), zone());
- *right = AstType::Union(*right, state.GetRightType(), zone());
- *result = AstType::Union(*result, state.GetResultType(), zone());
+ // Full-codegen collects lhs and rhs feedback seperately and Crankshaft
+ // could use this information to optimize better. So if combining the
+ // feedback has made the feedback less precise, we should use the feedback
+ // only from Full-codegen. If the union of the feedback from Full-codegen
+ // is same as that of Ignition, there is no need to combine feedback from
+ // from Ignition.
+ AstType* combined_type_from_fcg = AstType::Union(
+ state.GetLeftType(),
+ AstType::Union(state.GetRightType(), state.GetResultType(), zone()),
+ zone());
+ if (combined_type_from_fcg == *left) {
+ // Full-codegen collects information about lhs, rhs and result types
+ // seperately. So just retain that information.
+ *left = state.GetLeftType();
+ *right = state.GetRightType();
+ *result = state.GetResultType();
+ } else {
+ // Combine Ignition and Full-codegen feedback.
+ *left = AstType::Union(*left, state.GetLeftType(), zone());
+ *right = AstType::Union(*right, state.GetRightType(), zone());
+ *result = AstType::Union(*result, state.GetResultType(), zone());
+ }
+ // Ignition does not collect this feedback.
*fixed_right_arg = state.fixed_right_arg();
AllocationSite* first_allocation_site = code->FindFirstAllocationSite();
@@ -334,7 +374,8 @@ AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id,
DCHECK(!slot.IsInvalid());
BinaryOpICNexus nexus(feedback_vector_, slot);
- AstType* type = BinaryOpHintToType(nexus.GetBinaryOperationFeedback());
+ AstType* type =
+ BinaryOpFeedbackToType(Smi::cast(nexus.GetFeedback())->value());
if (!object->IsCode()) return type;
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index 015f8a27f2..83d4a03618 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -319,7 +319,7 @@ uchar Utf8::ValueOfIncremental(byte next, Utf8IncrementalBuffer* buffer) {
} else {
// Otherwise, process the previous byte and save the next byte for next
// time.
- DCHECK_EQ(0, *buffer);
+ DCHECK_EQ(0u, *buffer);
*buffer = next;
return t;
}
diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h
index 1299a8ff9a..1b98a472f2 100644
--- a/deps/v8/src/unicode.h
+++ b/deps/v8/src/unicode.h
@@ -178,16 +178,16 @@ struct Lowercase {
struct Letter {
static bool Is(uchar c);
};
-struct ID_Start {
+struct V8_EXPORT_PRIVATE ID_Start {
static bool Is(uchar c);
};
-struct ID_Continue {
+struct V8_EXPORT_PRIVATE ID_Continue {
static bool Is(uchar c);
};
-struct WhiteSpace {
+struct V8_EXPORT_PRIVATE WhiteSpace {
static bool Is(uchar c);
};
-struct LineTerminator {
+struct V8_EXPORT_PRIVATE LineTerminator {
static bool Is(uchar c);
};
struct ToLowercase {
diff --git a/deps/v8/src/uri.cc b/deps/v8/src/uri.cc
index de7bd9bf57..14e22146e7 100644
--- a/deps/v8/src/uri.cc
+++ b/deps/v8/src/uri.cc
@@ -51,7 +51,7 @@ bool DecodeOctets(const uint8_t* octets, int length, List<uc16>* buffer) {
return false;
}
- if (value <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ if (value <= static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
buffer->Add(value);
} else {
buffer->Add(unibrow::Utf16::LeadSurrogate(value));
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 314ea9be9e..bd5589cc89 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -922,7 +922,7 @@ class BailoutId {
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
friend size_t hash_value(BailoutId);
- friend std::ostream& operator<<(std::ostream&, BailoutId);
+ V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&, BailoutId);
private:
static const int kNoneId = -1;
@@ -964,7 +964,7 @@ class TokenDispenserForFinally {
// I/O support.
// Our version of printf().
-void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
+V8_EXPORT_PRIVATE void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
void PRINTF_FORMAT(2, 3) PrintF(FILE* out, const char* format, ...);
// Prepends the current process ID to the output.
@@ -1150,9 +1150,9 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exits tells whether the file existed.
-Vector<const char> ReadFile(const char* filename,
- bool* exists,
- bool verbose = true);
+V8_EXPORT_PRIVATE Vector<const char> ReadFile(const char* filename,
+ bool* exists,
+ bool verbose = true);
Vector<const char> ReadFile(FILE* file,
bool* exists,
bool verbose = true);
@@ -1597,6 +1597,86 @@ static inline void WriteLittleEndianValue(void* p, V value) {
}
#endif // V8_TARGET_LITTLE_ENDIAN
}
+
+// Represents a linked list that threads through the nodes in the linked list.
+// Entries in the list are pointers to nodes. The nodes need to have a T**
+// next() method that returns the location where the next value is stored.
+template <typename T>
+class ThreadedList final {
+ public:
+ ThreadedList() : head_(nullptr), tail_(&head_) {}
+ void Add(T* v) {
+ DCHECK_NULL(*tail_);
+ DCHECK_NULL(*v->next());
+ *tail_ = v;
+ tail_ = v->next();
+ }
+
+ void Clear() {
+ head_ = nullptr;
+ tail_ = &head_;
+ }
+
+ class Iterator final {
+ public:
+ Iterator& operator++() {
+ entry_ = (*entry_)->next();
+ return *this;
+ }
+ bool operator!=(const Iterator& other) { return entry_ != other.entry_; }
+ T* operator*() { return *entry_; }
+ Iterator& operator=(T* entry) {
+ T* next = *(*entry_)->next();
+ *entry->next() = next;
+ *entry_ = entry;
+ return *this;
+ }
+
+ private:
+ explicit Iterator(T** entry) : entry_(entry) {}
+
+ T** entry_;
+
+ friend class ThreadedList;
+ };
+
+ Iterator begin() { return Iterator(&head_); }
+ Iterator end() { return Iterator(tail_); }
+
+ void Rewind(Iterator reset_point) {
+ tail_ = reset_point.entry_;
+ *tail_ = nullptr;
+ }
+
+ void MoveTail(ThreadedList<T>* parent, Iterator location) {
+ if (parent->end() != location) {
+ DCHECK_NULL(*tail_);
+ *tail_ = *location;
+ tail_ = parent->tail_;
+ parent->Rewind(location);
+ }
+ }
+
+ bool is_empty() const { return head_ == nullptr; }
+
+ // Slow. For testing purposes.
+ int LengthForTest() {
+ int result = 0;
+ for (Iterator t = begin(); t != end(); ++t) ++result;
+ return result;
+ }
+ T* AtForTest(int i) {
+ Iterator t = begin();
+ while (i-- > 0) ++t;
+ return *t;
+ }
+
+ private:
+ T* head_;
+ T** tail_;
+ DISALLOW_COPY_AND_ASSIGN(ThreadedList);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 08796f3f0e..7f0230aa4c 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -20,7 +20,7 @@
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
-
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
@@ -94,11 +94,13 @@ void V8::InitializePlatform(v8::Platform* platform) {
CHECK(!platform_);
CHECK(platform);
platform_ = platform;
+ v8::tracing::TracingCategoryObserver::SetUp();
}
void V8::ShutdownPlatform() {
CHECK(platform_);
+ v8::tracing::TracingCategoryObserver::TearDown();
platform_ = NULL;
}
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index 9a3824742f..020ec0928f 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -37,6 +37,7 @@
'v8_enable_inspector%': 0,
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
'mkpeephole_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mkpeephole<(EXECUTABLE_SUFFIX)',
+ 'v8_os_page_size%': 0,
},
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi', 'inspector/inspector.gypi'],
'targets': [
@@ -68,16 +69,6 @@
'USING_V8_SHARED',
],
},
- 'target_conditions': [
- ['OS=="android" and _toolset=="target"', {
- 'libraries': [
- '-llog',
- ],
- 'include_dirs': [
- 'src/common/android/include',
- ],
- }],
- ],
'conditions': [
['OS=="mac"', {
'xcode_settings': {
@@ -316,6 +307,9 @@
['v8_vector_stores!=0', {
'mksnapshot_flags': ['--vector-stores'],
}],
+ ['v8_os_page_size!=0', {
+ 'mksnapshot_flags': ['--v8_os_page_size', '<(v8_os_page_size)'],
+ }],
],
},
'conditions': [
@@ -433,6 +427,8 @@
'asmjs/asm-types.h',
'asmjs/asm-wasm-builder.cc',
'asmjs/asm-wasm-builder.h',
+ 'asmjs/switch-logic.h',
+ 'asmjs/switch-logic.cc',
'assembler.cc',
'assembler.h',
'assert-scope.h',
@@ -500,6 +496,7 @@
'builtins/builtins-math.cc',
'builtins/builtins-number.cc',
'builtins/builtins-object.cc',
+ 'builtins/builtins-promise.cc',
'builtins/builtins-proxy.cc',
'builtins/builtins-reflect.cc',
'builtins/builtins-regexp.cc',
@@ -709,14 +706,16 @@
'compiler/scheduler.h',
'compiler/select-lowering.cc',
'compiler/select-lowering.h',
+ 'compiler/simd-scalar-lowering.cc',
+ 'compiler/simd-scalar-lowering.h',
'compiler/simplified-lowering.cc',
'compiler/simplified-lowering.h',
'compiler/simplified-operator-reducer.cc',
'compiler/simplified-operator-reducer.h',
'compiler/simplified-operator.cc',
'compiler/simplified-operator.h',
- 'compiler/source-position.cc',
- 'compiler/source-position.h',
+ 'compiler/compiler-source-position-table.cc',
+ 'compiler/compiler-source-position-table.h',
'compiler/state-values-utils.cc',
'compiler/state-values-utils.h',
'compiler/store-store-elimination.cc',
@@ -741,10 +740,12 @@
'compiler/wasm-compiler.cc',
'compiler/wasm-compiler.h',
'compiler/wasm-linkage.cc',
- 'compiler/zone-pool.cc',
- 'compiler/zone-pool.h',
+ 'compiler/zone-stats.cc',
+ 'compiler/zone-stats.h',
'compiler-dispatcher/compiler-dispatcher-job.cc',
'compiler-dispatcher/compiler-dispatcher-job.h',
+ 'compiler-dispatcher/compiler-dispatcher-tracer.cc',
+ 'compiler-dispatcher/compiler-dispatcher-tracer.h',
'compiler-dispatcher/optimizing-compile-dispatcher.cc',
'compiler-dispatcher/optimizing-compile-dispatcher.h',
'compiler.cc',
@@ -788,8 +789,6 @@
'crankshaft/hydrogen-instructions.h',
'crankshaft/hydrogen-load-elimination.cc',
'crankshaft/hydrogen-load-elimination.h',
- 'crankshaft/hydrogen-mark-deoptimize.cc',
- 'crankshaft/hydrogen-mark-deoptimize.h',
'crankshaft/hydrogen-mark-unreachable.cc',
'crankshaft/hydrogen-mark-unreachable.h',
'crankshaft/hydrogen-osr.cc',
@@ -830,6 +829,7 @@
'dateparser.h',
'debug/debug-evaluate.cc',
'debug/debug-evaluate.h',
+ 'debug/debug-interface.h',
'debug/debug-frames.cc',
'debug/debug-frames.h',
'debug/debug-scopes.cc',
@@ -933,7 +933,6 @@
'heap/objects-visiting.cc',
'heap/objects-visiting.h',
'heap/page-parallel-job.h',
- 'heap/remembered-set.cc',
'heap/remembered-set.h',
'heap/scavenge-job.h',
'heap/scavenge-job.cc',
@@ -950,12 +949,14 @@
'i18n.h',
'icu_util.cc',
'icu_util.h',
+ 'ic/access-compiler-data.h',
'ic/access-compiler.cc',
'ic/access-compiler.h',
'ic/call-optimization.cc',
'ic/call-optimization.h',
'ic/handler-compiler.cc',
'ic/handler-compiler.h',
+ 'ic/handler-configuration-inl.h',
'ic/handler-configuration.h',
'ic/ic-inl.h',
'ic/ic-state.cc',
@@ -964,6 +965,8 @@
'ic/ic.h',
'ic/ic-compiler.cc',
'ic/ic-compiler.h',
+ 'ic/keyed-store-generic.cc',
+ 'ic/keyed-store-generic.h',
'identity-map.cc',
'identity-map.h',
'interface-descriptors.cc',
@@ -1109,6 +1112,8 @@
'profiler/tracing-cpu-profiler.h',
'profiler/unbound-queue-inl.h',
'profiler/unbound-queue.h',
+ 'promise-utils.h',
+ 'promise-utils.cc',
'property-descriptor.cc',
'property-descriptor.h',
'property-details.h',
@@ -1134,6 +1139,8 @@
'regexp/regexp-parser.h',
'regexp/regexp-stack.cc',
'regexp/regexp-stack.h',
+ 'regexp/regexp-utils.cc',
+ 'regexp/regexp-utils.h',
'register-configuration.cc',
'register-configuration.h',
'runtime-profiler.cc',
@@ -1156,9 +1163,11 @@
'runtime/runtime-literals.cc',
'runtime/runtime-liveedit.cc',
'runtime/runtime-maths.cc',
+ 'runtime/runtime-module.cc',
'runtime/runtime-numbers.cc',
'runtime/runtime-object.cc',
'runtime/runtime-operators.cc',
+ 'runtime/runtime-promise.cc',
'runtime/runtime-proxy.cc',
'runtime/runtime-regexp.cc',
'runtime/runtime-scopes.cc',
@@ -1196,6 +1205,7 @@
'snapshot/startup-serializer.h',
'source-position-table.cc',
'source-position-table.h',
+ 'source-position.cc',
'source-position.h',
'splay-tree.h',
'splay-tree-inl.h',
@@ -1212,6 +1222,10 @@
'ic/stub-cache.h',
'tracing/trace-event.cc',
'tracing/trace-event.h',
+ 'tracing/traced-value.cc',
+ 'tracing/traced-value.h',
+ 'tracing/tracing-category-observer.cc',
+ 'tracing/tracing-category-observer.h',
'transitions-inl.h',
'transitions.cc',
'transitions.h',
@@ -1250,16 +1264,14 @@
'wasm/ast-decoder.h',
'wasm/decoder.h',
'wasm/leb-helper.h',
+ 'wasm/managed.h',
'wasm/module-decoder.cc',
'wasm/module-decoder.h',
- 'wasm/switch-logic.h',
- 'wasm/switch-logic.cc',
+ 'wasm/signature-map.cc',
+ 'wasm/signature-map.h',
'wasm/wasm-debug.cc',
- 'wasm/wasm-debug.h',
'wasm/wasm-external-refs.cc',
'wasm/wasm-external-refs.h',
- 'wasm/wasm-function-name-table.cc',
- 'wasm/wasm-function-name-table.h',
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
'wasm/wasm-macro-gen.h',
@@ -1269,6 +1281,8 @@
'wasm/wasm-module-builder.h',
'wasm/wasm-interpreter.cc',
'wasm/wasm-interpreter.h',
+ 'wasm/wasm-objects.cc',
+ 'wasm/wasm-objects.h',
'wasm/wasm-opcodes.cc',
'wasm/wasm-opcodes.h',
'wasm/wasm-result.cc',
@@ -1279,6 +1293,9 @@
'zone/zone-segment.h',
'zone/zone.cc',
'zone/zone.h',
+ 'zone/zone-chunk-list.h',
+ 'zone/zone-segment.cc',
+ 'zone/zone-segment.h',
'zone/zone-allocator.h',
'zone/zone-containers.h',
],
@@ -1763,7 +1780,7 @@
},
{
'target_name': 'v8_libbase',
- 'type': 'static_library',
+ 'type': '<(component)',
'variables': {
'optimize': 'max',
},
@@ -1774,18 +1791,10 @@
'base/adapters.h',
'base/atomic-utils.h',
'base/atomicops.h',
- 'base/atomicops_internals_arm64_gcc.h',
- 'base/atomicops_internals_arm_gcc.h',
'base/atomicops_internals_atomicword_compat.h',
- 'base/atomicops_internals_mac.h',
- 'base/atomicops_internals_mips_gcc.h',
- 'base/atomicops_internals_mips64_gcc.h',
- 'base/atomicops_internals_ppc_gcc.h',
- 'base/atomicops_internals_s390_gcc.h',
- 'base/atomicops_internals_tsan.h',
- 'base/atomicops_internals_x86_gcc.cc',
- 'base/atomicops_internals_x86_gcc.h',
+ 'base/atomicops_internals_portable.h',
'base/atomicops_internals_x86_msvc.h',
+ 'base/base-export.h',
'base/bits.cc',
'base/bits.h',
'base/build_config.h',
@@ -1824,6 +1833,7 @@
'base/platform/platform.h',
'base/platform/semaphore.cc',
'base/platform/semaphore.h',
+ 'base/ring-buffer.h',
'base/safe_conversions.h',
'base/safe_conversions_impl.h',
'base/safe_math.h',
@@ -1833,6 +1843,16 @@
'base/utils/random-number-generator.cc',
'base/utils/random-number-generator.h',
],
+ 'target_conditions': [
+ ['OS=="android" and _toolset=="target"', {
+ 'libraries': [
+ '-llog',
+ ],
+ 'include_dirs': [
+ 'src/common/android/include',
+ ],
+ }],
+ ],
'conditions': [
['want_separate_host_toolset==1 or \
want_separate_host_toolset_mkpeephole==1', {
@@ -1840,6 +1860,16 @@
}, {
'toolsets': ['target'],
}],
+ ['component=="shared_library"', {
+ 'defines': [
+ 'BUILDING_V8_BASE_SHARED',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'USING_V8_BASE_SHARED',
+ ],
+ },
+ }],
['OS=="linux"', {
'link_settings': {
'libraries': [
@@ -2047,7 +2077,7 @@
},
{
'target_name': 'v8_libplatform',
- 'type': 'static_library',
+ 'type': '<(component)',
'variables': {
'optimize': 'max',
},
@@ -2061,6 +2091,7 @@
],
'sources': [
'../include/libplatform/libplatform.h',
+ '../include/libplatform/libplatform-export.h',
'../include/libplatform/v8-tracing.h',
'libplatform/default-platform.cc',
'libplatform/default-platform.h',
@@ -2082,6 +2113,12 @@
}, {
'toolsets': ['target'],
}],
+ ['component=="shared_library"', {
+ 'direct_dependent_settings': {
+ 'defines': [ 'USING_V8_PLATFORM_SHARED' ],
+ },
+ 'defines': [ 'BUILDING_V8_PLATFORM_SHARED' ],
+ }]
],
'direct_dependent_settings': {
'include_dirs': [
@@ -2197,8 +2234,6 @@
'js/symbol.js',
'js/array.js',
'js/string.js',
- 'js/math.js',
- 'js/regexp.js',
'js/arraybuffer.js',
'js/typedarray.js',
'js/collection.js',
@@ -2206,7 +2241,6 @@
'js/collection-iterator.js',
'js/promise.js',
'js/messages.js',
- 'js/array-iterator.js',
'js/templates.js',
'js/spread.js',
'js/proxy.js',
@@ -2400,7 +2434,12 @@
{
'target_name': 'mksnapshot',
'type': 'executable',
- 'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'],
+ 'dependencies': [
+ 'v8_base',
+ 'v8_libbase',
+ 'v8_nosnapshot',
+ 'v8_libplatform'
+ ],
'include_dirs+': [
'..',
'<(DEPTH)',
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index a1b18b20d6..e49cb0061c 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -7,6 +7,7 @@
#include "include/v8.h"
#include "src/allocation.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
@@ -25,7 +26,7 @@ class V8 : public AllStatic {
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
- static v8::Platform* GetCurrentPlatform();
+ V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
// Replaces the current platform with the given platform.
// Should be used only for testing.
static void SetPlatformForTesting(v8::Platform* platform);
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 1d2e36dc04..c6abb8a85c 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -9,11 +9,16 @@
#include "src/base/logging.h"
#include "src/conversions.h"
#include "src/factory.h"
+#include "src/flags.h"
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/snapshot/code-serializer.h"
#include "src/transitions.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
@@ -107,6 +112,11 @@ enum class SerializationTag : uint8_t {
kArrayBufferView = 'V',
// Shared array buffer (transferred). transferID:uint32_t
kSharedArrayBufferTransfer = 'u',
+ // Compiled WebAssembly module. encodingType:(one-byte tag).
+ // If encodingType == 'y' (raw bytes):
+ // wasmWireByteLength:uint32_t, then raw data
+ // compiledDataLength:uint32_t, then raw data
+ kWasmModule = 'W',
};
namespace {
@@ -124,17 +134,29 @@ enum class ArrayBufferViewTag : uint8_t {
kDataView = '?',
};
+enum class WasmEncodingTag : uint8_t {
+ kRawBytes = 'y',
+};
+
} // namespace
ValueSerializer::ValueSerializer(Isolate* isolate,
v8::ValueSerializer::Delegate* delegate)
: isolate_(isolate),
delegate_(delegate),
- zone_(isolate->allocator()),
+ zone_(isolate->allocator(), ZONE_NAME),
id_map_(isolate->heap(), &zone_),
array_buffer_transfer_map_(isolate->heap(), &zone_) {}
-ValueSerializer::~ValueSerializer() {}
+ValueSerializer::~ValueSerializer() {
+ if (buffer_) {
+ if (delegate_) {
+ delegate_->FreeBufferMemory(buffer_);
+ } else {
+ free(buffer_);
+ }
+ }
+}
void ValueSerializer::WriteHeader() {
WriteTag(SerializationTag::kVersion);
@@ -142,7 +164,8 @@ void ValueSerializer::WriteHeader() {
}
void ValueSerializer::WriteTag(SerializationTag tag) {
- buffer_.push_back(static_cast<uint8_t>(tag));
+ uint8_t raw_tag = static_cast<uint8_t>(tag);
+ WriteRawBytes(&raw_tag, sizeof(raw_tag));
}
template <typename T>
@@ -161,7 +184,7 @@ void ValueSerializer::WriteVarint(T value) {
value >>= 7;
} while (value);
*(next_byte - 1) &= 0x7f;
- buffer_.insert(buffer_.end(), stack_buffer, next_byte);
+ WriteRawBytes(stack_buffer, next_byte - stack_buffer);
}
template <typename T>
@@ -179,34 +202,50 @@ void ValueSerializer::WriteZigZag(T value) {
void ValueSerializer::WriteDouble(double value) {
// Warning: this uses host endianness.
- buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(&value),
- reinterpret_cast<const uint8_t*>(&value + 1));
+ WriteRawBytes(&value, sizeof(value));
}
void ValueSerializer::WriteOneByteString(Vector<const uint8_t> chars) {
WriteVarint<uint32_t>(chars.length());
- buffer_.insert(buffer_.end(), chars.begin(), chars.end());
+ WriteRawBytes(chars.begin(), chars.length() * sizeof(uint8_t));
}
void ValueSerializer::WriteTwoByteString(Vector<const uc16> chars) {
// Warning: this uses host endianness.
WriteVarint<uint32_t>(chars.length() * sizeof(uc16));
- buffer_.insert(buffer_.end(), reinterpret_cast<const uint8_t*>(chars.begin()),
- reinterpret_cast<const uint8_t*>(chars.end()));
+ WriteRawBytes(chars.begin(), chars.length() * sizeof(uc16));
}
void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
- const uint8_t* begin = reinterpret_cast<const uint8_t*>(source);
- buffer_.insert(buffer_.end(), begin, begin + length);
+ memcpy(ReserveRawBytes(length), source, length);
}
uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
- if (!bytes) return nullptr;
- auto old_size = buffer_.size();
- buffer_.resize(buffer_.size() + bytes);
+ size_t old_size = buffer_size_;
+ size_t new_size = old_size + bytes;
+ if (new_size > buffer_capacity_) ExpandBuffer(new_size);
+ buffer_size_ = new_size;
return &buffer_[old_size];
}
+void ValueSerializer::ExpandBuffer(size_t required_capacity) {
+ DCHECK_GT(required_capacity, buffer_capacity_);
+ size_t requested_capacity =
+ std::max(required_capacity, buffer_capacity_ * 2) + 64;
+ size_t provided_capacity = 0;
+ void* new_buffer = nullptr;
+ if (delegate_) {
+ new_buffer = delegate_->ReallocateBufferMemory(buffer_, requested_capacity,
+ &provided_capacity);
+ } else {
+ new_buffer = realloc(buffer_, requested_capacity);
+ provided_capacity = requested_capacity;
+ }
+ DCHECK_GE(provided_capacity, requested_capacity);
+ buffer_ = reinterpret_cast<uint8_t*>(new_buffer);
+ buffer_capacity_ = provided_capacity;
+}
+
void ValueSerializer::WriteUint32(uint32_t value) {
WriteVarint<uint32_t>(value);
}
@@ -215,6 +254,18 @@ void ValueSerializer::WriteUint64(uint64_t value) {
WriteVarint<uint64_t>(value);
}
+std::vector<uint8_t> ValueSerializer::ReleaseBuffer() {
+ return std::vector<uint8_t>(buffer_, buffer_ + buffer_size_);
+}
+
+std::pair<uint8_t*, size_t> ValueSerializer::Release() {
+ auto result = std::make_pair(buffer_, buffer_size_);
+ buffer_ = nullptr;
+ buffer_size_ = 0;
+ buffer_capacity_ = 0;
+ return result;
+}
+
void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
Handle<JSArrayBuffer> array_buffer) {
DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
@@ -325,7 +376,7 @@ void ValueSerializer::WriteString(Handle<String> string) {
Vector<const uc16> chars = flat.ToUC16Vector();
uint32_t byte_length = chars.length() * sizeof(uc16);
// The existing reading code expects 16-byte strings to be aligned.
- if ((buffer_.size() + 1 + BytesNeededForVarint(byte_length)) & 1)
+ if ((buffer_size_ + 1 + BytesNeededForVarint(byte_length)) & 1)
WriteTag(SerializationTag::kPadding);
WriteTag(SerializationTag::kTwoByteString);
WriteTwoByteString(chars);
@@ -365,8 +416,16 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_OBJECT_TYPE:
case JS_API_OBJECT_TYPE: {
Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
- return js_object->GetInternalFieldCount() ? WriteHostObject(js_object)
- : WriteJSObject(js_object);
+ Map* map = js_object->map();
+ if (FLAG_expose_wasm &&
+ map->GetConstructor() ==
+ isolate_->native_context()->wasm_module_constructor()) {
+ return WriteWasmModule(js_object);
+ } else if (JSObject::GetInternalFieldCount(map)) {
+ return WriteHostObject(js_object);
+ } else {
+ return WriteJSObject(js_object);
+ }
}
case JS_SPECIAL_API_OBJECT_TYPE:
return WriteHostObject(Handle<JSObject>::cast(receiver));
@@ -470,6 +529,8 @@ Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
array->HasFastElements() && !array->HasFastHoleyElements();
if (should_serialize_densely) {
+ DCHECK_LE(length, static_cast<uint32_t>(FixedArray::kMaxLength));
+
// TODO(jbroman): Distinguish between undefined and a hole (this can happen
// if serializing one of the elements deletes another). This requires wire
// format changes.
@@ -666,7 +727,6 @@ Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
Maybe<bool> ValueSerializer::WriteJSArrayBuffer(JSArrayBuffer* array_buffer) {
uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
if (transfer_entry) {
- DCHECK(array_buffer->was_neutered() || array_buffer->is_shared());
WriteTag(array_buffer->is_shared()
? SerializationTag::kSharedArrayBufferTransfer
: SerializationTag::kArrayBufferTransfer);
@@ -716,6 +776,29 @@ Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
return Just(true);
}
+Maybe<bool> ValueSerializer::WriteWasmModule(Handle<JSObject> object) {
+ Handle<WasmCompiledModule> compiled_part(
+ WasmCompiledModule::cast(object->GetInternalField(0)), isolate_);
+ WasmEncodingTag encoding_tag = WasmEncodingTag::kRawBytes;
+ WriteTag(SerializationTag::kWasmModule);
+ WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
+
+ Handle<String> wire_bytes = compiled_part->module_bytes();
+ int wire_bytes_length = wire_bytes->length();
+ WriteVarint<uint32_t>(wire_bytes_length);
+ uint8_t* destination = ReserveRawBytes(wire_bytes_length);
+ String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
+
+ std::unique_ptr<ScriptData> script_data =
+ WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
+ compiled_part);
+ int script_data_length = script_data->length();
+ WriteVarint<uint32_t>(script_data_length);
+ WriteRawBytes(script_data->data(), script_data_length);
+
+ return Just(true);
+}
+
Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
if (!delegate_) {
isolate_->Throw(*isolate_->factory()->NewError(
@@ -1026,6 +1109,8 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
const bool is_shared = true;
return ReadTransferredJSArrayBuffer(is_shared);
}
+ case SerializationTag::kWasmModule:
+ return ReadWasmModule();
default:
// TODO(jbroman): Introduce an explicit tag for host objects to avoid
// having to treat every unknown tag as a potential host object.
@@ -1092,7 +1177,7 @@ bool ValueDeserializer::ReadExpectedString(Handle<String> expected) {
// is successfully consumed.
if (tag == SerializationTag::kUtf8String && flat.IsOneByte()) {
Vector<const uint8_t> chars = flat.ToOneByteVector();
- if (byte_length == chars.length() &&
+ if (byte_length == static_cast<size_t>(chars.length()) &&
String::IsAscii(chars.begin(), chars.length()) &&
memcmp(bytes.begin(), chars.begin(), byte_length) == 0) {
return true;
@@ -1165,8 +1250,15 @@ MaybeHandle<JSArray> ValueDeserializer::ReadDenseJSArray() {
// If we are at the end of the stack, abort. This function may recurse.
STACK_CHECK(isolate_, MaybeHandle<JSArray>());
+ // We shouldn't permit an array larger than the biggest we can request from
+ // V8. As an additional sanity check, since each entry will take at least one
+ // byte to encode, if there are fewer bytes than that we can also fail fast.
uint32_t length;
- if (!ReadVarint<uint32_t>().To(&length)) return MaybeHandle<JSArray>();
+ if (!ReadVarint<uint32_t>().To(&length) ||
+ length > static_cast<uint32_t>(FixedArray::kMaxLength) ||
+ length > static_cast<size_t>(end_ - position_)) {
+ return MaybeHandle<JSArray>();
+ }
uint32_t id = next_id_++;
HandleScope scope(isolate_);
@@ -1410,7 +1502,8 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
- if (byte_offset % element_size != 0 || byte_length % element_size != 0) {
+ if (element_size == 0 || byte_offset % element_size != 0 ||
+ byte_length % element_size != 0) {
return MaybeHandle<JSArrayBufferView>();
}
Handle<JSTypedArray> typed_array = isolate_->factory()->NewJSTypedArray(
@@ -1420,6 +1513,51 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
return typed_array;
}
+MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
+ if (!FLAG_expose_wasm) return MaybeHandle<JSObject>();
+
+ Vector<const uint8_t> encoding_tag;
+ if (!ReadRawBytes(sizeof(WasmEncodingTag)).To(&encoding_tag) ||
+ encoding_tag[0] != static_cast<uint8_t>(WasmEncodingTag::kRawBytes)) {
+ return MaybeHandle<JSObject>();
+ }
+
+ // Extract the data from the buffer: wasm wire bytes, followed by V8 compiled
+ // script data.
+ static_assert(sizeof(int) <= sizeof(uint32_t),
+ "max int must fit in uint32_t");
+ const uint32_t max_valid_size = std::numeric_limits<int>::max();
+ uint32_t wire_bytes_length = 0;
+ Vector<const uint8_t> wire_bytes;
+ uint32_t compiled_bytes_length = 0;
+ Vector<const uint8_t> compiled_bytes;
+ if (!ReadVarint<uint32_t>().To(&wire_bytes_length) ||
+ wire_bytes_length > max_valid_size ||
+ !ReadRawBytes(wire_bytes_length).To(&wire_bytes) ||
+ !ReadVarint<uint32_t>().To(&compiled_bytes_length) ||
+ compiled_bytes_length > max_valid_size ||
+ !ReadRawBytes(compiled_bytes_length).To(&compiled_bytes)) {
+ return MaybeHandle<JSObject>();
+ }
+
+ // Try to deserialize the compiled module first.
+ ScriptData script_data(compiled_bytes.start(), compiled_bytes.length());
+ Handle<FixedArray> compiled_part;
+ if (WasmCompiledModuleSerializer::DeserializeWasmModule(
+ isolate_, &script_data, wire_bytes)
+ .ToHandle(&compiled_part)) {
+ return WasmModuleObject::New(
+ isolate_, Handle<WasmCompiledModule>::cast(compiled_part));
+ }
+
+ // If that fails, recompile.
+ wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
+ return wasm::CreateModuleObjectFromBytes(
+ isolate_, wire_bytes.begin(), wire_bytes.end(), &thrower,
+ wasm::ModuleOrigin::kWasmOrigin, Handle<Script>::null(), nullptr,
+ nullptr);
+}
+
MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
if (!delegate_) return MaybeHandle<JSObject>();
STACK_CHECK(isolate_, MaybeHandle<JSObject>());
@@ -1629,7 +1767,7 @@ static Maybe<bool> SetPropertiesFromKeyValuePairs(Isolate* isolate,
MaybeHandle<Object>
ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
- DCHECK_EQ(version_, 0);
+ DCHECK_EQ(version_, 0u);
HandleScope scope(isolate_);
std::vector<Handle<Object>> stack;
while (position_ < end_) {
@@ -1646,6 +1784,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
uint32_t num_properties;
if (!ReadVarint<uint32_t>().To(&num_properties) ||
stack.size() / 2 < num_properties) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
return MaybeHandle<Object>();
}
@@ -1657,6 +1797,7 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
!SetPropertiesFromKeyValuePairs(
isolate_, js_object, &stack[begin_properties], num_properties)
.FromMaybe(false)) {
+ DCHECK(isolate_->has_pending_exception());
return MaybeHandle<Object>();
}
@@ -1673,6 +1814,8 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
if (!ReadVarint<uint32_t>().To(&num_properties) ||
!ReadVarint<uint32_t>().To(&length) ||
stack.size() / 2 < num_properties) {
+ isolate_->Throw(*isolate_->factory()->NewError(
+ MessageTemplate::kDataCloneDeserializationError));
return MaybeHandle<Object>();
}
@@ -1685,6 +1828,7 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
!SetPropertiesFromKeyValuePairs(
isolate_, js_array, &stack[begin_properties], num_properties)
.FromMaybe(false)) {
+ DCHECK(isolate_->has_pending_exception());
return MaybeHandle<Object>();
}
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index 27ce0c1207..86e21cf86c 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -59,7 +59,13 @@ class ValueSerializer {
* Returns the stored data. This serializer should not be used once the buffer
* is released. The contents are undefined if a previous write has failed.
*/
- std::vector<uint8_t> ReleaseBuffer() { return std::move(buffer_); }
+ std::vector<uint8_t> ReleaseBuffer();
+
+ /*
+ * Returns the buffer, allocated via the delegate, and its size.
+ * Caller assumes ownership of the buffer.
+ */
+ std::pair<uint8_t*, size_t> Release();
/*
* Marks an ArrayBuffer as havings its contents transferred out of band.
@@ -79,6 +85,9 @@ class ValueSerializer {
void WriteDouble(double value);
private:
+ // Managing allocations of the internal buffer.
+ void ExpandBuffer(size_t required_capacity);
+
// Writing the wire format.
void WriteTag(SerializationTag tag);
template <typename T>
@@ -105,6 +114,7 @@ class ValueSerializer {
Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBuffer(JSArrayBuffer* array_buffer);
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
+ Maybe<bool> WriteWasmModule(Handle<JSObject> object) WARN_UNUSED_RESULT;
Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
/*
@@ -125,7 +135,9 @@ class ValueSerializer {
Isolate* const isolate_;
v8::ValueSerializer::Delegate* const delegate_;
- std::vector<uint8_t> buffer_;
+ uint8_t* buffer_ = nullptr;
+ size_t buffer_size_ = 0;
+ size_t buffer_capacity_ = 0;
Zone zone_;
// To avoid extra lookups in the identity map, ID+1 is actually stored in the
@@ -230,6 +242,7 @@ class ValueDeserializer {
WARN_UNUSED_RESULT;
MaybeHandle<JSArrayBufferView> ReadJSArrayBufferView(
Handle<JSArrayBuffer> buffer) WARN_UNUSED_RESULT;
+ MaybeHandle<JSObject> ReadWasmModule() WARN_UNUSED_RESULT;
MaybeHandle<JSObject> ReadHostObject() WARN_UNUSED_RESULT;
/*
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index d120dfc4ac..080f89e9f4 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -51,7 +51,8 @@ class Vector {
// Access individual vector elements - checks bounds in debug mode.
T& operator[](int index) const {
- DCHECK(0 <= index && index < length_);
+ DCHECK_LE(0, index);
+ DCHECK_LT(index, length_);
return start_[index];
}
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
index 02d1db5bda..ff6af34a02 100644
--- a/deps/v8/src/wasm/ast-decoder.cc
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -32,6 +32,9 @@ namespace wasm {
#endif
#define CHECK_PROTOTYPE_OPCODE(flag) \
+ if (module_ && module_->origin == kAsmJsOrigin) { \
+ error("Opcode not supported for asmjs modules"); \
+ } \
if (!FLAG_##flag) { \
error("Invalid opcode (enable with --" #flag ")"); \
break; \
@@ -147,6 +150,16 @@ struct Control {
(build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
#define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
+struct LaneOperand {
+ uint8_t lane;
+ unsigned length;
+
+ inline LaneOperand(Decoder* decoder, const byte* pc) {
+ lane = decoder->checked_read_u8(pc, 2, "lane");
+ length = 1;
+ }
+};
+
// Generic Wasm bytecode decoder with utilities for decoding operands,
// lengths, etc.
class WasmDecoder : public Decoder {
@@ -214,6 +227,11 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
+ uint32_t table_index = 0;
+ if (!module_->IsValidTable(table_index)) {
+ error("function table has to exist to execute call_indirect");
+ return false;
+ }
if (Complete(pc, operand)) {
return true;
}
@@ -237,8 +255,17 @@ class WasmDecoder : public Decoder {
return true;
}
+ inline bool Validate(const byte* pc, LaneOperand& operand) {
+ if (operand.lane < 0 || operand.lane > 3) {
+ error(pc_, pc_ + 2, "invalid extract lane value");
+ return false;
+ } else {
+ return true;
+ }
+ }
+
unsigned OpcodeLength(const byte* pc) {
- switch (static_cast<WasmOpcode>(*pc)) {
+ switch (static_cast<byte>(*pc)) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
@@ -295,12 +322,39 @@ class WasmDecoder : public Decoder {
ImmI64Operand operand(this, pc);
return 1 + operand.length;
}
+ case kExprGrowMemory:
+ case kExprMemorySize: {
+ MemoryIndexOperand operand(this, pc);
+ return 1 + operand.length;
+ }
case kExprI8Const:
return 2;
case kExprF32Const:
return 5;
case kExprF64Const:
return 9;
+ case kSimdPrefix: {
+ byte simd_index = checked_read_u8(pc, 1, "simd_index");
+ WasmOpcode opcode =
+ static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
+ switch (opcode) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_SIMD_0_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
+ return 2;
+ }
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_SIMD_1_OPERAND_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
+ return 3;
+ }
+ default:
+ error("invalid SIMD opcode");
+ return 2;
+ }
+ }
default:
return 1;
}
@@ -500,7 +554,7 @@ class WasmFullDecoder : public WasmDecoder {
case kAstF64:
return builder_->Float64Constant(0);
case kAstS128:
- return builder_->DefaultS128Value();
+ return builder_->CreateS128Value(0);
default:
UNREACHABLE();
return nullptr;
@@ -520,7 +574,7 @@ class WasmFullDecoder : public WasmDecoder {
// Decodes the locals declarations, if any, populating {local_type_vec_}.
void DecodeLocalDecls() {
- DCHECK_EQ(0, local_type_vec_.size());
+ DCHECK_EQ(0u, local_type_vec_.size());
// Initialize {local_type_vec} from signature.
if (sig_) {
local_type_vec_.reserve(sig_->parameter_count());
@@ -681,8 +735,8 @@ class WasmFullDecoder : public WasmDecoder {
BlockTypeOperand operand(this, pc_);
SsaEnv* finish_try_env = Steal(ssa_env_);
// The continue environment is the inner environment.
- PrepareForLoop(pc_, finish_try_env);
- SetEnv("loop:start", Split(finish_try_env));
+ SsaEnv* loop_body_env = PrepareForLoop(pc_, finish_try_env);
+ SetEnv("loop:start", loop_body_env);
ssa_env_->SetNotMerged();
PushLoop(finish_try_env);
SetBlockType(&control_.back(), operand);
@@ -695,7 +749,7 @@ class WasmFullDecoder : public WasmDecoder {
Value cond = Pop(0, kAstI32);
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- BUILD(Branch, cond.node, &if_true, &if_false);
+ BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
SsaEnv* false_env = Split(ssa_env_);
false_env->control = if_false;
@@ -746,7 +800,8 @@ class WasmFullDecoder : public WasmDecoder {
if (c->false_env != nullptr) {
// End the true branch of a one-armed if.
Goto(c->false_env, c->end_env);
- if (ssa_env_->go() && stack_.size() != c->stack_depth) {
+ if (ssa_env_->go() &&
+ static_cast<int>(stack_.size()) != c->stack_depth) {
error("end of if expected empty stack");
stack_.resize(c->stack_depth);
}
@@ -813,7 +868,7 @@ class WasmFullDecoder : public WasmDecoder {
DCHECK(fval.type != kAstEnd);
DCHECK(cond.type != kAstEnd);
TFNode* controls[2];
- builder_->Branch(cond.node, &controls[0], &controls[1]);
+ builder_->BranchNoHint(cond.node, &controls[0], &controls[1]);
TFNode* merge = builder_->Merge(2, controls);
TFNode* vals[2] = {tval.node, fval.node};
TFNode* phi = builder_->Phi(tval.type, 2, vals, merge);
@@ -840,7 +895,7 @@ class WasmFullDecoder : public WasmDecoder {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(fenv);
fenv->SetNotMerged();
- BUILD(Branch, cond.node, &tenv->control, &fenv->control);
+ BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
ssa_env_ = tenv;
BreakTo(operand.depth);
ssa_env_ = fenv;
@@ -862,7 +917,7 @@ class WasmFullDecoder : public WasmDecoder {
SsaEnv* copy = Steal(break_env);
ssa_env_ = copy;
- while (iterator.has_next()) {
+ while (ok() && iterator.has_next()) {
uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
uint32_t target = iterator.next();
@@ -876,6 +931,7 @@ class WasmFullDecoder : public WasmDecoder {
: BUILD(IfValue, i, sw);
BreakTo(target);
}
+ if (failed()) break;
} else {
// Only a default target. Do the equivalent of br.
const byte* pos = iterator.pc();
@@ -1057,17 +1113,23 @@ class WasmFullDecoder : public WasmDecoder {
case kExprF64StoreMem:
len = DecodeStoreMem(kAstF64, MachineType::Float64());
break;
- case kExprGrowMemory:
+ case kExprGrowMemory: {
+ MemoryIndexOperand operand(this, pc_);
if (module_->origin != kAsmJsOrigin) {
Value val = Pop(0, kAstI32);
Push(kAstI32, BUILD(GrowMemory, val.node));
} else {
error("grow_memory is not supported for asmjs modules");
}
+ len = 1 + operand.length;
break;
- case kExprMemorySize:
+ }
+ case kExprMemorySize: {
+ MemoryIndexOperand operand(this, pc_);
Push(kAstI32, BUILD(CurrentMemoryPages));
+ len = 1 + operand.length;
break;
+ }
case kExprCallFunction: {
CallFunctionOperand operand(this, pc_);
if (Validate(pc_, operand)) {
@@ -1095,13 +1157,31 @@ class WasmFullDecoder : public WasmDecoder {
case kSimdPrefix: {
CHECK_PROTOTYPE_OPCODE(wasm_simd_prototype);
len++;
- byte simd_index = *(pc_ + 1);
+ byte simd_index = checked_read_u8(pc_, 1, "simd index");
opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
TRACE(" @%-4d #%02x #%02x:%-20s|", startrel(pc_), kSimdPrefix,
simd_index, WasmOpcodes::ShortOpcodeName(opcode));
len += DecodeSimdOpcode(opcode);
break;
}
+ case kAtomicPrefix: {
+ if (!module_ || module_->origin != kAsmJsOrigin) {
+ error("Atomics are allowed only in AsmJs modules");
+ break;
+ }
+ if (!FLAG_wasm_atomics_prototype) {
+ error("Invalid opcode (enable with --wasm_atomics_prototype)");
+ break;
+ }
+ len = 2;
+ byte atomic_opcode = checked_read_u8(pc_, 1, "atomic index");
+ opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_opcode);
+ sig = WasmOpcodes::AtomicSignature(opcode);
+ if (sig) {
+ BuildAtomicOperator(opcode);
+ }
+ break;
+ }
default: {
// Deal with special asmjs opcodes.
if (module_ && module_->origin == kAsmJsOrigin) {
@@ -1245,18 +1325,25 @@ class WasmFullDecoder : public WasmDecoder {
return 1 + operand.length;
}
+ unsigned ExtractLane(WasmOpcode opcode, LocalType type) {
+ LaneOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ TFNode* input = Pop(0, LocalType::kSimd128).node;
+ TFNode* node = BUILD(SimdExtractLane, opcode, operand.lane, input);
+ Push(type, node);
+ }
+ return operand.length;
+ }
+
unsigned DecodeSimdOpcode(WasmOpcode opcode) {
unsigned len = 0;
switch (opcode) {
case kExprI32x4ExtractLane: {
- uint8_t lane = this->checked_read_u8(pc_, 2, "lane number");
- if (lane < 0 || lane > 3) {
- error(pc_, pc_ + 2, "invalid extract lane value");
- }
- TFNode* input = Pop(0, LocalType::kSimd128).node;
- TFNode* node = BUILD(SimdExtractLane, opcode, lane, input);
- Push(LocalType::kWord32, node);
- len++;
+ len = ExtractLane(opcode, LocalType::kWord32);
+ break;
+ }
+ case kExprF32x4ExtractLane: {
+ len = ExtractLane(opcode, LocalType::kFloat32);
break;
}
default: {
@@ -1277,6 +1364,8 @@ class WasmFullDecoder : public WasmDecoder {
return len;
}
+ void BuildAtomicOperator(WasmOpcode opcode) { UNIMPLEMENTED(); }
+
void DoReturn() {
int count = static_cast<int>(sig_->return_count());
TFNode** buffer = nullptr;
@@ -1347,7 +1436,7 @@ class WasmFullDecoder : public WasmDecoder {
// Unreachable code is essentially not typechecked.
return {pc_, nullptr, kAstEnd};
}
- if (stack_depth == stack_.size()) {
+ if (stack_depth == static_cast<int>(stack_.size())) {
Value val = {pc_, nullptr, kAstStmt};
return val;
} else {
@@ -1372,8 +1461,7 @@ class WasmFullDecoder : public WasmDecoder {
Goto(ssa_env_, c->end_env);
} else {
// Merge the value(s) into the end of the block.
- if (static_cast<size_t>(c->stack_depth + c->merge.arity) >
- stack_.size()) {
+ if (c->stack_depth + c->merge.arity > stack_.size()) {
error(
pc_, pc_,
"expected at least %d values on the stack for br to @%d, found %d",
@@ -1389,7 +1477,7 @@ class WasmFullDecoder : public WasmDecoder {
if (!ssa_env_->go()) return;
// Merge the value(s) into the end of the block.
int arity = static_cast<int>(c->merge.arity);
- if (c->stack_depth + arity != stack_.size()) {
+ if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
arity, startrel(c->pc));
return;
@@ -1405,7 +1493,7 @@ class WasmFullDecoder : public WasmDecoder {
if (!ssa_env_->go()) return;
// Fallthru must match arity exactly.
int arity = static_cast<int>(c->merge.arity);
- if (c->stack_depth + arity != stack_.size()) {
+ if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
arity, startrel(c->pc));
return;
@@ -1437,9 +1525,13 @@ class WasmFullDecoder : public WasmDecoder {
WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
return;
}
- old.node =
- first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
- old.node, val.node);
+ if (builder_) {
+ old.node =
+ first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
+ old.node, val.node);
+ } else {
+ old.node = nullptr;
+ }
}
}
@@ -1596,6 +1688,7 @@ class WasmFullDecoder : public WasmDecoder {
TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
TFNode* fnode) {
+ DCHECK_NOT_NULL(builder_);
if (builder_->IsPhiWithMerge(tnode, merge)) {
builder_->AppendToPhi(tnode, fnode);
} else if (tnode != fnode) {
@@ -1608,16 +1701,17 @@ class WasmFullDecoder : public WasmDecoder {
return tnode;
}
- void PrepareForLoop(const byte* pc, SsaEnv* env) {
- if (!env->go()) return;
+ SsaEnv* PrepareForLoop(const byte* pc, SsaEnv* env) {
+ if (!builder_) return Split(env);
+ if (!env->go()) return Split(env);
env->state = SsaEnv::kMerged;
- if (!builder_) return;
env->control = builder_->Loop(env->control);
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
if (FLAG_wasm_loop_assignment_analysis) {
BitVector* assigned = AnalyzeLoopAssignment(pc);
+ if (failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
for (int i = EnvironmentCount() - 1; i >= 0; i--) {
@@ -1625,7 +1719,10 @@ class WasmFullDecoder : public WasmDecoder {
env->locals[i] = builder_->Phi(local_type_vec_[i], 1, &env->locals[i],
env->control);
}
- return;
+ SsaEnv* loop_body_env = Split(env);
+ builder_->StackCheck(position(), &(loop_body_env->effect),
+ &(loop_body_env->control));
+ return loop_body_env;
}
}
@@ -1634,6 +1731,11 @@ class WasmFullDecoder : public WasmDecoder {
env->locals[i] =
builder_->Phi(local_type_vec_[i], 1, &env->locals[i], env->control);
}
+
+ SsaEnv* loop_body_env = Split(env);
+ builder_->StackCheck(position(), &(loop_body_env->effect),
+ &(loop_body_env->control));
+ return loop_body_env;
}
// Create a complete copy of the {from}.
@@ -1766,7 +1868,7 @@ class WasmFullDecoder : public WasmDecoder {
bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
const byte* end) {
AccountingAllocator allocator;
- Zone tmp(&allocator);
+ Zone tmp(&allocator, ZONE_NAME);
FunctionBody body = {nullptr, nullptr, nullptr, start, end};
WasmFullDecoder decoder(&tmp, nullptr, body);
return decoder.DecodeLocalDecls(decls);
@@ -1785,7 +1887,7 @@ BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
FunctionBody& body) {
- Zone zone(allocator);
+ Zone zone(allocator, ZONE_NAME);
WasmFullDecoder decoder(&zone, nullptr, body);
decoder.Decode();
return decoder.toResult<DecodeStruct*>(nullptr);
@@ -1793,7 +1895,7 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
FunctionBody& body) {
- Zone zone(allocator);
+ Zone zone(allocator, ZONE_NAME);
WasmFullDecoder decoder(&zone, builder, body);
decoder.Decode();
return decoder.toResult<DecodeStruct*>(nullptr);
@@ -1813,7 +1915,7 @@ void PrintAstForDebugging(const byte* start, const byte* end) {
bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
std::ostream& os,
std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
- Zone zone(allocator);
+ Zone zone(allocator, ZONE_NAME);
WasmFullDecoder decoder(&zone, nullptr, body);
int line_nr = 0;
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
index 8c2c2c4734..9ce323efcb 100644
--- a/deps/v8/src/wasm/ast-decoder.h
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -5,6 +5,8 @@
#ifndef V8_WASM_AST_DECODER_H_
#define V8_WASM_AST_DECODER_H_
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
#include "src/signature.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-opcodes.h"
@@ -156,6 +158,9 @@ struct BlockTypeOperand {
case kLocalF64:
*result = kAstF64;
return true;
+ case kLocalS128:
+ *result = kAstS128;
+ return true;
default:
*result = kAstStmt;
return false;
@@ -181,14 +186,19 @@ struct BreakDepthOperand {
};
struct CallIndirectOperand {
+ uint32_t table_index;
uint32_t index;
FunctionSig* sig;
unsigned length;
inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
- unsigned len1 = 0;
- unsigned len2 = 0;
- index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "signature index");
- length = len1 + len2;
+ unsigned len = 0;
+ index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
+ table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
+ if (table_index != 0) {
+ decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
+ table_index);
+ }
+ length = 1 + len;
sig = nullptr;
}
};
@@ -206,6 +216,18 @@ struct CallFunctionOperand {
}
};
+struct MemoryIndexOperand {
+ uint32_t index;
+ unsigned length;
+ inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u8(pc, 1, "memory index");
+ if (index != 0) {
+ decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
+ }
+ length = 1;
+ }
+};
+
struct BranchTableOperand {
uint32_t table_count;
const byte* start;
@@ -231,7 +253,7 @@ struct BranchTableOperand {
class BranchTableIterator {
public:
unsigned cur_index() { return index_; }
- bool has_next() { return index_ <= table_count_; }
+ bool has_next() { return decoder_->ok() && index_ <= table_count_; }
uint32_t next() {
DCHECK(has_next());
index_++;
@@ -352,15 +374,18 @@ struct AstLocalDecls {
: decls_encoded_size(0), total_local_count(0), local_types(zone) {}
};
-bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start, const byte* end);
-BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
- const byte* start, const byte* end);
+V8_EXPORT_PRIVATE bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
+ const byte* end);
+V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
+ size_t num_locals,
+ const byte* start,
+ const byte* end);
// Computes the length of the opcode at the given address.
-unsigned OpcodeLength(const byte* pc, const byte* end);
+V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
// A simple forward iterator for bytecodes.
-class BytecodeIterator : public Decoder {
+class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
public:
// If one wants to iterate over the bytecode without looking at {pc_offset()}.
class iterator {
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index d5c9f43c57..fc8f110b73 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -173,48 +173,14 @@ class Decoder {
return traceOffEnd<uint32_t>();
}
- // Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
+ // Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
- TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
- name ? name : "varint");
- if (checkAvailable(1)) {
- const byte* pos = pc_;
- const byte* end = pc_ + 5;
- if (end > limit_) end = limit_;
-
- uint32_t result = 0;
- int shift = 0;
- byte b = 0;
- while (pc_ < end) {
- b = *pc_++;
- TRACE("%02x ", b);
- result = result | ((b & 0x7F) << shift);
- if ((b & 0x80) == 0) break;
- shift += 7;
- }
-
- int length = static_cast<int>(pc_ - pos);
- if (pc_ == end && (b & 0x80)) {
- error(pc_ - 1, "varint too large");
- } else if (length == 0) {
- error(pc_, "varint of length 0");
- } else {
- TRACE("= %u\n", result);
- }
- return result;
- }
- return traceOffEnd<uint32_t>();
+ return consume_leb<uint32_t, false>(name);
}
- // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
- void consume_bytes(int size) {
- TRACE(" +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_), "skip",
- size);
- if (checkAvailable(size)) {
- pc_ += size;
- } else {
- pc_ = limit_;
- }
+ // Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
+ int32_t consume_i32v(const char* name = nullptr) {
+ return consume_leb<int32_t, true>(name);
}
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
@@ -287,7 +253,7 @@ class Decoder {
template <typename T>
Result<T> toResult(T val) {
Result<T> result;
- if (error_pc_) {
+ if (failed()) {
TRACE("Result error: %s\n", error_msg_.get());
result.error_code = kError;
result.start = start_;
@@ -313,8 +279,8 @@ class Decoder {
error_msg_.reset();
}
- bool ok() const { return error_pc_ == nullptr; }
- bool failed() const { return !!error_msg_; }
+ bool ok() const { return error_msg_ == nullptr; }
+ bool failed() const { return !ok(); }
bool more() const { return pc_ < limit_; }
const byte* start() { return start_; }
@@ -383,6 +349,47 @@ class Decoder {
}
return result;
}
+
+ template <typename IntType, bool is_signed>
+ IntType consume_leb(const char* name = nullptr) {
+ TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
+ name ? name : "varint");
+ if (checkAvailable(1)) {
+ const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
+ const byte* pos = pc_;
+ const byte* end = pc_ + kMaxLength;
+ if (end > limit_) end = limit_;
+
+ IntType result = 0;
+ int shift = 0;
+ byte b = 0;
+ while (pc_ < end) {
+ b = *pc_++;
+ TRACE("%02x ", b);
+ result = result | (static_cast<IntType>(b & 0x7F) << shift);
+ shift += 7;
+ if ((b & 0x80) == 0) break;
+ }
+
+ int length = static_cast<int>(pc_ - pos);
+ if (pc_ == end && (b & 0x80)) {
+ error(pc_ - 1, "varint too large");
+ } else if (length == 0) {
+ error(pc_, "varint of length 0");
+ } else if (is_signed) {
+ if (length < kMaxLength) {
+ int sign_ext_shift = 8 * sizeof(IntType) - shift;
+ // Perform sign extension.
+ result = (result << sign_ext_shift) >> sign_ext_shift;
+ }
+ TRACE("= %" PRIi64 "\n", static_cast<int64_t>(result));
+ } else {
+ TRACE("= %" PRIu64 "\n", static_cast<uint64_t>(result));
+ }
+ return result;
+ }
+ return traceOffEnd<uint32_t>();
+ }
};
#undef TRACE
diff --git a/deps/v8/src/wasm/managed.h b/deps/v8/src/wasm/managed.h
new file mode 100644
index 0000000000..785d5d32c0
--- /dev/null
+++ b/deps/v8/src/wasm/managed.h
@@ -0,0 +1,56 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_MANAGED_H_
+#define V8_WASM_MANAGED_H_
+
+#include "src/factory.h"
+#include "src/global-handles.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+// An object that wraps a pointer to a C++ object and optionally deletes it
+// when the managed wrapper object is garbage collected.
+template <class CppType>
+class Managed : public Foreign {
+ public:
+ V8_INLINE CppType* get() {
+ return reinterpret_cast<CppType*>(foreign_address());
+ }
+
+ static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr,
+ bool delete_on_gc = true) {
+ Handle<Foreign> foreign =
+ isolate->factory()->NewForeign(reinterpret_cast<Address>(ptr));
+ Handle<Managed<CppType>> handle(
+ reinterpret_cast<Managed<CppType>*>(*foreign), isolate);
+ if (delete_on_gc) {
+ RegisterWeakCallbackForDelete(isolate, handle);
+ }
+ return handle;
+ }
+
+ private:
+ static void RegisterWeakCallbackForDelete(Isolate* isolate,
+ Handle<Managed<CppType>> handle) {
+ Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ &Managed<CppType>::Delete,
+ v8::WeakCallbackType::kFinalizer);
+ }
+ static void Delete(const v8::WeakCallbackInfo<void>& data) {
+ Managed<CppType>** p =
+ reinterpret_cast<Managed<CppType>**>(data.GetParameter());
+ delete (*p)->get();
+ (*p)->set_foreign_address(0);
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+ }
+};
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MANAGED_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 90065616d9..c8eace3c10 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -6,6 +6,7 @@
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
+#include "src/flags.h"
#include "src/macro-assembler.h"
#include "src/objects.h"
#include "src/v8.h"
@@ -76,6 +77,12 @@ class WasmSectionIterator {
return static_cast<uint32_t>(section_end_ - section_start_);
}
+ inline const byte* payload_start() const { return payload_start_; }
+
+ inline uint32_t payload_length() const {
+ return static_cast<uint32_t>(section_end_ - payload_start_);
+ }
+
inline const byte* section_end() const { return section_end_; }
// Advances to the next section, checking that decoding the current section
@@ -96,6 +103,7 @@ class WasmSectionIterator {
Decoder& decoder_;
WasmSectionCode section_code_;
const byte* section_start_;
+ const byte* payload_start_;
const byte* section_end_;
// Reads the section code/name at the current position and sets up
@@ -110,6 +118,7 @@ class WasmSectionIterator {
// Read and check the section size.
uint32_t section_length = decoder_.consume_u32v("section length");
section_start_ = decoder_.pc();
+ payload_start_ = section_start_;
if (decoder_.checkAvailable(section_length)) {
// Get the limit of the section within the module.
section_end_ = section_start_ + section_length;
@@ -119,7 +128,7 @@ class WasmSectionIterator {
}
if (section_code == kUnknownSectionCode) {
- // Check for the known "names" section.
+ // Check for the known "name" section.
uint32_t string_length = decoder_.consume_u32v("section name length");
const byte* section_name_start = decoder_.pc();
decoder_.consume_bytes(string_length, "section name");
@@ -128,6 +137,7 @@ class WasmSectionIterator {
section_code_ = kUnknownSectionCode;
return;
}
+ payload_start_ = decoder_.pc();
TRACE(" +%d section name : \"%.*s\"\n",
static_cast<int>(section_name_start - decoder_.start()),
@@ -299,29 +309,38 @@ class ModuleDecoder : public Decoder {
// ===== Imported table ==========================================
import->index =
static_cast<uint32_t>(module->function_tables.size());
- module->function_tables.push_back(
- {0, 0, std::vector<int32_t>(), true, false});
- expect_u8("element type", 0x20);
+ module->function_tables.push_back({0, 0, false,
+ std::vector<int32_t>(), true,
+ false, SignatureMap()});
+ expect_u8("element type", kWasmAnyFunctionTypeForm);
WasmIndirectFunctionTable* table = &module->function_tables.back();
- consume_resizable_limits("element count", "elements", kMaxUInt32,
- &table->size, &table->max_size);
+ consume_resizable_limits(
+ "element count", "elements", WasmModule::kV8MaxTableSize,
+ &table->min_size, &table->has_max, WasmModule::kV8MaxTableSize,
+ &table->max_size);
break;
}
case kExternalMemory: {
// ===== Imported memory =========================================
- // import->index =
- // static_cast<uint32_t>(module->memories.size());
- // TODO(titzer): imported memories
+ bool has_max = false;
+ consume_resizable_limits("memory", "pages", WasmModule::kV8MaxPages,
+ &module->min_mem_pages, &has_max,
+ WasmModule::kSpecMaxPages,
+ &module->max_mem_pages);
+ module->has_memory = true;
break;
}
case kExternalGlobal: {
// ===== Imported global =========================================
import->index = static_cast<uint32_t>(module->globals.size());
module->globals.push_back(
- {kAstStmt, false, NO_INIT, 0, true, false});
+ {kAstStmt, false, WasmInitExpr(), 0, true, false});
WasmGlobal* global = &module->globals.back();
global->type = consume_value_type();
global->mutability = consume_u8("mutability") != 0;
+ if (global->mutability) {
+ error("mutable globals cannot be imported");
+ }
break;
}
default:
@@ -362,14 +381,18 @@ class ModuleDecoder : public Decoder {
if (table_count > 1) {
error(pos, pos, "invalid table count %d, maximum 1", table_count);
}
+ if (module->function_tables.size() < 1) {
+ module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
+ false, false, SignatureMap()});
+ }
for (uint32_t i = 0; ok() && i < table_count; i++) {
- module->function_tables.push_back(
- {0, 0, std::vector<int32_t>(), false, false});
WasmIndirectFunctionTable* table = &module->function_tables.back();
expect_u8("table type", kWasmAnyFunctionTypeForm);
- consume_resizable_limits("table elements", "elements", kMaxUInt32,
- &table->size, &table->max_size);
+ consume_resizable_limits("table elements", "elements",
+ WasmModule::kV8MaxTableSize, &table->min_size,
+ &table->has_max, WasmModule::kV8MaxTableSize,
+ &table->max_size);
}
section_iter.advance();
}
@@ -384,24 +407,33 @@ class ModuleDecoder : public Decoder {
}
for (uint32_t i = 0; ok() && i < memory_count; i++) {
- consume_resizable_limits("memory", "pages", WasmModule::kMaxLegalPages,
- &module->min_mem_pages,
- &module->max_mem_pages);
+ bool has_max = false;
+ consume_resizable_limits(
+ "memory", "pages", WasmModule::kV8MaxPages, &module->min_mem_pages,
+ &has_max, WasmModule::kSpecMaxPages, &module->max_mem_pages);
}
+ module->has_memory = true;
section_iter.advance();
}
// ===== Global section ==================================================
if (section_iter.section_code() == kGlobalSectionCode) {
uint32_t globals_count = consume_u32v("globals count");
- module->globals.reserve(SafeReserve(globals_count));
+ uint32_t imported_globals = static_cast<uint32_t>(module->globals.size());
+ if (!IsWithinLimit(std::numeric_limits<int32_t>::max(), globals_count,
+ imported_globals)) {
+ error(pos, pos, "too many imported+defined globals: %u + %u",
+ imported_globals, globals_count);
+ }
+ module->globals.reserve(SafeReserve(imported_globals + globals_count));
for (uint32_t i = 0; ok() && i < globals_count; ++i) {
TRACE("DecodeGlobal[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
// Add an uninitialized global and pass a pointer to it.
- module->globals.push_back({kAstStmt, false, NO_INIT, 0, false, false});
+ module->globals.push_back(
+ {kAstStmt, false, WasmInitExpr(), 0, false, false});
WasmGlobal* global = &module->globals.back();
- DecodeGlobalInModule(module, i, global);
+ DecodeGlobalInModule(module, i + imported_globals, global);
}
section_iter.advance();
}
@@ -448,7 +480,12 @@ class ModuleDecoder : public Decoder {
case kExternalGlobal: {
WasmGlobal* global = nullptr;
exp->index = consume_global_index(module, &global);
- if (global) global->exported = true;
+ if (global) {
+ if (global->mutability) {
+ error("mutable globals cannot be exported");
+ }
+ global->exported = true;
+ }
break;
}
default:
@@ -491,8 +528,10 @@ class ModuleDecoder : public Decoder {
WasmFunction* func;
const byte* pos = pc_;
module->start_function_index = consume_func_index(module, &func);
- if (func && func->sig->parameter_count() > 0) {
- error(pos, "invalid start function: non-zero parameter count");
+ if (func &&
+ (func->sig->parameter_count() > 0 || func->sig->return_count() > 0)) {
+ error(pos,
+ "invalid start function: non-zero parameter or return count");
}
section_iter.advance();
}
@@ -501,8 +540,17 @@ class ModuleDecoder : public Decoder {
if (section_iter.section_code() == kElementSectionCode) {
uint32_t element_count = consume_u32v("element count");
for (uint32_t i = 0; ok() && i < element_count; ++i) {
+ const byte* pos = pc();
uint32_t table_index = consume_u32v("table index");
- if (table_index != 0) error("illegal table index != 0");
+ if (table_index != 0) {
+ error(pos, pos, "illegal table index %u != 0", table_index);
+ }
+ WasmIndirectFunctionTable* table = nullptr;
+ if (table_index >= module->function_tables.size()) {
+ error(pos, pos, "out of bounds table index %u", table_index);
+ } else {
+ table = &module->function_tables[table_index];
+ }
WasmInitExpr offset = consume_init_expr(module, kAstI32);
uint32_t num_elem = consume_u32v("number of elements");
std::vector<uint32_t> vector;
@@ -511,7 +559,13 @@ class ModuleDecoder : public Decoder {
init->entries.reserve(SafeReserve(num_elem));
for (uint32_t j = 0; ok() && j < num_elem; j++) {
WasmFunction* func = nullptr;
- init->entries.push_back(consume_func_index(module, &func));
+ uint32_t index = consume_func_index(module, &func);
+ init->entries.push_back(index);
+ if (table && index < module->functions.size()) {
+ // Canonicalize signature indices during decoding.
+ // TODO(titzer): suboptimal, redundant when verifying only.
+ table->map.FindOrInsert(module->functions[index].sig);
+ }
}
}
@@ -532,6 +586,14 @@ class ModuleDecoder : public Decoder {
uint32_t size = consume_u32v("body size");
function->code_start_offset = pc_offset();
function->code_end_offset = pc_offset() + size;
+ if (verify_functions) {
+ ModuleEnv module_env;
+ module_env.module = module;
+ module_env.origin = module->origin;
+
+ VerifyFunctionBody(i + module->num_imported_functions, &module_env,
+ function);
+ }
consume_bytes(size, "function body");
}
section_iter.advance();
@@ -542,12 +604,16 @@ class ModuleDecoder : public Decoder {
uint32_t data_segments_count = consume_u32v("data segments count");
module->data_segments.reserve(SafeReserve(data_segments_count));
for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
+ if (!module->has_memory) {
+ error("cannot load data without memory");
+ break;
+ }
TRACE("DecodeDataSegment[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
module->data_segments.push_back({
- NO_INIT, // dest_addr
- 0, // source_offset
- 0 // source_size
+ WasmInitExpr(), // dest_addr
+ 0, // source_offset
+ 0 // source_size
});
WasmDataSegment* segment = &module->data_segments.back();
DecodeDataSegmentInModule(module, segment);
@@ -557,24 +623,20 @@ class ModuleDecoder : public Decoder {
// ===== Name section ====================================================
if (section_iter.section_code() == kNameSectionCode) {
- const byte* pos = pc_;
uint32_t functions_count = consume_u32v("functions count");
- if (functions_count != module->num_declared_functions) {
- error(pos, pos, "function name count %u mismatch (%u expected)",
- functions_count, module->num_declared_functions);
- }
for (uint32_t i = 0; ok() && i < functions_count; ++i) {
- WasmFunction* function =
- &module->functions[i + module->num_imported_functions];
- function->name_offset = consume_string(&function->name_length, false);
+ uint32_t function_name_length = 0;
+ uint32_t name_offset = consume_string(&function_name_length, false);
+ uint32_t func_index = i;
+ if (func_index < module->functions.size()) {
+ module->functions[func_index].name_offset = name_offset;
+ module->functions[func_index].name_length = function_name_length;
+ }
uint32_t local_names_count = consume_u32v("local names count");
for (uint32_t j = 0; ok() && j < local_names_count; j++) {
- uint32_t unused = 0;
- uint32_t offset = consume_string(&unused, false);
- USE(unused);
- USE(offset);
+ skip_string();
}
}
section_iter.advance();
@@ -588,10 +650,12 @@ class ModuleDecoder : public Decoder {
if (ok()) {
CalculateGlobalOffsets(module);
- PreinitializeIndirectFunctionTables(module);
}
const WasmModule* finished_module = module;
ModuleResult result = toResult(finished_module);
+ if (verify_functions && result.ok()) {
+ result.MoveFrom(result_); // Copy error code and location.
+ }
if (FLAG_dump_wasm_module) DumpModule(module, result);
return result;
}
@@ -647,13 +711,22 @@ class ModuleDecoder : public Decoder {
const byte* pos = pc();
global->init = consume_init_expr(module, kAstStmt);
switch (global->init.kind) {
- case WasmInitExpr::kGlobalIndex:
- if (global->init.val.global_index >= index) {
- error("invalid global index in init expression");
- } else if (module->globals[index].type != global->type) {
- error("type mismatch in global initialization");
+ case WasmInitExpr::kGlobalIndex: {
+ uint32_t other_index = global->init.val.global_index;
+ if (other_index >= index) {
+ error(pos, pos,
+ "invalid global index in init expression, "
+ "index %u, other_index %u",
+ index, other_index);
+ } else if (module->globals[other_index].type != global->type) {
+ error(pos, pos,
+ "type mismatch in global initialization "
+ "(from global #%u), expected %s, got %s",
+ other_index, WasmOpcodes::TypeName(global->type),
+ WasmOpcodes::TypeName(module->globals[other_index].type));
}
break;
+ }
default:
if (global->type != TypeOf(module, global->init)) {
error(pos, pos,
@@ -705,30 +778,6 @@ class ModuleDecoder : public Decoder {
module->globals_size = offset;
}
- // TODO(titzer): this only works without overlapping initializations from
- // global bases for entries
- void PreinitializeIndirectFunctionTables(WasmModule* module) {
- // Fill all tables with invalid entries first.
- for (WasmIndirectFunctionTable& table : module->function_tables) {
- table.values.resize(table.size);
- for (size_t i = 0; i < table.size; i++) {
- table.values[i] = kInvalidFunctionIndex;
- }
- }
- for (WasmTableInit& init : module->table_inits) {
- if (init.offset.kind != WasmInitExpr::kI32Const) continue;
- if (init.table_index >= module->function_tables.size()) continue;
- WasmIndirectFunctionTable& table =
- module->function_tables[init.table_index];
- for (size_t i = 0; i < init.entries.size(); i++) {
- size_t index = i + init.offset.val.i32_const;
- if (index < table.values.size()) {
- table.values[index] = init.entries[i];
- }
- }
- }
- }
-
// Verifies the body (code) of a given function.
void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
WasmFunction* function) {
@@ -766,7 +815,7 @@ class ModuleDecoder : public Decoder {
uint32_t offset = pc_offset();
const byte* string_start = pc_;
// Consume bytes before validation to guarantee that the string is not oob.
- consume_bytes(*length, "string");
+ if (*length > 0) consume_bytes(*length, "string");
if (ok() && validate_utf8 &&
!unibrow::Utf8::Validate(string_start, *length)) {
error(string_start, "no valid UTF-8 string");
@@ -774,6 +823,12 @@ class ModuleDecoder : public Decoder {
return offset;
}
+ // Skips over a length-prefixed string, but checks that it is within bounds.
+ void skip_string() {
+ uint32_t length = consume_u32v("string length");
+ consume_bytes(length, "string");
+ }
+
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
@@ -815,30 +870,35 @@ class ModuleDecoder : public Decoder {
}
void consume_resizable_limits(const char* name, const char* units,
- uint32_t max_value, uint32_t* initial,
+ uint32_t max_initial, uint32_t* initial,
+ bool* has_max, uint32_t max_maximum,
uint32_t* maximum) {
uint32_t flags = consume_u32v("resizable limits flags");
const byte* pos = pc();
*initial = consume_u32v("initial size");
- if (*initial > max_value) {
+ *has_max = false;
+ if (*initial > max_initial) {
error(pos, pos,
- "initial %s size (%u %s) is larger than maximum allowable (%u)",
- name, *initial, units, max_value);
+ "initial %s size (%u %s) is larger than implementation limit (%u)",
+ name, *initial, units, max_initial);
}
if (flags & 1) {
+ *has_max = true;
pos = pc();
*maximum = consume_u32v("maximum size");
- if (*maximum > max_value) {
- error(pos, pos,
- "maximum %s size (%u %s) is larger than maximum allowable (%u)",
- name, *maximum, units, max_value);
+ if (*maximum > max_maximum) {
+ error(
+ pos, pos,
+ "maximum %s size (%u %s) is larger than implementation limit (%u)",
+ name, *maximum, units, max_maximum);
}
if (*maximum < *initial) {
error(pos, pos, "maximum %s size (%u %s) is less than initial (%u %s)",
name, *maximum, units, *initial, units);
}
} else {
- *maximum = 0;
+ *has_max = false;
+ *maximum = max_initial;
}
}
@@ -860,6 +920,21 @@ class ModuleDecoder : public Decoder {
switch (opcode) {
case kExprGetGlobal: {
GlobalIndexOperand operand(this, pc() - 1);
+ if (module->globals.size() <= operand.index) {
+ error("global index is out of bounds");
+ expr.kind = WasmInitExpr::kNone;
+ expr.val.i32_const = 0;
+ break;
+ }
+ WasmGlobal* global = &module->globals[operand.index];
+ if (global->mutability || !global->imported) {
+ error(
+ "only immutable imported globals can be used in initializer "
+ "expressions");
+ expr.kind = WasmInitExpr::kNone;
+ expr.val.i32_const = 0;
+ break;
+ }
expr.kind = WasmInitExpr::kGlobalIndex;
expr.val.global_index = operand.index;
len = operand.length;
@@ -925,7 +1000,12 @@ class ModuleDecoder : public Decoder {
case kLocalF64:
return kAstF64;
case kLocalS128:
- return kAstS128;
+ if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
+ return kAstS128;
+ } else {
+ error(pc_ - 1, "invalid local type");
+ return kAstStmt;
+ }
default:
error(pc_ - 1, "invalid local type");
return kAstStmt;
@@ -999,6 +1079,8 @@ class FunctionError : public FunctionResult {
}
};
+// Find section with given section code. Return Vector of the payload, or null
+// Vector if section is not found or module bytes are invalid.
Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
WasmSectionCode code) {
Decoder decoder(module_start, module_end);
@@ -1012,10 +1094,10 @@ Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
WasmSectionIterator section_iter(decoder);
while (section_iter.more()) {
if (section_iter.section_code() == code) {
- return Vector<const uint8_t>(section_iter.section_start(),
- section_iter.section_length());
+ return Vector<const uint8_t>(section_iter.payload_start(),
+ section_iter.payload_length());
}
- decoder.consume_bytes(section_iter.section_length(), "section payload");
+ decoder.consume_bytes(section_iter.payload_length(), "section payload");
section_iter.advance();
}
@@ -1024,10 +1106,9 @@ Vector<const byte> FindSection(const byte* module_start, const byte* module_end,
} // namespace
-ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
- const byte* module_start, const byte* module_end,
- bool verify_functions, ModuleOrigin origin) {
- size_t decode_memory_start = zone->allocation_size();
+ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool verify_functions,
+ ModuleOrigin origin) {
HistogramTimerScope wasm_decode_module_time_scope(
isolate->counters()->wasm_decode_module_time());
size_t size = module_end - module_start;
@@ -1036,12 +1117,18 @@ ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
// TODO(bradnelson): Improve histogram handling of size_t.
isolate->counters()->wasm_module_size_bytes()->AddSample(
static_cast<int>(size));
- WasmModule* module = new WasmModule();
+ // Signatures are stored in zone memory, which have the same lifetime
+ // as the {module}.
+ Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
+ WasmModule* module = new WasmModule(zone, module_start);
ModuleDecoder decoder(zone, module_start, module_end, origin);
ModuleResult result = decoder.DecodeModule(module, verify_functions);
// TODO(bradnelson): Improve histogram handling of size_t.
+ // TODO(titzer): this isn't accurate, since it doesn't count the data
+ // allocated on the C++ heap.
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=657320
isolate->counters()->wasm_decode_module_peak_memory_bytes()->AddSample(
- static_cast<int>(zone->allocation_size() - decode_memory_start));
+ static_cast<int>(zone->allocation_size()));
return result;
}
@@ -1053,7 +1140,7 @@ FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
ModuleDecoder decoder(&zone, start, end, kWasmOrigin);
return decoder.DecodeInitExpr(start);
}
@@ -1075,9 +1162,8 @@ FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
return decoder.DecodeSingleFunction(module_env, function);
}
-FunctionOffsetsResult DecodeWasmFunctionOffsets(
- const byte* module_start, const byte* module_end,
- uint32_t num_imported_functions) {
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+ const byte* module_end) {
// Find and decode the code section.
Vector<const byte> code_section =
FindSection(module_start, module_end, kCodeSectionCode);
@@ -1088,16 +1174,12 @@ FunctionOffsetsResult DecodeWasmFunctionOffsets(
return decoder.toResult(std::move(table));
}
- // Reserve entries for the imported functions.
- table.reserve(num_imported_functions);
- for (uint32_t i = 0; i < num_imported_functions; i++) {
- table.push_back(std::make_pair(0, 0));
- }
-
uint32_t functions_count = decoder.consume_u32v("functions count");
- // Take care of invalid input here.
- if (functions_count < static_cast<unsigned>(code_section.length()) / 2)
+ // Reserve space for the entries, taking care of invalid input.
+ if (functions_count < static_cast<unsigned>(code_section.length()) / 2) {
table.reserve(functions_count);
+ }
+
int section_offset = static_cast<int>(code_section.start() - module_start);
DCHECK_LE(0, section_offset);
for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
@@ -1112,6 +1194,47 @@ FunctionOffsetsResult DecodeWasmFunctionOffsets(
return decoder.toResult(std::move(table));
}
+AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
+ const byte* tables_end) {
+ AsmJsOffsets table;
+
+ Decoder decoder(tables_start, tables_end);
+ uint32_t functions_count = decoder.consume_u32v("functions count");
+ // Reserve space for the entries, taking care of invalid input.
+ if (functions_count < static_cast<unsigned>(tables_end - tables_start)) {
+ table.reserve(functions_count);
+ }
+
+ for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
+ uint32_t size = decoder.consume_u32v("table size");
+ if (size == 0) {
+ table.push_back(std::vector<std::pair<int, int>>());
+ continue;
+ }
+ if (!decoder.checkAvailable(size)) {
+ decoder.error("illegal asm function offset table size");
+ }
+ const byte* table_end = decoder.pc() + size;
+ uint32_t locals_size = decoder.consume_u32("locals size");
+ int last_byte_offset = locals_size;
+ int last_asm_position = 0;
+ std::vector<std::pair<int, int>> func_asm_offsets;
+ func_asm_offsets.reserve(size / 4); // conservative estimation
+ while (decoder.ok() && decoder.pc() < table_end) {
+ last_byte_offset += decoder.consume_u32v("byte offset delta");
+ last_asm_position += decoder.consume_i32v("asm position delta");
+ func_asm_offsets.push_back({last_byte_offset, last_asm_position});
+ }
+ if (decoder.pc() != table_end) {
+ decoder.error("broken asm offset table");
+ }
+ table.push_back(std::move(func_asm_offsets));
+ }
+ if (decoder.more()) decoder.error("unexpected additional bytes");
+
+ return decoder.toResult(std::move(table));
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 22a313cec3..7cf5cfe3c1 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -5,14 +5,24 @@
#ifndef V8_WASM_MODULE_DECODER_H_
#define V8_WASM_MODULE_DECODER_H_
+#include "src/globals.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
namespace wasm {
+
+typedef Result<const WasmModule*> ModuleResult;
+typedef Result<WasmFunction*> FunctionResult;
+typedef std::vector<std::pair<int, int>> FunctionOffsets;
+typedef Result<FunctionOffsets> FunctionOffsetsResult;
+typedef std::vector<std::vector<std::pair<int, int>>> AsmJsOffsets;
+typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
+
// Decodes the bytes of a WASM module between {module_start} and {module_end}.
-V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate,
const byte* module_start,
const byte* module_end,
bool verify_functions,
@@ -20,23 +30,33 @@ V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate, Zone* zone,
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
-FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
- const byte* end);
+V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone,
+ const byte* start,
+ const byte* end);
// Decodes the bytes of a WASM function between
// {function_start} and {function_end}.
-FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone, ModuleEnv* env,
- const byte* function_start,
- const byte* function_end);
+V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(Isolate* isolate,
+ Zone* zone, ModuleEnv* env,
+ const byte* function_start,
+ const byte* function_end);
// Extracts the function offset table from the wasm module bytes.
// Returns a vector with <offset, length> entries, or failure if the wasm bytes
// are detected as invalid. Note that this validation is not complete.
-FunctionOffsetsResult DecodeWasmFunctionOffsets(
- const byte* module_start, const byte* module_end,
- uint32_t num_imported_functions);
+FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
+ const byte* module_end);
+
+V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
+ const byte* end);
-WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end);
+// Extracts the mapping from wasm byte offset to asm.js source position per
+// function.
+// Returns a vector of vectors with <byte_offset, source_position> entries, or
+// failure if the wasm bytes are detected as invalid. Note that this validation
+// is not complete.
+AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* module_start,
+ const byte* module_end);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/signature-map.cc b/deps/v8/src/wasm/signature-map.cc
new file mode 100644
index 0000000000..e7f8b2fc94
--- /dev/null
+++ b/deps/v8/src/wasm/signature-map.cc
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/signature-map.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+uint32_t SignatureMap::FindOrInsert(FunctionSig* sig) {
+ auto pos = map_.find(sig);
+ if (pos != map_.end()) {
+ return pos->second;
+ } else {
+ uint32_t index = next_++;
+ map_[sig] = index;
+ return index;
+ }
+}
+
+int32_t SignatureMap::Find(FunctionSig* sig) const {
+ auto pos = map_.find(sig);
+ if (pos != map_.end()) {
+ return static_cast<int32_t>(pos->second);
+ } else {
+ return -1;
+ }
+}
+
+bool SignatureMap::CompareFunctionSigs::operator()(FunctionSig* a,
+ FunctionSig* b) const {
+ if (a == b) return false;
+ if (a->return_count() < b->return_count()) return true;
+ if (a->return_count() > b->return_count()) return false;
+ if (a->parameter_count() < b->parameter_count()) return true;
+ if (a->parameter_count() > b->parameter_count()) return false;
+ for (size_t r = 0; r < a->return_count(); r++) {
+ if (a->GetReturn(r) < b->GetReturn(r)) return true;
+ if (a->GetReturn(r) > b->GetReturn(r)) return false;
+ }
+ for (size_t p = 0; p < a->parameter_count(); p++) {
+ if (a->GetParam(p) < b->GetParam(p)) return true;
+ if (a->GetParam(p) > b->GetParam(p)) return false;
+ }
+ return false;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/wasm/signature-map.h b/deps/v8/src/wasm/signature-map.h
new file mode 100644
index 0000000000..3a7ed0a047
--- /dev/null
+++ b/deps/v8/src/wasm/signature-map.h
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_SIGNATURE_MAP_H_
+#define V8_WASM_SIGNATURE_MAP_H_
+
+#include <map>
+
+#include "src/signature.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// A signature map canonicalizes signatures into a range of indices so that
+// two different {FunctionSig} instances with the same contents map to the
+// same index.
+class V8_EXPORT_PRIVATE SignatureMap {
+ public:
+ // Gets the index for a signature, assigning a new index if necessary.
+ uint32_t FindOrInsert(FunctionSig* sig);
+
+ // Gets the index for a signature, returning {-1} if not found.
+ int32_t Find(FunctionSig* sig) const;
+
+ private:
+ // TODO(wasm): use a hashmap instead of an ordered map?
+ struct CompareFunctionSigs {
+ bool operator()(FunctionSig* a, FunctionSig* b) const;
+ };
+ uint32_t next_ = 0;
+ std::map<FunctionSig*, uint32_t, CompareFunctionSigs> map_;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_SIGNATURE_MAP_H_
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 42a8e5f2ab..11c2ef8aa5 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/wasm/wasm-debug.h"
-
#include "src/assert-scope.h"
#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
using namespace v8::internal;
using namespace v8::internal::wasm;
@@ -19,68 +18,62 @@ namespace {
enum {
kWasmDebugInfoWasmObj,
kWasmDebugInfoWasmBytesHash,
- kWasmDebugInfoFunctionByteOffsets,
- kWasmDebugInfoFunctionScripts,
+ kWasmDebugInfoAsmJsOffsets,
kWasmDebugInfoNumEntries
};
-ByteArray *GetOrCreateFunctionOffsetTable(Handle<WasmDebugInfo> debug_info) {
- Object *offset_table = debug_info->get(kWasmDebugInfoFunctionByteOffsets);
- Isolate *isolate = debug_info->GetIsolate();
- if (!offset_table->IsUndefined(isolate)) return ByteArray::cast(offset_table);
+// TODO(clemensh): Move asm.js offset tables to the compiled module.
+FixedArray *GetAsmJsOffsetTables(Handle<WasmDebugInfo> debug_info,
+ Isolate *isolate) {
+ Object *offset_tables = debug_info->get(kWasmDebugInfoAsmJsOffsets);
+ if (!offset_tables->IsUndefined(isolate)) {
+ return FixedArray::cast(offset_tables);
+ }
- FunctionOffsetsResult function_offsets;
+ Handle<JSObject> wasm_instance(debug_info->wasm_instance(), isolate);
+ Handle<WasmCompiledModule> compiled_module(GetCompiledModule(*wasm_instance),
+ isolate);
+ DCHECK(compiled_module->has_asm_js_offset_tables());
+
+ AsmJsOffsetsResult asm_offsets;
{
+ Handle<ByteArray> asm_offset_tables =
+ compiled_module->asm_js_offset_tables();
DisallowHeapAllocation no_gc;
- Handle<JSObject> wasm_object(debug_info->wasm_object(), isolate);
- uint32_t num_imported_functions =
- wasm::GetNumImportedFunctions(wasm_object);
- SeqOneByteString *wasm_bytes =
- wasm::GetWasmBytes(debug_info->wasm_object());
- const byte *bytes_start = wasm_bytes->GetChars();
- const byte *bytes_end = bytes_start + wasm_bytes->length();
- function_offsets = wasm::DecodeWasmFunctionOffsets(bytes_start, bytes_end,
- num_imported_functions);
+ const byte *bytes_start = asm_offset_tables->GetDataStartAddress();
+ const byte *bytes_end = bytes_start + asm_offset_tables->length();
+ asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
}
- DCHECK(function_offsets.ok());
- size_t array_size = 2 * kIntSize * function_offsets.val.size();
- CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
- ByteArray *arr =
- *isolate->factory()->NewByteArray(static_cast<int>(array_size));
- int idx = 0;
- for (std::pair<int, int> p : function_offsets.val) {
- arr->set_int(idx++, p.first);
- arr->set_int(idx++, p.second);
+ // Wasm bytes must be valid and must contain asm.js offset table.
+ DCHECK(asm_offsets.ok());
+ DCHECK_GE(static_cast<size_t>(kMaxInt), asm_offsets.val.size());
+ int num_functions = static_cast<int>(asm_offsets.val.size());
+ DCHECK_EQ(
+ wasm::GetNumberOfFunctions(handle(debug_info->wasm_instance())),
+ static_cast<int>(num_functions +
+ compiled_module->module()->num_imported_functions));
+ Handle<FixedArray> all_tables =
+ isolate->factory()->NewFixedArray(num_functions);
+ debug_info->set(kWasmDebugInfoAsmJsOffsets, *all_tables);
+ for (int func = 0; func < num_functions; ++func) {
+ std::vector<std::pair<int, int>> &func_asm_offsets = asm_offsets.val[func];
+ if (func_asm_offsets.empty()) continue;
+ size_t array_size = 2 * kIntSize * func_asm_offsets.size();
+ CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
+ ByteArray *arr =
+ *isolate->factory()->NewByteArray(static_cast<int>(array_size));
+ all_tables->set(func, arr);
+ int idx = 0;
+ for (std::pair<int, int> p : func_asm_offsets) {
+ // Byte offsets must be strictly monotonously increasing:
+ DCHECK(idx == 0 || p.first > arr->get_int(idx - 2));
+ arr->set_int(idx++, p.first);
+ arr->set_int(idx++, p.second);
+ }
+ DCHECK_EQ(arr->length(), idx * kIntSize);
}
- DCHECK_EQ(arr->length(), idx * kIntSize);
- debug_info->set(kWasmDebugInfoFunctionByteOffsets, arr);
-
- return arr;
-}
-
-std::pair<int, int> GetFunctionOffsetAndLength(Handle<WasmDebugInfo> debug_info,
- int func_index) {
- ByteArray *arr = GetOrCreateFunctionOffsetTable(debug_info);
- DCHECK(func_index >= 0 && func_index < arr->length() / kIntSize / 2);
-
- int offset = arr->get_int(2 * func_index);
- int length = arr->get_int(2 * func_index + 1);
- // Assert that it's distinguishable from the "illegal function index" return.
- DCHECK(offset > 0 && length > 0);
- return {offset, length};
-}
-
-Vector<const uint8_t> GetFunctionBytes(Handle<WasmDebugInfo> debug_info,
- int func_index) {
- SeqOneByteString *module_bytes =
- wasm::GetWasmBytes(debug_info->wasm_object());
- std::pair<int, int> offset_and_length =
- GetFunctionOffsetAndLength(debug_info, func_index);
- return Vector<const uint8_t>(
- module_bytes->GetChars() + offset_and_length.first,
- offset_and_length.second);
+ return *all_tables;
}
-
} // namespace
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
@@ -90,7 +83,7 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
factory->NewFixedArray(kWasmDebugInfoNumEntries, TENURED);
arr->set(kWasmDebugInfoWasmObj, *wasm);
int hash = 0;
- Handle<SeqOneByteString> wasm_bytes(GetWasmBytes(*wasm), isolate);
+ Handle<SeqOneByteString> wasm_bytes = GetWasmBytes(wasm);
{
DisallowHeapAllocation no_gc;
hash = StringHasher::HashSequentialString(
@@ -105,14 +98,9 @@ Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
bool WasmDebugInfo::IsDebugInfo(Object *object) {
if (!object->IsFixedArray()) return false;
FixedArray *arr = FixedArray::cast(object);
- Isolate *isolate = arr->GetIsolate();
return arr->length() == kWasmDebugInfoNumEntries &&
- IsWasmObject(arr->get(kWasmDebugInfoWasmObj)) &&
- arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber() &&
- (arr->get(kWasmDebugInfoFunctionByteOffsets)->IsUndefined(isolate) ||
- arr->get(kWasmDebugInfoFunctionByteOffsets)->IsByteArray()) &&
- (arr->get(kWasmDebugInfoFunctionScripts)->IsUndefined(isolate) ||
- arr->get(kWasmDebugInfoFunctionScripts)->IsFixedArray());
+ IsWasmInstance(arr->get(kWasmDebugInfoWasmObj)) &&
+ arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber();
}
WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
@@ -120,119 +108,38 @@ WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
return reinterpret_cast<WasmDebugInfo *>(object);
}
-JSObject *WasmDebugInfo::wasm_object() {
+JSObject *WasmDebugInfo::wasm_instance() {
return JSObject::cast(get(kWasmDebugInfoWasmObj));
}
-Script *WasmDebugInfo::GetFunctionScript(Handle<WasmDebugInfo> debug_info,
- int func_index) {
+int WasmDebugInfo::GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
+ int func_index, int byte_offset) {
Isolate *isolate = debug_info->GetIsolate();
- Object *scripts_obj = debug_info->get(kWasmDebugInfoFunctionScripts);
- Handle<FixedArray> scripts;
- if (scripts_obj->IsUndefined(isolate)) {
- int num_functions = wasm::GetNumberOfFunctions(debug_info->wasm_object());
- scripts = isolate->factory()->NewFixedArray(num_functions, TENURED);
- debug_info->set(kWasmDebugInfoFunctionScripts, *scripts);
- } else {
- scripts = handle(FixedArray::cast(scripts_obj), isolate);
- }
-
- DCHECK(func_index >= 0 && func_index < scripts->length());
- Object *script_or_undef = scripts->get(func_index);
- if (!script_or_undef->IsUndefined(isolate)) {
- return Script::cast(script_or_undef);
- }
-
- Handle<Script> script =
- isolate->factory()->NewScript(isolate->factory()->empty_string());
- scripts->set(func_index, *script);
-
- script->set_type(Script::TYPE_WASM);
- script->set_wasm_object(debug_info->wasm_object());
- script->set_wasm_function_index(func_index);
-
- int hash = 0;
- debug_info->get(kWasmDebugInfoWasmBytesHash)->ToInt32(&hash);
- char buffer[32];
- SNPrintF(ArrayVector(buffer), "wasm://%08x/%d", hash, func_index);
- Handle<String> source_url =
- isolate->factory()->NewStringFromAsciiChecked(buffer, TENURED);
- script->set_source_url(*source_url);
-
- int func_bytes_len =
- GetFunctionOffsetAndLength(debug_info, func_index).second;
- Handle<FixedArray> line_ends = isolate->factory()->NewFixedArray(1, TENURED);
- line_ends->set(0, Smi::FromInt(func_bytes_len));
- line_ends->set_map(isolate->heap()->fixed_cow_array_map());
- script->set_line_ends(*line_ends);
-
- // TODO(clemensh): Register with the debugger. Note that we cannot call into
- // JS at this point since this function is called from within stack trace
- // collection (which means we cannot call Debug::OnAfterCompile in its
- // current form). See crbug.com/641065.
- if (false) isolate->debug()->OnAfterCompile(script);
-
- return *script;
-}
-
-Handle<String> WasmDebugInfo::DisassembleFunction(
- Handle<WasmDebugInfo> debug_info, int func_index) {
- std::ostringstream disassembly_os;
-
- {
- Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
- DisallowHeapAllocation no_gc;
-
- AccountingAllocator allocator;
- bool ok = PrintAst(
- &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
- disassembly_os, nullptr);
- DCHECK(ok);
- USE(ok);
- }
-
- // Unfortunately, we have to copy the string here.
- std::string code_str = disassembly_os.str();
- CHECK_LE(code_str.length(), static_cast<size_t>(kMaxInt));
- Factory *factory = debug_info->GetIsolate()->factory();
- Vector<const char> code_vec(code_str.data(),
- static_cast<int>(code_str.length()));
- return factory->NewStringFromAscii(code_vec).ToHandleChecked();
-}
-
-Handle<FixedArray> WasmDebugInfo::GetFunctionOffsetTable(
- Handle<WasmDebugInfo> debug_info, int func_index) {
- class NullBuf : public std::streambuf {};
- NullBuf null_buf;
- std::ostream null_stream(&null_buf);
-
- std::vector<std::tuple<uint32_t, int, int>> offset_table_vec;
-
- {
- Vector<const uint8_t> bytes_vec = GetFunctionBytes(debug_info, func_index);
- DisallowHeapAllocation no_gc;
-
- AccountingAllocator allocator;
- bool ok = PrintAst(
- &allocator, FunctionBodyForTesting(bytes_vec.start(), bytes_vec.end()),
- null_stream, &offset_table_vec);
- DCHECK(ok);
- USE(ok);
+ Handle<JSObject> instance(debug_info->wasm_instance(), isolate);
+ FixedArray *offset_tables = GetAsmJsOffsetTables(debug_info, isolate);
+
+ WasmCompiledModule *compiled_module = wasm::GetCompiledModule(*instance);
+ int num_imported_functions =
+ compiled_module->module()->num_imported_functions;
+ DCHECK_LE(num_imported_functions, func_index);
+ func_index -= num_imported_functions;
+ DCHECK_LT(func_index, offset_tables->length());
+ ByteArray *offset_table = ByteArray::cast(offset_tables->get(func_index));
+
+ // Binary search for the current byte offset.
+ int left = 0; // inclusive
+ int right = offset_table->length() / kIntSize / 2; // exclusive
+ DCHECK_LT(left, right);
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ if (offset_table->get_int(2 * mid) <= byte_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
}
-
- size_t arr_size = 3 * offset_table_vec.size();
- CHECK_LE(arr_size, static_cast<size_t>(kMaxInt));
- Factory *factory = debug_info->GetIsolate()->factory();
- Handle<FixedArray> offset_table =
- factory->NewFixedArray(static_cast<int>(arr_size), TENURED);
-
- int idx = 0;
- for (std::tuple<uint32_t, int, int> elem : offset_table_vec) {
- offset_table->set(idx++, Smi::FromInt(std::get<0>(elem)));
- offset_table->set(idx++, Smi::FromInt(std::get<1>(elem)));
- offset_table->set(idx++, Smi::FromInt(std::get<2>(elem)));
- }
- DCHECK_EQ(idx, offset_table->length());
-
- return offset_table;
+ // There should be an entry for each position that could show up on the stack
+ // trace:
+ DCHECK_EQ(byte_offset, offset_table->get_int(2 * left));
+ return offset_table->get_int(2 * left + 1);
}
diff --git a/deps/v8/src/wasm/wasm-debug.h b/deps/v8/src/wasm/wasm-debug.h
deleted file mode 100644
index 9659951271..0000000000
--- a/deps/v8/src/wasm/wasm-debug.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_DEBUG_H_
-#define V8_WASM_DEBUG_H_
-
-#include "src/handles.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class WasmDebugInfo : public FixedArray {
- public:
- static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
-
- static bool IsDebugInfo(Object* object);
- static WasmDebugInfo* cast(Object* object);
-
- JSObject* wasm_object();
-
- bool SetBreakPoint(int byte_offset);
-
- // Get the Script for the specified function.
- static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
- int func_index);
-
- // Disassemble the specified function from this module.
- static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
- int func_index);
-
- // Get the offset table for the specified function, mapping from byte offsets
- // to position in the disassembly.
- // Returns an array with three entries per instruction: byte offset, line and
- // column.
- static Handle<FixedArray> GetFunctionOffsetTable(
- Handle<WasmDebugInfo> debug_info, int func_index);
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_DEBUG_H_
diff --git a/deps/v8/src/wasm/wasm-function-name-table.cc b/deps/v8/src/wasm/wasm-function-name-table.cc
deleted file mode 100644
index cc52125500..0000000000
--- a/deps/v8/src/wasm/wasm-function-name-table.cc
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-function-name-table.h"
-
-#include "src/wasm/wasm-module.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Build an array with all function names. If there are N functions in the
-// module, then the first (kIntSize * (N+1)) bytes are integer entries.
-// The first integer entry encodes the number of functions in the module.
-// The entries 1 to N contain offsets into the second part of this array.
-// If a function is unnamed (not to be confused with an empty name), then the
-// integer entry is the negative offset of the next function name.
-// After these N+1 integer entries, the second part begins, which holds a
-// concatenation of all function names.
-Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
- const WasmModule* module) {
- uint64_t func_names_length = 0;
- for (auto& func : module->functions) func_names_length += func.name_length;
- int num_funcs_int = static_cast<int>(module->functions.size());
- int current_offset = (num_funcs_int + 1) * kIntSize;
- uint64_t total_array_length = current_offset + func_names_length;
- int total_array_length_int = static_cast<int>(total_array_length);
- // Check for overflow.
- CHECK(total_array_length_int == total_array_length && num_funcs_int >= 0 &&
- num_funcs_int == module->functions.size());
- Handle<ByteArray> func_names_array =
- isolate->factory()->NewByteArray(total_array_length_int, TENURED);
- func_names_array->set_int(0, num_funcs_int);
- int func_index = 0;
- for (const WasmFunction& fun : module->functions) {
- WasmName name = module->GetNameOrNull(&fun);
- if (name.start() == nullptr) {
- func_names_array->set_int(func_index + 1, -current_offset);
- } else {
- func_names_array->copy_in(current_offset,
- reinterpret_cast<const byte*>(name.start()),
- name.length());
- func_names_array->set_int(func_index + 1, current_offset);
- current_offset += name.length();
- }
- ++func_index;
- }
- return func_names_array;
-}
-
-MaybeHandle<String> GetWasmFunctionNameFromTable(
- Handle<ByteArray> func_names_array, uint32_t func_index) {
- uint32_t num_funcs = static_cast<uint32_t>(func_names_array->get_int(0));
- DCHECK(static_cast<int>(num_funcs) >= 0);
- Factory* factory = func_names_array->GetIsolate()->factory();
- DCHECK(func_index < num_funcs);
- int offset = func_names_array->get_int(func_index + 1);
- if (offset < 0) return {};
- int next_offset = func_index == num_funcs - 1
- ? func_names_array->length()
- : abs(func_names_array->get_int(func_index + 2));
- ScopedVector<byte> buffer(next_offset - offset);
- func_names_array->copy_out(offset, buffer.start(), next_offset - offset);
- if (!unibrow::Utf8::Validate(buffer.start(), buffer.length())) return {};
- return factory->NewStringFromUtf8(Vector<const char>::cast(buffer));
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-function-name-table.h b/deps/v8/src/wasm/wasm-function-name-table.h
deleted file mode 100644
index ffee782413..0000000000
--- a/deps/v8/src/wasm/wasm-function-name-table.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_FUNCTION_NAME_TABLE_H_
-#define V8_WASM_FUNCTION_NAME_TABLE_H_
-
-#include "src/handles.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-// Forward declarations for some WASM data structures.
-struct WasmModule;
-
-// Encode all function names of the WasmModule into one ByteArray.
-Handle<ByteArray> BuildFunctionNamesTable(Isolate* isolate,
- const WasmModule* module);
-
-// Extract the function name for the given func_index from the function name
-// table.
-// Returns a null handle if the respective function is unnamed (not to be
-// confused with empty names) or the function name is not a valid UTF-8 string.
-MaybeHandle<String> GetWasmFunctionNameFromTable(
- Handle<ByteArray> wasm_names_table, uint32_t func_index);
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_FUNCTION_NAME_TABLE_H_
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 2ac681eff2..6e049ffd25 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -62,8 +62,6 @@ namespace wasm {
V(I64GtS, int64_t, >) \
V(I64GeS, int64_t, >=) \
V(F32Add, float, +) \
- V(F32Mul, float, *) \
- V(F32Div, float, /) \
V(F32Eq, float, ==) \
V(F32Ne, float, !=) \
V(F32Lt, float, <) \
@@ -71,8 +69,6 @@ namespace wasm {
V(F32Gt, float, >) \
V(F32Ge, float, >=) \
V(F64Add, double, +) \
- V(F64Mul, double, *) \
- V(F64Div, double, /) \
V(F64Eq, double, ==) \
V(F64Ne, double, !=) \
V(F64Lt, double, <) \
@@ -80,6 +76,12 @@ namespace wasm {
V(F64Gt, double, >) \
V(F64Ge, double, >=)
+#define FOREACH_SIMPLE_BINOP_NAN(V) \
+ V(F32Mul, float, *) \
+ V(F64Mul, double, *) \
+ V(F32Div, float, /) \
+ V(F64Div, double, /)
+
#define FOREACH_OTHER_BINOP(V) \
V(I32DivS, int32_t) \
V(I32DivU, uint32_t) \
@@ -127,14 +129,12 @@ namespace wasm {
V(F32Floor, float) \
V(F32Trunc, float) \
V(F32NearestInt, float) \
- V(F32Sqrt, float) \
V(F64Abs, double) \
V(F64Neg, double) \
V(F64Ceil, double) \
V(F64Floor, double) \
V(F64Trunc, double) \
V(F64NearestInt, double) \
- V(F64Sqrt, double) \
V(I32SConvertF32, float) \
V(I32SConvertF64, double) \
V(I32UConvertF32, float) \
@@ -165,6 +165,10 @@ namespace wasm {
V(I32AsmjsSConvertF64, double) \
V(I32AsmjsUConvertF64, double)
+#define FOREACH_OTHER_UNOP_NAN(V) \
+ V(F32Sqrt, float) \
+ V(F64Sqrt, double)
+
static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
if (b == 0) {
*trap = kTrapDivByZero;
@@ -460,7 +464,8 @@ static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
}
static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
- return sqrtf(a);
+ float result = sqrtf(a);
+ return result;
}
static inline double ExecuteF64Abs(double a, TrapReason* trap) {
@@ -655,19 +660,16 @@ static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
}
static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
- WasmModuleInstance* instance) {
+ WasmInstance* instance) {
// TODO(ahaas): Move memory allocation to wasm-module.cc for better
// encapsulation.
- if (delta_pages > wasm::WasmModule::kMaxMemPages) {
+ if (delta_pages > wasm::WasmModule::kV8MaxPages) {
return -1;
}
uint32_t old_size = instance->mem_size;
uint32_t new_size;
byte* new_mem_start;
if (instance->mem_size == 0) {
- if (delta_pages > wasm::WasmModule::kMaxMemPages) {
- return -1;
- }
// TODO(gdeepti): Fix bounds check to take into account size of memtype.
new_size = delta_pages * wasm::WasmModule::kPageSize;
new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
@@ -678,7 +680,7 @@ static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
DCHECK_NOT_NULL(instance->mem_start);
new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
if (new_size >
- wasm::WasmModule::kMaxMemPages * wasm::WasmModule::kPageSize) {
+ wasm::WasmModule::kV8MaxPages * wasm::WasmModule::kPageSize) {
return -1;
}
new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
@@ -690,9 +692,6 @@ static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
}
instance->mem_start = new_mem_start;
instance->mem_size = new_size;
- // realloc
- // update mem_start
- // update mem_size
return static_cast<int32_t>(old_size / WasmModule::kPageSize);
}
@@ -967,7 +966,7 @@ class CodeMap {
// Responsible for executing code directly.
class ThreadImpl : public WasmInterpreter::Thread {
public:
- ThreadImpl(Zone* zone, CodeMap* codemap, WasmModuleInstance* instance)
+ ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
: codemap_(codemap),
instance_(instance),
stack_(zone),
@@ -975,7 +974,8 @@ class ThreadImpl : public WasmInterpreter::Thread {
blocks_(zone),
state_(WasmInterpreter::STOPPED),
break_pc_(kInvalidPc),
- trap_reason_(kTrapCount) {}
+ trap_reason_(kTrapCount),
+ possible_nondeterminism_(false) {}
virtual ~ThreadImpl() {}
@@ -1030,6 +1030,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
frames_.clear();
state_ = WasmInterpreter::STOPPED;
trap_reason_ = kTrapCount;
+ possible_nondeterminism_ = false;
}
virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }
@@ -1053,6 +1054,8 @@ class ThreadImpl : public WasmInterpreter::Thread {
virtual pc_t GetBreakpointPc() { return break_pc_; }
+ virtual bool PossibleNondeterminism() { return possible_nondeterminism_; }
+
bool Terminated() {
return state_ == WasmInterpreter::TRAPPED ||
state_ == WasmInterpreter::FINISHED;
@@ -1080,16 +1083,17 @@ class ThreadImpl : public WasmInterpreter::Thread {
};
CodeMap* codemap_;
- WasmModuleInstance* instance_;
+ WasmInstance* instance_;
ZoneVector<WasmVal> stack_;
ZoneVector<Frame> frames_;
ZoneVector<Block> blocks_;
WasmInterpreter::State state_;
pc_t break_pc_;
TrapReason trap_reason_;
+ bool possible_nondeterminism_;
CodeMap* codemap() { return codemap_; }
- WasmModuleInstance* instance() { return instance_; }
+ WasmInstance* instance() { return instance_; }
const WasmModule* module() { return instance_->module; }
void DoTrap(TrapReason trap, pc_t pc) {
@@ -1327,9 +1331,15 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
case kExprBrTable: {
BranchTableOperand operand(&decoder, code->at(pc));
+ BranchTableIterator iterator(&decoder, operand);
uint32_t key = Pop().to<uint32_t>();
+ uint32_t depth = 0;
if (key >= operand.table_count) key = operand.table_count;
- len = key + DoBreak(code, pc + key, operand.table[key]);
+ for (uint32_t i = 0; i <= key; i++) {
+ DCHECK(iterator.has_next());
+ depth = iterator.next();
+ }
+ len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
TRACE(" br[%u] => @%zu\n", key, pc + key + len);
break;
}
@@ -1419,7 +1429,16 @@ class ThreadImpl : public WasmInterpreter::Thread {
if (target == nullptr) {
return DoTrap(kTrapFuncInvalid, pc);
} else if (target->function->sig_index != operand.index) {
- return DoTrap(kTrapFuncSigMismatch, pc);
+ // If not an exact match, we have to do a canonical check.
+ // TODO(titzer): make this faster with some kind of caching?
+ const WasmIndirectFunctionTable* table =
+ &module()->function_tables[0];
+ int function_key = table->map.Find(target->function->sig);
+ if (function_key < 0 ||
+ (function_key !=
+ table->map.Find(module()->signatures[operand.index]))) {
+ return DoTrap(kTrapFuncSigMismatch, pc);
+ }
}
DoCall(target, &pc, pc + 1 + operand.length, &limit);
@@ -1573,13 +1592,17 @@ class ThreadImpl : public WasmInterpreter::Thread {
ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
case kExprGrowMemory: {
+ MemoryIndexOperand operand(&decoder, code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
+ len = 1 + operand.length;
break;
}
case kExprMemorySize: {
+ MemoryIndexOperand operand(&decoder, code->at(pc));
Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
WasmModule::kPageSize)));
+ len = 1 + operand.length;
break;
}
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
@@ -1593,6 +1616,19 @@ class ThreadImpl : public WasmInterpreter::Thread {
FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP
+#define EXECUTE_SIMPLE_BINOP_NAN(name, ctype, op) \
+ case kExpr##name: { \
+ WasmVal rval = Pop(); \
+ WasmVal lval = Pop(); \
+ ctype result = lval.to<ctype>() op rval.to<ctype>(); \
+ possible_nondeterminism_ |= std::isnan(result); \
+ WasmVal result_val(result); \
+ Push(pc, result_val); \
+ break; \
+ }
+ FOREACH_SIMPLE_BINOP_NAN(EXECUTE_SIMPLE_BINOP_NAN)
+#undef EXECUTE_SIMPLE_BINOP_NAN
+
#define EXECUTE_OTHER_BINOP(name, ctype) \
case kExpr##name: { \
TrapReason trap = kTrapCount; \
@@ -1618,6 +1654,20 @@ class ThreadImpl : public WasmInterpreter::Thread {
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP
+#define EXECUTE_OTHER_UNOP_NAN(name, ctype) \
+ case kExpr##name: { \
+ TrapReason trap = kTrapCount; \
+ volatile ctype val = Pop().to<ctype>(); \
+ ctype result = Execute##name(val, &trap); \
+ possible_nondeterminism_ |= std::isnan(result); \
+ WasmVal result_val(result); \
+ if (trap != kTrapCount) return DoTrap(trap, pc); \
+ Push(pc, result_val); \
+ break; \
+ }
+ FOREACH_OTHER_UNOP_NAN(EXECUTE_OTHER_UNOP_NAN)
+#undef EXECUTE_OTHER_UNOP_NAN
+
default:
V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
code->start[pc], OpcodeName(code->start[pc]));
@@ -1648,7 +1698,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
WasmVal PopArity(size_t arity) {
if (arity == 0) return WasmVal();
- CHECK_EQ(1, arity);
+ CHECK_EQ(1u, arity);
return Pop();
}
@@ -1709,11 +1759,11 @@ class ThreadImpl : public WasmInterpreter::Thread {
//============================================================================
class WasmInterpreterInternals : public ZoneObject {
public:
- WasmModuleInstance* instance_;
+ WasmInstance* instance_;
CodeMap codemap_;
ZoneVector<ThreadImpl*> threads_;
- WasmInterpreterInternals(Zone* zone, WasmModuleInstance* instance)
+ WasmInterpreterInternals(Zone* zone, WasmInstance* instance)
: instance_(instance),
codemap_(instance_ ? instance_->module : nullptr, zone),
threads_(zone) {
@@ -1730,9 +1780,9 @@ class WasmInterpreterInternals : public ZoneObject {
//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
-WasmInterpreter::WasmInterpreter(WasmModuleInstance* instance,
+WasmInterpreter::WasmInterpreter(WasmInstance* instance,
AccountingAllocator* allocator)
- : zone_(allocator),
+ : zone_(allocator, ZONE_NAME),
internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index b61e092e23..360362b994 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -18,7 +18,7 @@ namespace wasm {
// forward declarations.
struct WasmFunction;
-struct WasmModuleInstance;
+struct WasmInstance;
class WasmInterpreterInternals;
typedef size_t pc_t;
@@ -125,13 +125,17 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
virtual const WasmFrame* GetFrame(int index) = 0;
virtual WasmFrame* GetMutableFrame(int index) = 0;
virtual WasmVal GetReturnValue(int index = 0) = 0;
+ // Returns true if the thread executed an instruction which may produce
+ // nondeterministic results, e.g. float div, float sqrt, and float mul,
+ // where the sign bit of a NaN is nondeterministic.
+ virtual bool PossibleNondeterminism() = 0;
// Thread-specific breakpoints.
bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
bool GetBreakpoint(const WasmFunction* function, int pc);
};
- WasmInterpreter(WasmModuleInstance* instance, AccountingAllocator* allocator);
+ WasmInterpreter(WasmInstance* instance, AccountingAllocator* allocator);
~WasmInterpreter();
//==========================================================================
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 254fd7061a..0e030a28c4 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -19,6 +19,7 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
typedef uint8_t byte;
@@ -27,6 +28,12 @@ using v8::internal::wasm::ErrorThrower;
namespace v8 {
+enum WasmMemoryObjectData {
+ kWasmMemoryBuffer,
+ kWasmMemoryMaximum,
+ kWasmMemoryInstanceObject
+};
+
namespace {
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
@@ -55,7 +62,7 @@ RawBuffer GetRawBufferSource(
end = start + contents.ByteLength();
if (start == nullptr || end == start) {
- thrower->Error("ArrayBuffer argument is empty");
+ thrower->CompileError("ArrayBuffer argument is empty");
}
} else if (source->IsTypedArray()) {
// A TypedArray was passed.
@@ -69,132 +76,28 @@ RawBuffer GetRawBufferSource(
end = start + array->ByteLength();
if (start == nullptr || end == start) {
- thrower->Error("ArrayBuffer argument is empty");
+ thrower->TypeError("ArrayBuffer argument is empty");
}
} else {
- thrower->Error("Argument 0 must be an ArrayBuffer or Uint8Array");
+ thrower->TypeError("Argument 0 must be an ArrayBuffer or Uint8Array");
}
return {start, end};
}
-void VerifyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
- HandleScope scope(args.GetIsolate());
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "Wasm.verifyModule()");
-
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a buffer source");
- return;
- }
- RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
- if (thrower.error()) return;
-
- i::Zone zone(isolate->allocator());
- internal::wasm::ModuleResult result =
- internal::wasm::DecodeWasmModule(isolate, &zone, buffer.start, buffer.end,
- true, internal::wasm::kWasmOrigin);
-
- if (result.failed()) {
- thrower.Failed("", result);
- }
-
- if (result.val) delete result.val;
-}
-
-void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
- HandleScope scope(args.GetIsolate());
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "Wasm.verifyFunction()");
-
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a buffer source");
- return;
- }
- RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
- if (thrower.error()) return;
-
- internal::wasm::FunctionResult result;
- {
- // Verification of a single function shouldn't allocate.
- i::DisallowHeapAllocation no_allocation;
- i::Zone zone(isolate->allocator());
- result = internal::wasm::DecodeWasmFunction(isolate, &zone, nullptr,
- buffer.start, buffer.end);
- }
-
- if (result.failed()) {
- thrower.Failed("", result);
- }
-
- if (result.val) delete result.val;
-}
-
-i::MaybeHandle<i::JSObject> InstantiateModule(
- const v8::FunctionCallbackInfo<v8::Value>& args, const byte* start,
- const byte* end, ErrorThrower* thrower,
- internal::wasm::ModuleOrigin origin = i::wasm::kWasmOrigin) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
-
- // Decode but avoid a redundant pass over function bodies for verification.
- // Verification will happen during compilation.
- i::Zone zone(isolate->allocator());
- i::MaybeHandle<i::JSObject> module_object =
- i::wasm::CreateModuleObjectFromBytes(isolate, start, end, thrower,
- origin);
- i::MaybeHandle<i::JSObject> object;
- if (!module_object.is_null()) {
- // Success. Instantiate the module and return the object.
- i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
- if (args.Length() > 1 && args[1]->IsObject()) {
- Local<Object> obj = Local<Object>::Cast(args[1]);
- ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
- }
-
- i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
- if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
- Local<Object> obj = Local<Object>::Cast(args[2]);
- i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
- memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
- }
-
- object = i::wasm::WasmModule::Instantiate(
- isolate, thrower, module_object.ToHandleChecked(), ffi, memory);
- if (!object.is_null()) {
- args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
- }
- }
- return object;
-}
-
-void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
- HandleScope scope(args.GetIsolate());
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "Wasm.instantiateModule()");
-
- if (args.Length() < 1) {
- thrower.TypeError("Argument 0 must be a buffer source");
- return;
- }
- RawBuffer buffer = GetRawBufferSource(args[0], &thrower);
- if (buffer.start == nullptr) return;
-
- InstantiateModule(args, buffer.start, buffer.end, &thrower);
-}
-
-static i::MaybeHandle<i::JSObject> CreateModuleObject(
+static i::MaybeHandle<i::WasmModuleObject> CreateModuleObject(
v8::Isolate* isolate, const v8::Local<v8::Value> source,
ErrorThrower* thrower) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::MaybeHandle<i::JSObject> nothing;
RawBuffer buffer = GetRawBufferSource(source, thrower);
- if (buffer.start == nullptr) return i::MaybeHandle<i::JSObject>();
+ if (buffer.start == nullptr) return i::MaybeHandle<i::WasmModuleObject>();
DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
return i::wasm::CreateModuleObjectFromBytes(
- i_isolate, buffer.start, buffer.end, thrower,
- i::wasm::ModuleOrigin::kWasmOrigin);
+ i_isolate, buffer.start, buffer.end, thrower, i::wasm::kWasmOrigin,
+ i::Handle<i::Script>::null(), nullptr, nullptr);
}
static bool ValidateModule(v8::Isolate* isolate,
@@ -212,8 +115,8 @@ static bool ValidateModule(v8::Isolate* isolate,
i::wasm::ModuleOrigin::kWasmOrigin);
}
-bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
- i::Handle<i::Symbol> sym, const char* msg) {
+static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
+ i::Handle<i::Symbol> sym, const char* msg) {
if (value->IsJSObject()) {
i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
@@ -308,14 +211,8 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Object> obj = Local<Object>::Cast(args[0]);
-
- i::Handle<i::JSObject> module_obj =
+ i::Handle<i::JSObject> i_obj =
i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
- if (module_obj->GetInternalFieldCount() < 1 ||
- !module_obj->GetInternalField(0)->IsFixedArray()) {
- thrower.TypeError("Argument 0 is an invalid WebAssembly.Module");
- return;
- }
i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
if (args.Length() > 1 && args[1]->IsObject()) {
@@ -324,17 +221,24 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
- if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ if (args.Length() > 2 && args[2]->IsObject()) {
Local<Object> obj = Local<Object>::Cast(args[2]);
i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
- memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
+ if (i::WasmJs::IsWasmMemoryObject(i_isolate, mem_obj)) {
+ memory = i::Handle<i::JSArrayBuffer>(
+ i::Handle<i::WasmMemoryObject>::cast(mem_obj)->get_buffer(),
+ i_isolate);
+ } else {
+ thrower.TypeError("Argument 2 must be a WebAssembly.Memory");
+ }
}
- i::MaybeHandle<i::JSObject> instance = i::wasm::WasmModule::Instantiate(
- i_isolate, &thrower, module_obj, ffi, memory);
+ i::MaybeHandle<i::JSObject> instance =
+ i::wasm::WasmModule::Instantiate(i_isolate, &thrower, i_obj, ffi, memory);
if (instance.is_null()) {
- if (!thrower.error()) thrower.Error("Could not instantiate module");
+ if (!thrower.error()) thrower.RuntimeError("Could not instantiate module");
return;
}
+ DCHECK(!i_isolate->has_pending_exception());
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
}
@@ -366,6 +270,8 @@ bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return false;
}
+const int max_table_size = 1 << 26;
+
void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
@@ -392,7 +298,6 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
}
- const int max_table_size = 1 << 26;
// The descriptor's 'initial'.
int initial;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
@@ -414,24 +319,14 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
&maximum, initial, max_table_size)) {
return;
}
+ } else {
+ maximum = static_cast<int>(i::wasm::WasmModule::kV8MaxTableSize);
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::JSFunction> table_ctor(
- i_isolate->native_context()->wasm_table_constructor());
+ i::Handle<i::FixedArray> fixed_array;
i::Handle<i::JSObject> table_obj =
- i_isolate->factory()->NewJSObject(table_ctor);
- i::Handle<i::FixedArray> fixed_array =
- i_isolate->factory()->NewFixedArray(initial);
- i::Object* null = i_isolate->heap()->null_value();
- for (int i = 0; i < initial; ++i) fixed_array->set(i, null);
- table_obj->SetInternalField(0, *fixed_array);
- table_obj->SetInternalField(
- 1, has_maximum.FromJust()
- ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
- : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
- i::Handle<i::Symbol> table_sym(i_isolate->native_context()->wasm_table_sym());
- i::Object::SetProperty(table_obj, table_sym, table_obj, i::STRICT).Check();
+ i::WasmTableObject::New(i_isolate, initial, maximum, &fixed_array);
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(table_obj));
}
@@ -442,7 +337,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
"WebAssembly.Module()");
if (args.Length() < 1 || !args[0]->IsObject()) {
- thrower.TypeError("Argument 0 must be a table descriptor");
+ thrower.TypeError("Argument 0 must be a memory descriptor");
return;
}
Local<Context> context = isolate->GetCurrentContext();
@@ -475,27 +370,196 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
static_cast<size_t>(initial);
i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
- i::Handle<i::JSObject> memory_obj = i::WasmJs::CreateWasmMemoryObject(
- i_isolate, buffer, has_maximum.FromJust(), maximum);
- v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(memory_obj));
+ i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
+ i_isolate, buffer, has_maximum.FromJust() ? maximum : -1);
+ args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
+
void WebAssemblyTableGetLength(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- // TODO(rossberg)
+ v8::Isolate* isolate = args.GetIsolate();
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ "Receiver is not a WebAssembly.Table")) {
+ return;
+ }
+ auto receiver =
+ i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+ args.GetReturnValue().Set(
+ v8::Number::New(isolate, receiver->current_length()));
}
+
void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
- // TODO(rossberg)
+ v8::Isolate* isolate = args.GetIsolate();
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ "Receiver is not a WebAssembly.Table")) {
+ return;
+ }
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ auto receiver =
+ i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+ i::Handle<i::FixedArray> old_array(receiver->get_functions(), i_isolate);
+ int old_size = old_array->length();
+ int64_t new_size64 = 0;
+ if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&new_size64)) {
+ return;
+ }
+ new_size64 += old_size;
+
+ if (new_size64 < old_size || new_size64 > receiver->maximum_length()) {
+ v8::Local<v8::Value> e = v8::Exception::RangeError(
+ v8_str(isolate, new_size64 < old_size ? "trying to shrink table"
+ : "maximum table size exceeded"));
+ isolate->ThrowException(e);
+ return;
+ }
+ int new_size = static_cast<int>(new_size64);
+
+ if (new_size != old_size) {
+ i::Handle<i::FixedArray> new_array =
+ i_isolate->factory()->NewFixedArray(new_size);
+ for (int i = 0; i < old_size; ++i) new_array->set(i, old_array->get(i));
+ i::Object* null = i_isolate->heap()->null_value();
+ for (int i = old_size; i < new_size; ++i) new_array->set(i, null);
+ receiver->set_functions(*new_array);
+ }
+
+ // TODO(titzer): update relevant instances.
}
+
void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
- // TODO(rossberg)
+ v8::Isolate* isolate = args.GetIsolate();
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ "Receiver is not a WebAssembly.Table")) {
+ return;
+ }
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ auto receiver =
+ i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+ i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+ int i = 0;
+ if (args.Length() > 0 && !args[0]->Int32Value(context).To(&i)) return;
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ if (i < 0 || i >= array->length()) {
+ v8::Local<v8::Value> e =
+ v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
+ isolate->ThrowException(e);
+ return;
+ }
+
+ i::Handle<i::Object> value(array->get(i), i_isolate);
+ return_value.Set(Utils::ToLocal(value));
}
+
void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
- // TODO(rossberg)
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+ "Receiver is not a WebAssembly.Table")) {
+ return;
+ }
+ if (args.Length() < 2) {
+ v8::Local<v8::Value> e = v8::Exception::TypeError(
+ v8_str(isolate, "Argument 1 must be null or a function"));
+ isolate->ThrowException(e);
+ return;
+ }
+ i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
+ if (!value->IsNull(i_isolate) &&
+ (!value->IsJSFunction() ||
+ i::Handle<i::JSFunction>::cast(value)->code()->kind() !=
+ i::Code::JS_TO_WASM_FUNCTION)) {
+ v8::Local<v8::Value> e = v8::Exception::TypeError(
+ v8_str(isolate, "Argument 1 must be null or a WebAssembly function"));
+ isolate->ThrowException(e);
+ return;
+ }
+
+ auto receiver =
+ i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
+ i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+ int i;
+ if (!args[0]->Int32Value(context).To(&i)) return;
+ if (i < 0 || i >= array->length()) {
+ v8::Local<v8::Value> e =
+ v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
+ isolate->ThrowException(e);
+ return;
+ }
+
+ i::Handle<i::FixedArray> dispatch_tables(receiver->get_dispatch_tables(),
+ i_isolate);
+ if (value->IsNull(i_isolate)) {
+ i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
+ i::Handle<i::JSFunction>::null());
+ } else {
+ i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
+ i::Handle<i::JSFunction>::cast(value));
+ }
+
+ i::Handle<i::FixedArray>::cast(array)->set(i, *value);
}
+
void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
- // TODO(rossberg)
+ v8::Isolate* isolate = args.GetIsolate();
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+ if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
+ i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+ "Receiver is not a WebAssembly.Memory")) {
+ return;
+ }
+ if (args.Length() < 1) {
+ v8::Local<v8::Value> e = v8::Exception::TypeError(
+ v8_str(isolate, "Argument 0 required, must be numeric value of pages"));
+ isolate->ThrowException(e);
+ return;
+ }
+
+ uint32_t delta = args[0]->Uint32Value(context).FromJust();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Handle<i::JSObject> receiver =
+ i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
+ i::Handle<i::Object> instance_object(
+ receiver->GetInternalField(kWasmMemoryInstanceObject), i_isolate);
+ i::Handle<i::JSObject> instance(
+ i::Handle<i::JSObject>::cast(instance_object));
+
+ // TODO(gdeepti) Implement growing memory when shared by different
+ // instances.
+ int32_t ret = internal::wasm::GrowInstanceMemory(i_isolate, instance, delta);
+ if (ret == -1) {
+ v8::Local<v8::Value> e = v8::Exception::Error(
+ v8_str(isolate, "Unable to grow instance memory."));
+ isolate->ThrowException(e);
+ return;
+ }
+ i::MaybeHandle<i::JSArrayBuffer> buffer =
+ internal::wasm::GetInstanceMemory(i_isolate, instance);
+ if (buffer.is_null()) {
+ v8::Local<v8::Value> e = v8::Exception::Error(
+ v8_str(isolate, "WebAssembly.Memory buffer object not set."));
+ isolate->ThrowException(e);
+ return;
+ }
+ receiver->SetInternalField(kWasmMemoryBuffer, *buffer.ToHandleChecked());
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(ret);
}
+
void WebAssemblyMemoryGetBuffer(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -509,31 +573,14 @@ void WebAssemblyMemoryGetBuffer(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::JSObject> receiver =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::Object> buffer(receiver->GetInternalField(0), i_isolate);
+ i::Handle<i::Object> buffer(receiver->GetInternalField(kWasmMemoryBuffer),
+ i_isolate);
DCHECK(buffer->IsJSArrayBuffer());
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(buffer));
}
} // namespace
-i::Handle<i::JSObject> i::WasmJs::CreateWasmMemoryObject(
- i::Isolate* i_isolate, i::Handle<i::JSArrayBuffer> buffer, bool has_maximum,
- int maximum) {
- i::Handle<i::JSFunction> memory_ctor(
- i_isolate->native_context()->wasm_memory_constructor());
- i::Handle<i::JSObject> memory_obj =
- i_isolate->factory()->NewJSObject(memory_ctor);
- memory_obj->SetInternalField(0, *buffer);
- memory_obj->SetInternalField(
- 1, has_maximum
- ? static_cast<i::Object*>(i::Smi::FromInt(maximum))
- : static_cast<i::Object*>(i_isolate->heap()->undefined_value()));
- i::Handle<i::Symbol> memory_sym(
- i_isolate->native_context()->wasm_memory_sym());
- i::Object::SetProperty(memory_obj, memory_sym, memory_obj, i::STRICT).Check();
- return memory_obj;
-}
-
// TODO(titzer): we use the API to create the function template because the
// internal guts are too ugly to replicate here.
static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
@@ -605,41 +652,43 @@ void WasmJs::InstallWasmConstructors(Isolate* isolate,
JSFunction::SetInstancePrototype(
cons, Handle<Object>(context->initial_object_prototype(), isolate));
cons->shared()->set_instance_class_name(*name);
- Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
+ Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
- JSObject::AddProperty(global, name, wasm_object, attributes);
+ JSObject::AddProperty(global, name, webassembly, attributes);
// Setup compile
- InstallFunc(isolate, wasm_object, "compile", WebAssemblyCompile);
+ InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile);
// Setup compile
- InstallFunc(isolate, wasm_object, "validate", WebAssemblyValidate);
+ InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate);
// Setup Module
Handle<JSFunction> module_constructor =
- InstallFunc(isolate, wasm_object, "Module", WebAssemblyModule);
+ InstallFunc(isolate, webassembly, "Module", WebAssemblyModule);
context->set_wasm_module_constructor(*module_constructor);
Handle<JSObject> module_proto =
factory->NewJSObject(module_constructor, TENURED);
i::Handle<i::Map> map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + i::kPointerSize);
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ WasmModuleObject::kFieldCount * i::kPointerSize);
JSFunction::SetInitialMap(module_constructor, map, module_proto);
JSObject::AddProperty(module_proto, isolate->factory()->constructor_string(),
module_constructor, DONT_ENUM);
// Setup Instance
Handle<JSFunction> instance_constructor =
- InstallFunc(isolate, wasm_object, "Instance", WebAssemblyInstance);
+ InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance);
context->set_wasm_instance_constructor(*instance_constructor);
// Setup Table
Handle<JSFunction> table_constructor =
- InstallFunc(isolate, wasm_object, "Table", WebAssemblyTable);
+ InstallFunc(isolate, webassembly, "Table", WebAssemblyTable);
context->set_wasm_table_constructor(*table_constructor);
Handle<JSObject> table_proto =
factory->NewJSObject(table_constructor, TENURED);
map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ WasmTableObject::kFieldCount * i::kPointerSize);
JSFunction::SetInitialMap(table_constructor, map, table_proto);
JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
table_constructor, DONT_ENUM);
@@ -650,17 +699,29 @@ void WasmJs::InstallWasmConstructors(Isolate* isolate,
// Setup Memory
Handle<JSFunction> memory_constructor =
- InstallFunc(isolate, wasm_object, "Memory", WebAssemblyMemory);
+ InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory);
context->set_wasm_memory_constructor(*memory_constructor);
Handle<JSObject> memory_proto =
factory->NewJSObject(memory_constructor, TENURED);
map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize + 2 * i::kPointerSize);
+ i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ WasmMemoryObject::kFieldCount * i::kPointerSize);
JSFunction::SetInitialMap(memory_constructor, map, memory_proto);
JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
memory_constructor, DONT_ENUM);
InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+
+ // Setup errors
+ attributes = static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ Handle<JSFunction> compile_error(
+ isolate->native_context()->wasm_compile_error_function());
+ JSObject::AddProperty(webassembly, isolate->factory()->CompileError_string(),
+ compile_error, attributes);
+ Handle<JSFunction> runtime_error(
+ isolate->native_context()->wasm_runtime_error_function());
+ JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
+ runtime_error, attributes);
}
void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
@@ -668,44 +729,13 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
return;
}
- Factory* factory = isolate->factory();
-
// Setup wasm function map.
Handle<Context> context(global->native_context(), isolate);
InstallWasmMapsIfNeeded(isolate, context);
- if (!FLAG_expose_wasm) {
- return;
- }
-
- // Bind the experimental WASM object.
- // TODO(rossberg, titzer): remove once it's no longer needed.
- {
- Handle<String> name = v8_str(isolate, "Wasm");
- Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons, Handle<Object>(context->initial_object_prototype(), isolate));
- cons->shared()->set_instance_class_name(*name);
- Handle<JSObject> wasm_object = factory->NewJSObject(cons, TENURED);
- PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
- JSObject::AddProperty(global, name, wasm_object, attributes);
-
- // Install functions on the WASM object.
- InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
- InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
- InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
-
- {
- // Add the Wasm.experimentalVersion property.
- Handle<String> name = v8_str(isolate, "experimentalVersion");
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
- Handle<Smi> value =
- Handle<Smi>(Smi::FromInt(wasm::kWasmVersion), isolate);
- JSObject::AddProperty(wasm_object, name, value, attributes);
- }
+ if (FLAG_expose_wasm) {
+ InstallWasmConstructors(isolate, global, context);
}
- InstallWasmConstructors(isolate, global, context);
}
void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
@@ -737,5 +767,24 @@ void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
}
}
+static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> symbol) {
+ if (value->IsJSObject()) {
+ i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+ Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, symbol);
+ if (has_brand.IsNothing()) return false;
+ if (has_brand.ToChecked()) return true;
+ }
+ return false;
+}
+
+bool WasmJs::IsWasmMemoryObject(Isolate* isolate, Handle<Object> value) {
+ i::Handle<i::Symbol> symbol(isolate->context()->wasm_memory_sym(), isolate);
+ return HasBrand(value, symbol);
+}
+
+bool WasmJs::IsWasmTableObject(Isolate* isolate, Handle<Object> value) {
+ i::Handle<i::Symbol> symbol(isolate->context()->wasm_table_sym(), isolate);
+ return HasBrand(value, symbol);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index 4f26494624..f5b9596ee2 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -24,9 +24,11 @@ class WasmJs {
Handle<JSGlobalObject> global,
Handle<Context> context);
- static Handle<JSObject> CreateWasmMemoryObject(Isolate* isolate,
- Handle<JSArrayBuffer> buffer,
- bool has_maximum, int maximum);
+ // WebAssembly.Table.
+ static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
+
+ // WebAssembly.Memory
+ static bool IsWasmMemoryObject(Isolate* isolate, Handle<Object> value);
};
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index fd10a3929a..ce2f843e71 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -420,21 +420,23 @@ class LocalDeclEncoder {
#define WASM_CALL_FUNCTION(index, ...) \
__VA_ARGS__, kExprCallFunction, static_cast<byte>(index)
+#define TABLE_ZERO 0
+
// TODO(titzer): change usages of these macros to put func last.
#define WASM_CALL_INDIRECT0(index, func) \
- func, kExprCallIndirect, static_cast<byte>(index)
+ func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECT1(index, func, a) \
- a, func, kExprCallIndirect, static_cast<byte>(index)
+ a, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECT2(index, func, a, b) \
- a, b, func, kExprCallIndirect, static_cast<byte>(index)
+ a, b, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECT3(index, func, a, b, c) \
- a, b, c, func, kExprCallIndirect, static_cast<byte>(index)
+ a, b, c, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECT4(index, func, a, b, c, d) \
- a, b, c, d, func, kExprCallIndirect, static_cast<byte>(index)
+ a, b, c, d, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECT5(index, func, a, b, c, d, e) \
- a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index)
+ a, b, c, d, e, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_CALL_INDIRECTN(arity, index, func, ...) \
- __VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index)
+ __VA_ARGS__, func, kExprCallIndirect, static_cast<byte>(index), TABLE_ZERO
#define WASM_NOT(x) x, kExprI32Eqz
#define WASM_SEQ(...) __VA_ARGS__
@@ -493,6 +495,14 @@ class LocalDeclEncoder {
#define WASM_I32_EQZ(x) x, kExprI32Eqz
//------------------------------------------------------------------------------
+// Asmjs Int32 operations
+//------------------------------------------------------------------------------
+#define WASM_I32_ASMJS_DIVS(x, y) x, y, kExprI32AsmjsDivS
+#define WASM_I32_ASMJS_REMS(x, y) x, y, kExprI32AsmjsRemS
+#define WASM_I32_ASMJS_DIVU(x, y) x, y, kExprI32AsmjsDivU
+#define WASM_I32_ASMJS_REMU(x, y) x, y, kExprI32AsmjsRemU
+
+//------------------------------------------------------------------------------
// Int64 operations
//------------------------------------------------------------------------------
#define WASM_I64_ADD(x, y) x, y, kExprI64Add
@@ -605,8 +615,8 @@ class LocalDeclEncoder {
//------------------------------------------------------------------------------
// Memory Operations.
//------------------------------------------------------------------------------
-#define WASM_GROW_MEMORY(x) x, kExprGrowMemory
-#define WASM_MEMORY_SIZE kExprMemorySize
+#define WASM_GROW_MEMORY(x) x, kExprGrowMemory, 0
+#define WASM_MEMORY_SIZE kExprMemorySize, 0
//------------------------------------------------------------------------------
// Simd Operations.
@@ -614,6 +624,11 @@ class LocalDeclEncoder {
#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
+#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
+#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
+ x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 084f5a0c1a..290e98ecf8 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -54,11 +54,13 @@ WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
func_index_(static_cast<uint32_t>(builder->functions_.size())),
body_(builder->zone()),
name_(builder->zone()),
+ exported_name_(builder->zone()),
i32_temps_(builder->zone()),
i64_temps_(builder->zone()),
f32_temps_(builder->zone()),
f64_temps_(builder->zone()),
- direct_calls_(builder->zone()) {}
+ direct_calls_(builder->zone()),
+ asm_offsets_(builder->zone(), 8) {}
void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
byte buffer[8];
@@ -139,15 +141,31 @@ void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
EmitCode(code, sizeof(code));
}
-void WasmFunctionBuilder::SetExported() { exported_ = true; }
+void WasmFunctionBuilder::Export() { exported_ = true; }
-void WasmFunctionBuilder::SetName(const char* name, int name_length) {
- name_.clear();
- if (name_length > 0) {
- for (int i = 0; i < name_length; ++i) {
- name_.push_back(*(name + i));
- }
- }
+void WasmFunctionBuilder::ExportAs(Vector<const char> name) {
+ exported_ = true;
+ exported_name_.resize(name.length());
+ memcpy(exported_name_.data(), name.start(), name.length());
+}
+
+void WasmFunctionBuilder::SetName(Vector<const char> name) {
+ name_.resize(name.length());
+ memcpy(name_.data(), name.start(), name.length());
+}
+
+void WasmFunctionBuilder::AddAsmWasmOffset(int asm_position) {
+ // We only want to emit one mapping per byte offset:
+ DCHECK(asm_offsets_.size() == 0 || body_.size() > last_asm_byte_offset_);
+
+ DCHECK_LE(body_.size(), kMaxUInt32);
+ uint32_t byte_offset = static_cast<uint32_t>(body_.size());
+ asm_offsets_.write_u32v(byte_offset - last_asm_byte_offset_);
+ last_asm_byte_offset_ = byte_offset;
+
+ DCHECK_GE(asm_position, 0);
+ asm_offsets_.write_i32v(asm_position - last_asm_source_position_);
+ last_asm_source_position_ = asm_position;
}
void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
@@ -156,10 +174,11 @@ void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer) const {
if (exported_) {
- buffer.write_size(name_.size());
- if (name_.size() > 0) {
- buffer.write(reinterpret_cast<const byte*>(&name_[0]), name_.size());
- }
+ const ZoneVector<char>* exported_name =
+ exported_name_.size() == 0 ? &name_ : &exported_name_;
+ buffer.write_size(exported_name->size());
+ buffer.write(reinterpret_cast<const byte*>(exported_name->data()),
+ exported_name->size());
buffer.write_u8(kExternalFunction);
buffer.write_u32v(func_index_ +
static_cast<uint32_t>(builder_->imports_.size()));
@@ -184,6 +203,18 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
}
}
+void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
+ if (asm_offsets_.size() == 0) {
+ buffer.write_size(0);
+ return;
+ }
+ buffer.write_size(asm_offsets_.size() + kInt32Size);
+ // Offset of the recorded byte offsets.
+ DCHECK_GE(kMaxUInt32, locals_.Size());
+ buffer.write_u32(static_cast<uint32_t>(locals_.Size()));
+ buffer.write(asm_offsets_.begin(), asm_offsets_.size());
+}
+
WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
@@ -255,8 +286,9 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
}
uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported,
- bool mutability) {
- globals_.push_back({type, exported, mutability});
+ bool mutability,
+ const WasmInitExpr& init) {
+ globals_.push_back({type, exported, mutability, init});
return static_cast<uint32_t>(globals_.size() - 1);
}
@@ -332,7 +364,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_u8(1); // memory count
buffer.write_u32v(kResizableMaximumFlag);
buffer.write_u32v(16); // min memory size
- buffer.write_u32v(16); // max memory size
+ buffer.write_u32v(32); // max memory size
FixupSection(buffer, start);
}
@@ -344,29 +376,64 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
for (auto global : globals_) {
buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.type));
buffer.write_u8(global.mutability ? 1 : 0);
- switch (global.type) {
- case kAstI32: {
- static const byte code[] = {WASM_I32V_1(0)};
+ switch (global.init.kind) {
+ case WasmInitExpr::kI32Const: {
+ DCHECK_EQ(kAstI32, global.type);
+ const byte code[] = {WASM_I32V_5(global.init.val.i32_const)};
buffer.write(code, sizeof(code));
break;
}
- case kAstF32: {
- static const byte code[] = {WASM_F32(0)};
+ case WasmInitExpr::kI64Const: {
+ DCHECK_EQ(kAstI64, global.type);
+ const byte code[] = {WASM_I64V_10(global.init.val.i64_const)};
buffer.write(code, sizeof(code));
break;
}
- case kAstI64: {
- static const byte code[] = {WASM_I64V_1(0)};
+ case WasmInitExpr::kF32Const: {
+ DCHECK_EQ(kAstF32, global.type);
+ const byte code[] = {WASM_F32(global.init.val.f32_const)};
buffer.write(code, sizeof(code));
break;
}
- case kAstF64: {
- static const byte code[] = {WASM_F64(0.0)};
+ case WasmInitExpr::kF64Const: {
+ DCHECK_EQ(kAstF64, global.type);
+ const byte code[] = {WASM_F64(global.init.val.f64_const)};
buffer.write(code, sizeof(code));
break;
}
- default:
- UNREACHABLE();
+ case WasmInitExpr::kGlobalIndex: {
+ const byte code[] = {kExprGetGlobal,
+ U32V_5(global.init.val.global_index)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ default: {
+ // No initializer, emit a default value.
+ switch (global.type) {
+ case kAstI32: {
+ const byte code[] = {WASM_I32V_1(0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ case kAstI64: {
+ const byte code[] = {WASM_I64V_1(0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ case kAstF32: {
+ const byte code[] = {WASM_F32(0.0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ case kAstF64: {
+ const byte code[] = {WASM_F64(0.0)};
+ buffer.write(code, sizeof(code));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ }
}
buffer.write_u8(kExprEnd);
}
@@ -442,7 +509,12 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(4);
buffer.write(reinterpret_cast<const byte*>("name"), 4);
// Emit the names.
- buffer.write_size(functions_.size());
+ size_t count = functions_.size() + imports_.size();
+ buffer.write_size(count);
+ for (size_t i = 0; i < imports_.size(); i++) {
+ buffer.write_u8(0); // empty name for import
+ buffer.write_u8(0); // no local variables
+ }
for (auto function : functions_) {
buffer.write_size(function->name_.size());
if (function->name_.size() > 0) {
@@ -454,6 +526,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
FixupSection(buffer, start);
}
}
+
+void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
+ // == Emit asm.js offset table ===============================================
+ buffer.write_size(functions_.size());
+ // Emit the offset table per function.
+ for (auto function : functions_) {
+ function->WriteAsmWasmOffsetTable(buffer);
+ }
+}
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index dcaf6c8e86..d35313ef47 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -49,6 +49,11 @@ class ZoneBuffer : public ZoneObject {
LEBHelper::write_u32v(&pos_, val);
}
+ void write_i32v(int32_t val) {
+ EnsureSpace(kMaxVarInt32Size);
+ LEBHelper::write_i32v(&pos_, val);
+ }
+
void write_size(size_t val) {
EnsureSpace(kMaxVarInt32Size);
DCHECK_EQ(val, static_cast<uint32_t>(val));
@@ -83,10 +88,10 @@ class ZoneBuffer : public ZoneObject {
}
}
- size_t offset() { return static_cast<size_t>(pos_ - buffer_); }
- size_t size() { return static_cast<size_t>(pos_ - buffer_); }
- const byte* begin() { return buffer_; }
- const byte* end() { return pos_; }
+ size_t offset() const { return static_cast<size_t>(pos_ - buffer_); }
+ size_t size() const { return static_cast<size_t>(pos_ - buffer_); }
+ const byte* begin() const { return buffer_; }
+ const byte* end() const { return pos_; }
void EnsureSpace(size_t size) {
if ((pos_ + size) > end_) {
@@ -127,12 +132,15 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
void EmitDirectCallIndex(uint32_t index);
- void SetExported();
- void SetName(const char* name, int name_length);
+ void Export();
+ void ExportAs(Vector<const char> name);
+ void SetName(Vector<const char> name);
+ void AddAsmWasmOffset(int asm_position);
void WriteSignature(ZoneBuffer& buffer) const;
void WriteExport(ZoneBuffer& buffer) const;
void WriteBody(ZoneBuffer& buffer) const;
+ void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
bool exported() { return exported_; }
uint32_t func_index() { return func_index_; }
@@ -155,11 +163,17 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
uint32_t func_index_;
ZoneVector<uint8_t> body_;
ZoneVector<char> name_;
+ ZoneVector<char> exported_name_;
ZoneVector<uint32_t> i32_temps_;
ZoneVector<uint32_t> i64_temps_;
ZoneVector<uint32_t> f32_temps_;
ZoneVector<uint32_t> f64_temps_;
ZoneVector<DirectCallIndex> direct_calls_;
+
+ // Delta-encoded mapping from wasm bytes to asm.js source positions.
+ ZoneBuffer asm_offsets_;
+ uint32_t last_asm_byte_offset_ = 0;
+ uint32_t last_asm_source_position_ = 0;
};
class WasmTemporary {
@@ -212,7 +226,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
imports_[index].name_length = name_length;
}
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
- uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true);
+ uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true,
+ const WasmInitExpr& init = WasmInitExpr());
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
void AddIndirectFunction(uint32_t index);
@@ -220,7 +235,10 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
// Writing methods.
void WriteTo(ZoneBuffer& buffer) const;
+ void WriteAsmJsOffsetTable(ZoneBuffer& buffer) const;
+ // TODO(titzer): use SignatureMap from signature-map.h here.
+ // This signature map is zone-allocated, but the other is heap allocated.
struct CompareFunctionSigs {
bool operator()(FunctionSig* a, FunctionSig* b) const;
};
@@ -241,6 +259,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
LocalType type;
bool exported;
bool mutability;
+ WasmInitExpr init;
};
struct WasmDataSegment {
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index f4cf505f5a..79b99fe04d 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -16,17 +16,16 @@
#include "src/wasm/ast-decoder.h"
#include "src/wasm/module-decoder.h"
-#include "src/wasm/wasm-debug.h"
-#include "src/wasm/wasm-function-name-table.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
#include "src/compiler/wasm-compiler.h"
-namespace v8 {
-namespace internal {
-namespace wasm {
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+namespace base = v8::base;
#define TRACE(...) \
do { \
@@ -40,132 +39,39 @@ namespace wasm {
namespace {
+static const int kInvalidSigIndex = -1;
static const int kPlaceholderMarker = 1000000000;
-enum JSFunctionExportInternalField {
- kInternalModuleInstance,
- kInternalArity,
- kInternalSignature
-};
-
-// Internal constants for the layout of the module object.
-enum WasmInstanceObjectFields {
- kWasmCompiledModule = 0,
- kWasmModuleFunctionTable,
- kWasmModuleCodeTable,
- kWasmMemArrayBuffer,
- kWasmGlobalsArrayBuffer,
- // TODO(clemensh): Remove function name array, extract names from module
- // bytes.
- kWasmFunctionNamesArray,
- kWasmModuleBytesString,
- kWasmDebugInfo,
- kWasmNumImportedFunctions,
- kWasmModuleInternalFieldCount
-};
-
-enum WasmImportData {
- kModuleName, // String
- kFunctionName, // maybe String
- kOutputCount, // Smi. an uint32_t
- kSignature, // ByteArray. A copy of the data in FunctionSig
- kWasmImportDataSize // Sentinel value.
-};
-
-enum WasmExportData {
- kExportName, // String
- kExportArity, // Smi, an int
- kExportedFunctionIndex, // Smi, an uint32_t
- kExportedSignature, // ByteArray. A copy of the data in FunctionSig
- kWasmExportDataSize // Sentinel value.
-};
-
-enum WasmSegmentInfo {
- kDestAddr, // Smi. an uint32_t
- kSourceSize, // Smi. an uint32_t
- kWasmSegmentInfoSize // Sentinel value.
-};
-
-enum WasmIndirectFunctionTableData {
- kSize, // Smi. an uint32_t
- kTable, // FixedArray of indirect function table
- kWasmIndirectFunctionTableDataSize // Sentinel value.
-};
-
-uint32_t GetMinModuleMemSize(const WasmModule* module) {
- return WasmModule::kPageSize * module->min_mem_pages;
+byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
+ return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
-void LoadDataSegments(Handle<WasmCompiledModule> compiled_module,
- Address mem_addr, size_t mem_size) {
- CHECK(compiled_module->has_data_segments() ==
- compiled_module->has_data_segments_info());
-
- // If we have neither, we're done.
- if (!compiled_module->has_data_segments()) return;
-
- Handle<ByteArray> data = compiled_module->data_segments();
- Handle<FixedArray> segments = compiled_module->data_segments_info();
-
- uint32_t last_extraction_pos = 0;
- for (int i = 0; i < segments->length(); ++i) {
- Handle<ByteArray> segment =
- Handle<ByteArray>(ByteArray::cast(segments->get(i)));
- uint32_t dest_addr = static_cast<uint32_t>(segment->get_int(kDestAddr));
- uint32_t source_size = static_cast<uint32_t>(segment->get_int(kSourceSize));
- CHECK_LT(dest_addr, mem_size);
- CHECK_LE(source_size, mem_size);
- CHECK_LE(dest_addr, mem_size - source_size);
- byte* addr = mem_addr + dest_addr;
- data->copy_out(last_extraction_pos, addr, source_size);
- last_extraction_pos += source_size;
- }
+MaybeHandle<String> ExtractStringFromModuleBytes(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t offset, uint32_t size) {
+ // TODO(wasm): cache strings from modules if it's a performance win.
+ Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
+ DCHECK_GE(static_cast<size_t>(module_bytes->length()), offset);
+ DCHECK_GE(static_cast<size_t>(module_bytes->length() - offset), size);
+ Address raw = module_bytes->GetCharsAddress() + offset;
+ if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
+ return {}; // UTF8 decoding error for name.
+ return isolate->factory()->NewStringFromUtf8SubString(
+ module_bytes, static_cast<int>(offset), static_cast<int>(size));
}
-void SaveDataSegmentInfo(Factory* factory, const WasmModule* module,
- Handle<WasmCompiledModule> compiled_module) {
- Handle<FixedArray> segments = factory->NewFixedArray(
- static_cast<int>(module->data_segments.size()), TENURED);
- uint32_t data_size = 0;
- for (const WasmDataSegment& segment : module->data_segments) {
- if (segment.source_size == 0) continue;
- data_size += segment.source_size;
- }
- Handle<ByteArray> data = factory->NewByteArray(data_size, TENURED);
-
- uint32_t last_insertion_pos = 0;
- for (uint32_t i = 0; i < module->data_segments.size(); ++i) {
- const WasmDataSegment& segment = module->data_segments[i];
- if (segment.source_size == 0) continue;
- Handle<ByteArray> js_segment =
- factory->NewByteArray(kWasmSegmentInfoSize * sizeof(uint32_t), TENURED);
- // TODO(titzer): add support for global offsets for dest_addr
- CHECK_EQ(WasmInitExpr::kI32Const, segment.dest_addr.kind);
- js_segment->set_int(kDestAddr, segment.dest_addr.val.i32_const);
- js_segment->set_int(kSourceSize, segment.source_size);
- segments->set(i, *js_segment);
- data->copy_in(last_insertion_pos,
- module->module_start + segment.source_offset,
- segment.source_size);
- last_insertion_pos += segment.source_size;
- }
- compiled_module->set_data_segments_info(segments);
- compiled_module->set_data_segments(data);
-}
-
-void PatchFunctionTable(Handle<Code> code,
- Handle<FixedArray> old_indirect_table,
- Handle<FixedArray> new_indirect_table) {
+void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
+ Handle<Object> new_ref) {
for (RelocIterator it(*code, 1 << RelocInfo::EMBEDDED_OBJECT); !it.done();
it.next()) {
- if (it.rinfo()->target_object() == *old_indirect_table) {
- it.rinfo()->set_target_object(*new_indirect_table);
+ if (it.rinfo()->target_object() == *old_ref) {
+ it.rinfo()->set_target_object(*new_ref);
}
}
}
Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
- if (size > (WasmModule::kMaxMemPages * WasmModule::kPageSize)) {
+ if (size > (WasmModule::kV8MaxPages * WasmModule::kPageSize)) {
// TODO(titzer): lift restriction on maximum memory allocated here.
return Handle<JSArrayBuffer>::null();
}
@@ -188,49 +94,30 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
return buffer;
}
-void RelocateInstanceCode(Handle<JSObject> instance, Address old_start,
- Address start, uint32_t prev_size,
- uint32_t new_size) {
- Handle<FixedArray> functions = Handle<FixedArray>(
- FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
- for (int i = 0; i < functions->length(); ++i) {
- Handle<Code> function = Handle<Code>(Code::cast(functions->get(i)));
+void RelocateMemoryReferencesInCode(Handle<FixedArray> code_table,
+ Address old_start, Address start,
+ uint32_t prev_size, uint32_t new_size) {
+ for (int i = 0; i < code_table->length(); ++i) {
+ DCHECK(code_table->get(i)->IsCode());
+ Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
AllowDeferredHandleDereference embedding_raw_address;
int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
(1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
it.rinfo()->update_wasm_memory_reference(old_start, start, prev_size,
new_size);
}
}
}
-// Allocate memory for a module instance as a new JSArrayBuffer.
-Handle<JSArrayBuffer> AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
- uint32_t min_mem_pages) {
- if (min_mem_pages > WasmModule::kMaxMemPages) {
- thrower->Error("Out of memory: wasm memory too large");
- return Handle<JSArrayBuffer>::null();
- }
- Handle<JSArrayBuffer> mem_buffer =
- NewArrayBuffer(isolate, min_mem_pages * WasmModule::kPageSize);
-
- if (mem_buffer.is_null()) {
- thrower->Error("Out of memory: wasm memory");
- }
- return mem_buffer;
-}
-
-void RelocateGlobals(Handle<JSObject> instance, Address old_start,
+void RelocateGlobals(Handle<FixedArray> code_table, Address old_start,
Address globals_start) {
- Handle<FixedArray> functions = Handle<FixedArray>(
- FixedArray::cast(instance->GetInternalField(kWasmModuleCodeTable)));
- uint32_t function_count = static_cast<uint32_t>(functions->length());
- for (uint32_t i = 0; i < function_count; ++i) {
- Handle<Code> function = Handle<Code>(Code::cast(functions->get(i)));
+ for (int i = 0; i < code_table->length(); ++i) {
+ DCHECK(code_table->get(i)->IsCode());
+ Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
AllowDeferredHandleDereference embedding_raw_address;
int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
- for (RelocIterator it(*function, mask); !it.done(); it.next()) {
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
it.rinfo()->update_wasm_global_reference(old_start, globals_start);
}
}
@@ -240,8 +127,8 @@ Handle<Code> CreatePlaceholder(Factory* factory, uint32_t index,
Code::Kind kind) {
// Create a placeholder code object and encode the corresponding index in
// the {constant_pool_offset} field of the code object.
- // TODO(titzer): placeholder code objects are somewhat dangerous.
- static byte buffer[] = {0, 0, 0, 0, 0, 0, 0, 0}; // fake instructions.
+ // TODO(titzer): instead of placeholders, use a reloc_info mode.
+ static byte buffer[] = {0, 0, 0, 0}; // fake instructions.
static CodeDesc desc = {
buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
Handle<Code> code = factory->NewCode(desc, Code::KindField::encode(kind),
@@ -284,9 +171,9 @@ bool LinkFunction(Handle<Code> unlinked,
return modified;
}
-void FlushICache(Isolate* isolate, Handle<FixedArray> functions) {
- for (int i = 0; i < functions->length(); ++i) {
- Handle<Code> code = functions->GetValueChecked<Code>(isolate, i);
+void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
Assembler::FlushICache(isolate, code->instruction_start(),
code->instruction_size());
}
@@ -365,190 +252,16 @@ static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
}
Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
- JSObject* owner) {
+ JSObject* object) {
+ auto instance = WasmInstanceObject::cast(object);
Address old_address = nullptr;
- Object* stored_value = owner->GetInternalField(kWasmGlobalsArrayBuffer);
- if (stored_value != undefined) {
- old_address = static_cast<Address>(
- JSArrayBuffer::cast(stored_value)->backing_store());
+ if (instance->has_globals_buffer()) {
+ old_address =
+ static_cast<Address>(instance->get_globals_buffer()->backing_store());
}
return old_address;
}
-Handle<FixedArray> GetImportsData(Factory* factory, const WasmModule* module) {
- Handle<FixedArray> ret = factory->NewFixedArray(
- static_cast<int>(module->import_table.size()), TENURED);
- for (size_t i = 0; i < module->import_table.size(); ++i) {
- const WasmImport& import = module->import_table[i];
- if (import.kind != kExternalFunction) continue;
- WasmName module_name = module->GetNameOrNull(import.module_name_offset,
- import.module_name_length);
- WasmName function_name = module->GetNameOrNull(import.field_name_offset,
- import.field_name_length);
-
- Handle<String> module_name_string =
- factory->InternalizeUtf8String(module_name);
- Handle<String> function_name_string =
- function_name.is_empty()
- ? Handle<String>::null()
- : factory->InternalizeUtf8String(function_name);
- FunctionSig* fsig = module->functions[import.index].sig;
- Handle<ByteArray> sig = factory->NewByteArray(
- static_cast<int>(fsig->parameter_count() + fsig->return_count()),
- TENURED);
- sig->copy_in(0, reinterpret_cast<const byte*>(fsig->raw_data()),
- sig->length());
- Handle<FixedArray> encoded_import =
- factory->NewFixedArray(kWasmImportDataSize, TENURED);
- encoded_import->set(kModuleName, *module_name_string);
- if (!function_name_string.is_null()) {
- encoded_import->set(kFunctionName, *function_name_string);
- }
- encoded_import->set(kOutputCount,
- Smi::FromInt(static_cast<int>(fsig->return_count())));
- encoded_import->set(kSignature, *sig);
- ret->set(static_cast<int>(i), *encoded_import);
- }
- return ret;
-}
-
-static MaybeHandle<JSFunction> ReportFFIError(
- ErrorThrower* thrower, const char* error, uint32_t index,
- Handle<String> module_name, MaybeHandle<String> function_name) {
- Handle<String> function_name_handle;
- if (function_name.ToHandle(&function_name_handle)) {
- thrower->Error("Import #%d module=\"%.*s\" function=\"%.*s\" error: %s",
- index, module_name->length(), module_name->ToCString().get(),
- function_name_handle->length(),
- function_name_handle->ToCString().get(), error);
- } else {
- thrower->Error("Import #%d module=\"%.*s\" error: %s", index,
- module_name->length(), module_name->ToCString().get(),
- error);
- }
- thrower->Error("Import ");
- return MaybeHandle<JSFunction>();
-}
-
-static MaybeHandle<JSReceiver> LookupFunction(
- ErrorThrower* thrower, Factory* factory, Handle<JSReceiver> ffi,
- uint32_t index, Handle<String> module_name,
- MaybeHandle<String> function_name) {
- if (ffi.is_null()) {
- return ReportFFIError(thrower, "FFI is not an object", index, module_name,
- function_name);
- }
-
- // Look up the module first.
- MaybeHandle<Object> result = Object::GetProperty(ffi, module_name);
- if (result.is_null()) {
- return ReportFFIError(thrower, "module not found", index, module_name,
- function_name);
- }
-
- Handle<Object> module = result.ToHandleChecked();
-
- if (!module->IsJSReceiver()) {
- return ReportFFIError(thrower, "module is not an object or function", index,
- module_name, function_name);
- }
-
- Handle<Object> function;
- if (!function_name.is_null()) {
- // Look up the function in the module.
- MaybeHandle<Object> result =
- Object::GetProperty(module, function_name.ToHandleChecked());
- if (result.is_null()) {
- return ReportFFIError(thrower, "function not found", index, module_name,
- function_name);
- }
- function = result.ToHandleChecked();
- } else {
- // No function specified. Use the "default export".
- function = module;
- }
-
- if (!function->IsCallable()) {
- return ReportFFIError(thrower, "not a callable", index, module_name,
- function_name);
- }
-
- return Handle<JSReceiver>::cast(function);
-}
-
-Handle<Code> CompileImportWrapper(Isolate* isolate,
- const Handle<JSReceiver> ffi, int index,
- Handle<FixedArray> import_data,
- ErrorThrower* thrower) {
- Handle<FixedArray> data =
- import_data->GetValueChecked<FixedArray>(isolate, index);
- Handle<String> module_name =
- data->GetValueChecked<String>(isolate, kModuleName);
- MaybeHandle<String> function_name =
- data->GetValue<String>(isolate, kFunctionName);
-
- // TODO(mtrofin): this is an uint32_t, actually. We should rationalize
- // it when we rationalize signed/unsigned stuff.
- int ret_count = Smi::cast(data->get(kOutputCount))->value();
- CHECK_GE(ret_count, 0);
- Handle<ByteArray> sig_data =
- data->GetValueChecked<ByteArray>(isolate, kSignature);
- int sig_data_size = sig_data->length();
- int param_count = sig_data_size - ret_count;
- CHECK(param_count >= 0);
-
- MaybeHandle<JSReceiver> function = LookupFunction(
- thrower, isolate->factory(), ffi, index, module_name, function_name);
- if (function.is_null()) return Handle<Code>::null();
- Handle<Code> code;
- Handle<JSReceiver> target = function.ToHandleChecked();
- bool isMatch = false;
- Handle<Code> export_wrapper_code;
- if (target->IsJSFunction()) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(target);
- export_wrapper_code = handle(func->code());
- if (export_wrapper_code->kind() == Code::JS_TO_WASM_FUNCTION) {
- int exported_param_count =
- Smi::cast(func->GetInternalField(kInternalArity))->value();
- Handle<ByteArray> exportedSig = Handle<ByteArray>(
- ByteArray::cast(func->GetInternalField(kInternalSignature)));
- if (exported_param_count == param_count &&
- exportedSig->length() == sig_data->length() &&
- memcmp(exportedSig->data(), sig_data->data(),
- exportedSig->length()) == 0) {
- isMatch = true;
- }
- }
- }
- if (isMatch) {
- int wasm_count = 0;
- int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- if (target->kind() == Code::WASM_FUNCTION) {
- ++wasm_count;
- code = handle(target);
- }
- }
- DCHECK(wasm_count == 1);
- return code;
- } else {
- // Copy the signature to avoid a raw pointer into a heap object when
- // GC can happen.
- Zone zone(isolate->allocator());
- MachineRepresentation* reps =
- zone.NewArray<MachineRepresentation>(sig_data_size);
- memcpy(reps, sig_data->data(),
- sizeof(MachineRepresentation) * sig_data_size);
- FunctionSig sig(ret_count, param_count, reps);
-
- return compiler::CompileWasmToJSWrapper(isolate, target, &sig, index,
- module_name, function_name);
- }
-}
-
void InitializeParallelCompilation(
Isolate* isolate, const std::vector<WasmFunction>& functions,
std::vector<compiler::WasmCompilationUnit*>& compilation_units,
@@ -590,7 +303,8 @@ void WaitForCompilationTasks(Isolate* isolate, uint32_t* task_ids,
for (size_t i = 0; i < num_tasks; ++i) {
// If the task has not started yet, then we abort it. Otherwise we wait for
// it to finish.
- if (!isolate->cancelable_task_manager()->TryAbort(task_ids[i])) {
+ if (isolate->cancelable_task_manager()->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
pending_tasks->Wait();
}
}
@@ -695,8 +409,8 @@ void CompileSequentially(Isolate* isolate, const WasmModule* module,
code = compiler::WasmCompilationUnit::CompileWasmFunction(
thrower, isolate, module_env, &func);
if (code.is_null()) {
- thrower->Error("Compilation of #%d:%.*s failed.", i, str.length(),
- str.start());
+ thrower->CompileError("Compilation of #%d:%.*s failed.", i, str.length(),
+ str.start());
break;
}
// Install the code into the linker table.
@@ -736,19 +450,26 @@ void PatchDirectCalls(Handle<FixedArray> old_functions,
}
}
-static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
+static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
WasmCompiledModule* compiled_module) {
TRACE("Resetting %d\n", compiled_module->instance_id());
Object* undefined = *isolate->factory()->undefined_value();
- uint32_t old_mem_size = compiled_module->has_heap()
- ? compiled_module->mem_size()
- : compiled_module->default_mem_size();
+ uint32_t old_mem_size = compiled_module->mem_size();
uint32_t default_mem_size = compiled_module->default_mem_size();
- Object* mem_start = compiled_module->ptr_to_heap();
+ Object* mem_start = compiled_module->ptr_to_memory();
Address old_mem_address = nullptr;
Address globals_start =
GetGlobalStartAddressFromCodeTemplate(undefined, owner);
+ // Reset function tables.
+ FixedArray* function_tables = nullptr;
+ FixedArray* empty_function_tables = nullptr;
+ if (compiled_module->has_function_tables()) {
+ function_tables = compiled_module->ptr_to_function_tables();
+ empty_function_tables = compiled_module->ptr_to_empty_function_tables();
+ compiled_module->set_ptr_to_function_tables(empty_function_tables);
+ }
+
if (old_mem_size > 0) {
CHECK_NE(mem_start, undefined);
old_mem_address =
@@ -756,11 +477,14 @@ static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
}
int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE);
+ RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ // Patch code to update memory references, global references, and function
+ // table references.
Object* fct_obj = compiled_module->ptr_to_code_table();
if (fct_obj != nullptr && fct_obj != undefined &&
- (old_mem_size > 0 || globals_start != nullptr)) {
+ (old_mem_size > 0 || globals_start != nullptr || function_tables)) {
FixedArray* functions = FixedArray::cast(fct_obj);
for (int i = 0; i < functions->length(); ++i) {
Code* code = Code::cast(functions->get(i));
@@ -772,10 +496,17 @@ static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
it.rinfo()->update_wasm_memory_reference(
old_mem_address, nullptr, old_mem_size, default_mem_size);
changed = true;
- } else {
- CHECK(RelocInfo::IsWasmGlobalReference(mode));
+ } else if (RelocInfo::IsWasmGlobalReference(mode)) {
it.rinfo()->update_wasm_global_reference(globals_start, nullptr);
changed = true;
+ } else if (RelocInfo::IsEmbeddedObject(mode) && function_tables) {
+ Object* old = it.rinfo()->target_object();
+ for (int j = 0; j < function_tables->length(); ++j) {
+ if (function_tables->get(j) == old) {
+ it.rinfo()->set_target_object(empty_function_tables->get(j));
+ changed = true;
+ }
+ }
}
}
if (changed) {
@@ -784,26 +515,25 @@ static void ResetCompiledModule(Isolate* isolate, JSObject* owner,
}
}
}
- compiled_module->reset_heap();
+ compiled_module->reset_memory();
}
static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
- JSObject* owner = *p;
- WasmCompiledModule* compiled_module =
- WasmCompiledModule::cast(owner->GetInternalField(kWasmCompiledModule));
+ WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
+ WasmCompiledModule* compiled_module = owner->get_compiled_module();
TRACE("Finalizing %d {\n", compiled_module->instance_id());
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
- DCHECK(compiled_module->has_weak_module_object());
- WeakCell* weak_module_obj = compiled_module->ptr_to_weak_module_object();
+ DCHECK(compiled_module->has_weak_wasm_module());
+ WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
- // weak_module_obj may have been cleared, meaning the module object
+ // weak_wasm_module may have been cleared, meaning the module object
// was GC-ed. In that case, there won't be any new instances created,
// and we don't need to maintain the links between instances.
- if (!weak_module_obj->cleared()) {
- JSObject* module_obj = JSObject::cast(weak_module_obj->value());
+ if (!weak_wasm_module->cleared()) {
+ JSObject* wasm_module = JSObject::cast(weak_wasm_module->value());
WasmCompiledModule* current_template =
- WasmCompiledModule::cast(module_obj->GetInternalField(0));
+ WasmCompiledModule::cast(wasm_module->GetInternalField(0));
TRACE("chain before {\n");
TRACE_CHAIN(current_template);
@@ -818,7 +548,7 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
ResetCompiledModule(isolate, owner, compiled_module);
} else {
DCHECK(next->value()->IsFixedArray());
- module_obj->SetInternalField(0, next->value());
+ wasm_module->SetInternalField(0, next->value());
DCHECK_NULL(prev);
WasmCompiledModule::cast(next->value())->reset_weak_prev_instance();
}
@@ -847,7 +577,7 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
}
}
TRACE("chain after {\n");
- TRACE_CHAIN(WasmCompiledModule::cast(module_obj->GetInternalField(0)));
+ TRACE_CHAIN(WasmCompiledModule::cast(wasm_module->GetInternalField(0)));
TRACE("}\n");
}
compiled_module->reset_weak_owning_instance();
@@ -855,39 +585,21 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
TRACE("}\n");
}
-Handle<FixedArray> SetupIndirectFunctionTable(
- Isolate* isolate, Handle<FixedArray> wasm_functions,
- Handle<FixedArray> indirect_table_template,
- Handle<FixedArray> tables_to_replace) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> cloned_indirect_tables =
- factory->CopyFixedArray(indirect_table_template);
- for (int i = 0; i < cloned_indirect_tables->length(); ++i) {
- Handle<FixedArray> orig_metadata =
- cloned_indirect_tables->GetValueChecked<FixedArray>(isolate, i);
- Handle<FixedArray> cloned_metadata = factory->CopyFixedArray(orig_metadata);
- cloned_indirect_tables->set(i, *cloned_metadata);
-
- Handle<FixedArray> orig_table =
- cloned_metadata->GetValueChecked<FixedArray>(isolate, kTable);
- Handle<FixedArray> cloned_table = factory->CopyFixedArray(orig_table);
- cloned_metadata->set(kTable, *cloned_table);
- // Patch the cloned code to refer to the cloned kTable.
- Handle<FixedArray> table_to_replace =
- tables_to_replace->GetValueChecked<FixedArray>(isolate, i)
- ->GetValueChecked<FixedArray>(isolate, kTable);
- for (int fct_index = 0; fct_index < wasm_functions->length(); ++fct_index) {
- Handle<Code> wasm_function =
- wasm_functions->GetValueChecked<Code>(isolate, fct_index);
- PatchFunctionTable(wasm_function, table_to_replace, cloned_table);
- }
+std::pair<int, int> GetFunctionOffsetAndLength(
+ Handle<WasmCompiledModule> compiled_module, int func_index) {
+ WasmModule* module = compiled_module->module();
+ if (func_index < 0 ||
+ static_cast<size_t>(func_index) > module->functions.size()) {
+ return {0, 0};
}
- return cloned_indirect_tables;
+ WasmFunction& func = module->functions[func_index];
+ return {static_cast<int>(func.code_start_offset),
+ static_cast<int>(func.code_end_offset - func.code_start_offset)};
}
} // namespace
-const char* SectionName(WasmSectionCode code) {
+const char* wasm::SectionName(WasmSectionCode code) {
switch (code) {
case kUnknownSectionCode:
return "Unknown";
@@ -920,7 +632,7 @@ const char* SectionName(WasmSectionCode code) {
}
}
-std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
+std::ostream& wasm::operator<<(std::ostream& os, const WasmModule& module) {
os << "WASM module with ";
os << (module.min_mem_pages * module.kPageSize) << " min mem";
os << (module.max_mem_pages * module.kPageSize) << " max mem";
@@ -930,7 +642,7 @@ std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
return os;
}
-std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
+std::ostream& wasm::operator<<(std::ostream& os, const WasmFunction& function) {
os << "WASM function with signature " << *function.sig;
os << " code bytes: "
@@ -938,7 +650,7 @@ std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
return os;
}
-std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
+std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& pair) {
os << "#" << pair.function_->func_index << ":";
if (pair.function_->name_offset > 0) {
if (pair.module_) {
@@ -954,29 +666,7 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
return os;
}
-Handle<JSFunction> WrapExportCodeAsJSFunction(
- Isolate* isolate, Handle<Code> export_code, Handle<String> name, int arity,
- MaybeHandle<ByteArray> maybe_signature, Handle<JSObject> module_instance) {
- Handle<SharedFunctionInfo> shared =
- isolate->factory()->NewSharedFunctionInfo(name, export_code, false);
- shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
- Handle<JSFunction> function = isolate->factory()->NewFunction(
- isolate->wasm_function_map(), name, export_code);
- function->set_shared(*shared);
-
- function->SetInternalField(kInternalModuleInstance, *module_instance);
- // add another Internal Field as the function arity
- function->SetInternalField(kInternalArity, Smi::FromInt(arity));
- // add another Internal Field as the signature of the foreign function
- Handle<ByteArray> signature;
- if (maybe_signature.ToHandle(&signature)) {
- function->SetInternalField(kInternalSignature, *signature);
- }
- return function;
-}
-
-Object* GetOwningWasmInstance(Code* code) {
+Object* wasm::GetOwningWasmInstance(Code* code) {
DCHECK(code->kind() == Code::WASM_FUNCTION);
DisallowHeapAllocation no_gc;
FixedArray* deopt_data = code->deoptimization_data();
@@ -988,52 +678,65 @@ Object* GetOwningWasmInstance(Code* code) {
return cell->value();
}
-uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object) {
- return static_cast<uint32_t>(
- Smi::cast(wasm_object->GetInternalField(kWasmNumImportedFunctions))
- ->value());
+int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
+ int func_index) {
+ return GetFunctionOffsetAndLength(compiled_module, func_index).first;
+}
+
+bool wasm::GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
+ uint32_t position, Script::PositionInfo* info) {
+ std::vector<WasmFunction>& functions = compiled_module->module()->functions;
+
+ // Binary search for a function containing the given position.
+ int left = 0; // inclusive
+ int right = static_cast<int>(functions.size()); // exclusive
+ if (right == 0) return false;
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ if (functions[mid].code_start_offset <= position) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+ // If the found entry does not contains the given position, return false.
+ WasmFunction& func = functions[left];
+ if (position < func.code_start_offset || position >= func.code_end_offset) {
+ return false;
+ }
+
+ info->line = left;
+ info->column = position - func.code_start_offset;
+ info->line_start = func.code_start_offset;
+ info->line_end = func.code_end_offset;
+ return true;
}
-WasmModule::WasmModule(byte* module_start)
- : module_start(module_start),
- module_end(nullptr),
- min_mem_pages(0),
- max_mem_pages(0),
- mem_export(false),
- start_function_index(-1),
- origin(kWasmOrigin),
- globals_size(0),
- num_imported_functions(0),
- num_declared_functions(0),
- num_exported_functions(0),
+WasmModule::WasmModule(Zone* owned, const byte* module_start)
+ : owned_zone(owned),
+ module_start(module_start),
pending_tasks(new base::Semaphore(0)) {}
MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
- Isolate* isolate, ErrorThrower* thrower) const {
+ Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper,
+ ErrorThrower* thrower) const {
Factory* factory = isolate->factory();
MaybeHandle<WasmCompiledModule> nothing;
- WasmModuleInstance temp_instance(this);
+ WasmInstance temp_instance(this);
temp_instance.context = isolate->native_context();
- temp_instance.mem_size = GetMinModuleMemSize(this);
+ temp_instance.mem_size = WasmModule::kPageSize * this->min_mem_pages;
temp_instance.mem_start = nullptr;
temp_instance.globals_start = nullptr;
- MaybeHandle<FixedArray> indirect_table =
- function_tables.size()
- ? factory->NewFixedArray(static_cast<int>(function_tables.size()),
- TENURED)
- : MaybeHandle<FixedArray>();
- for (uint32_t i = 0; i < function_tables.size(); ++i) {
- Handle<FixedArray> values = wasm::BuildFunctionTable(isolate, i, this);
- temp_instance.function_tables[i] = values;
-
- Handle<FixedArray> metadata = isolate->factory()->NewFixedArray(
- kWasmIndirectFunctionTableDataSize, TENURED);
- metadata->set(kSize, Smi::FromInt(function_tables[i].size));
- metadata->set(kTable, *values);
- indirect_table.ToHandleChecked()->set(i, *metadata);
+ // Initialize the indirect tables with placeholders.
+ int function_table_count = static_cast<int>(this->function_tables.size());
+ Handle<FixedArray> function_tables =
+ factory->NewFixedArray(function_table_count);
+ for (int i = 0; i < function_table_count; ++i) {
+ temp_instance.function_tables[i] = factory->NewFixedArray(0);
+ function_tables->set(i, *temp_instance.function_tables[i]);
}
HistogramTimerScope wasm_compile_module_time_scope(
@@ -1052,7 +755,7 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
// Initialize the code table with placeholders.
- for (uint32_t i = 0; i < functions.size(); i++) {
+ for (uint32_t i = 0; i < functions.size(); ++i) {
Code::Kind kind = Code::WASM_FUNCTION;
if (i < num_imported_functions) kind = Code::WASM_TO_JS_FUNCTION;
Handle<Code> placeholder = CreatePlaceholder(factory, i, kind);
@@ -1066,12 +769,12 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
// Avoid a race condition by collecting results into a second vector.
std::vector<Handle<Code>> results;
results.reserve(temp_instance.function_code.size());
- for (size_t i = 0; i < temp_instance.function_code.size(); i++) {
+ for (size_t i = 0; i < temp_instance.function_code.size(); ++i) {
results.push_back(temp_instance.function_code[i]);
}
CompileInParallel(isolate, this, results, thrower, &module_env);
- for (size_t i = 0; i < results.size(); i++) {
+ for (size_t i = 0; i < results.size(); ++i) {
temp_instance.function_code[i] = results[i];
}
} else {
@@ -1103,68 +806,32 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
// and information needed at instantiation time. This object needs to be
// serializable. Instantiation may occur off a deserialized version of this
// object.
- Handle<WasmCompiledModule> ret = WasmCompiledModule::New(
- isolate, min_mem_pages, globals_size, mem_export, origin);
+ Handle<WasmCompiledModule> ret =
+ WasmCompiledModule::New(isolate, module_wrapper);
ret->set_code_table(code_table);
- if (!indirect_table.is_null()) {
- ret->set_indirect_function_tables(indirect_table.ToHandleChecked());
- }
- Handle<FixedArray> import_data = GetImportsData(factory, this);
- ret->set_import_data(import_data);
-
- // Compile exported function wrappers.
- int export_size = static_cast<int>(num_exported_functions);
- if (export_size > 0) {
- Handle<FixedArray> exports = factory->NewFixedArray(export_size, TENURED);
- int index = -1;
-
- for (const WasmExport& exp : export_table) {
- if (exp.kind != kExternalFunction)
- continue; // skip non-function exports.
- index++;
- Handle<FixedArray> export_data =
- factory->NewFixedArray(kWasmExportDataSize, TENURED);
- FunctionSig* funcSig = functions[exp.index].sig;
- Handle<ByteArray> exportedSig =
- factory->NewByteArray(static_cast<int>(funcSig->parameter_count() +
- funcSig->return_count()),
- TENURED);
- exportedSig->copy_in(0,
- reinterpret_cast<const byte*>(funcSig->raw_data()),
- exportedSig->length());
- export_data->set(kExportedSignature, *exportedSig);
- WasmName str = GetName(exp.name_offset, exp.name_length);
- Handle<String> name = factory->InternalizeUtf8String(str);
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, exp.index);
- Handle<Code> export_code = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, code, exp.index);
- if (thrower->error()) return nothing;
- export_data->set(kExportName, *name);
- export_data->set(kExportArity,
- Smi::FromInt(static_cast<int>(
- functions[exp.index].sig->parameter_count())));
- export_data->set(kExportedFunctionIndex,
- Smi::FromInt(static_cast<int>(exp.index)));
- exports->set(index, *export_data);
- code_table->set(static_cast<int>(functions.size() + index), *export_code);
- }
- ret->set_exports(exports);
+ ret->set_min_mem_pages(min_mem_pages);
+ ret->set_max_mem_pages(max_mem_pages);
+ if (function_table_count > 0) {
+ ret->set_function_tables(function_tables);
+ ret->set_empty_function_tables(function_tables);
+ }
+
+ // Compile JS->WASM wrappers for exported functions.
+ int func_index = 0;
+ for (auto exp : export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Handle<Code> wasm_code =
+ code_table->GetValueChecked<Code>(isolate, exp.index);
+ Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+ isolate, &module_env, wasm_code, exp.index);
+ int export_index = static_cast<int>(functions.size() + func_index);
+ code_table->set(export_index, *wrapper_code);
+ func_index++;
}
- // Record data for startup function.
- if (start_function_index >= 0) {
- HandleScope scope(isolate);
- Handle<FixedArray> startup_data =
- factory->NewFixedArray(kWasmExportDataSize, TENURED);
- startup_data->set(kExportArity, Smi::FromInt(0));
- startup_data->set(kExportedFunctionIndex,
- Smi::FromInt(start_function_index));
- ret->set_startup_function(startup_data);
- }
-
- // TODO(wasm): saving the module bytes for debugging is wasteful. We should
- // consider downloading this on-demand.
{
+ // TODO(wasm): only save the sections necessary to deserialize a
+ // {WasmModule}. E.g. function bodies could be omitted.
size_t module_bytes_len = module_end - module_start;
DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
Vector<const uint8_t> module_bytes_vec(module_start,
@@ -1172,644 +839,1185 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
Handle<String> module_bytes_string =
factory->NewStringFromOneByte(module_bytes_vec, TENURED)
.ToHandleChecked();
- ret->set_module_bytes(module_bytes_string);
+ DCHECK(module_bytes_string->IsSeqOneByteString());
+ ret->set_module_bytes(Handle<SeqOneByteString>::cast(module_bytes_string));
}
- Handle<ByteArray> function_name_table =
- BuildFunctionNamesTable(isolate, module_env.module);
- ret->set_function_names(function_name_table);
- if (data_segments.size() > 0) SaveDataSegmentInfo(factory, this, ret);
- DCHECK_EQ(ret->default_mem_size(), temp_instance.mem_size);
return ret;
}
-// Instantiates a WASM module, creating a WebAssembly.Instance from a
-// WebAssembly.Module.
-MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
- ErrorThrower* thrower,
- Handle<JSObject> module_object,
- Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory) {
- MaybeHandle<JSObject> nothing;
- HistogramTimerScope wasm_instantiate_module_time_scope(
- isolate->counters()->wasm_instantiate_module_time());
- Factory* factory = isolate->factory();
+static WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
+ Handle<Object> target) {
+ if (target->IsJSFunction()) {
+ Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+ if (func->code()->kind() == Code::JS_TO_WASM_FUNCTION) {
+ auto exported = Handle<WasmExportedFunction>::cast(func);
+ Handle<WasmInstanceObject> other_instance(exported->instance(), isolate);
+ int func_index = exported->function_index();
+ return &other_instance->module()->functions[func_index];
+ }
+ }
+ return nullptr;
+}
- //--------------------------------------------------------------------------
- // Reuse the compiled module (if no owner), otherwise clone.
- //--------------------------------------------------------------------------
- Handle<WasmCompiledModule> compiled_module;
- Handle<FixedArray> code_table;
- Handle<FixedArray> old_code_table;
- Handle<JSObject> owner;
- // If we don't clone, this will be null(). Otherwise, this will
- // be a weak link to the original. If we lose the original to GC,
- // this will be a cleared. We'll link the instances chain last.
- MaybeHandle<WeakCell> link_to_original;
-
- TRACE("Starting new module instantiation\n");
- {
- Handle<WasmCompiledModule> original(
- WasmCompiledModule::cast(module_object->GetInternalField(0)), isolate);
- // Always make a new copy of the code_table, since the old_code_table
- // may still have placeholders for imports.
- old_code_table = original->code_table();
- code_table = factory->CopyFixedArray(old_code_table);
-
- if (original->has_weak_owning_instance()) {
- WeakCell* tmp = original->ptr_to_weak_owning_instance();
- DCHECK(!tmp->cleared());
- // There is already an owner, clone everything.
- owner = Handle<JSObject>(JSObject::cast(tmp->value()), isolate);
- // Insert the latest clone in front.
- TRACE("Cloning from %d\n", original->instance_id());
- compiled_module = WasmCompiledModule::Clone(isolate, original);
- // Replace the strong reference to point to the new instance here.
- // This allows any of the other instances, including the original,
- // to be collected.
- module_object->SetInternalField(0, *compiled_module);
- compiled_module->set_weak_module_object(original->weak_module_object());
- link_to_original = factory->NewWeakCell(original);
- // Don't link to original here. We remember the original
- // as a weak link. If that link isn't clear by the time we finish
- // instantiating this instance, then we link it at that time.
- compiled_module->reset_weak_next_instance();
-
- // Clone the code for WASM functions and exports.
- for (int i = 0; i < code_table->length(); ++i) {
- Handle<Code> orig_code = code_table->GetValueChecked<Code>(isolate, i);
- switch (orig_code->kind()) {
- case Code::WASM_TO_JS_FUNCTION:
- // Imports will be overwritten with newly compiled wrappers.
- break;
- case Code::JS_TO_WASM_FUNCTION:
- case Code::WASM_FUNCTION: {
- Handle<Code> code = factory->CopyCode(orig_code);
- code_table->set(i, *code);
- break;
+static Handle<Code> UnwrapImportWrapper(Handle<Object> target) {
+ Handle<JSFunction> func = Handle<JSFunction>::cast(target);
+ Handle<Code> export_wrapper_code = handle(func->code());
+ int found = 0;
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ Handle<Code> code;
+ for (RelocIterator it(*export_wrapper_code, mask); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address target_address = rinfo->target_address();
+ Code* target = Code::GetCodeFromTargetAddress(target_address);
+ if (target->kind() == Code::WASM_FUNCTION ||
+ target->kind() == Code::WASM_TO_JS_FUNCTION) {
+ ++found;
+ code = handle(target);
+ }
+ }
+ DCHECK(found == 1);
+ return code;
+}
+
+static Handle<Code> CompileImportWrapper(Isolate* isolate, int index,
+ FunctionSig* sig,
+ Handle<JSReceiver> target,
+ Handle<String> module_name,
+ MaybeHandle<String> import_name) {
+ Handle<Code> code;
+ WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
+ if (other_func) {
+ if (sig->Equals(other_func->sig)) {
+ // Signature matched. Unwrap the JS->WASM wrapper and return the raw
+ // WASM function code.
+ return UnwrapImportWrapper(target);
+ } else {
+ return Handle<Code>::null();
+ }
+ } else {
+ // Signature mismatch. Compile a new wrapper for the new signature.
+ return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
+ module_name, import_name);
+ }
+}
+
+static void UpdateDispatchTablesInternal(Isolate* isolate,
+ Handle<FixedArray> dispatch_tables,
+ int index, WasmFunction* function,
+ Handle<Code> code) {
+ DCHECK_EQ(0, dispatch_tables->length() % 3);
+ for (int i = 0; i < dispatch_tables->length(); i += 3) {
+ int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
+ Handle<FixedArray> dispatch_table(
+ FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
+ if (function) {
+ // TODO(titzer): the signature might need to be copied to avoid
+ // a dangling pointer in the signature map.
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
+ int sig_index = static_cast<int>(
+ instance->module()->function_tables[table_index].map.FindOrInsert(
+ function->sig));
+ dispatch_table->set(index, Smi::FromInt(sig_index));
+ dispatch_table->set(index + (dispatch_table->length() / 2), *code);
+ } else {
+ Code* code = nullptr;
+ dispatch_table->set(index, Smi::FromInt(-1));
+ dispatch_table->set(index + (dispatch_table->length() / 2), code);
+ }
+ }
+}
+
+void wasm::UpdateDispatchTables(Isolate* isolate,
+ Handle<FixedArray> dispatch_tables, int index,
+ Handle<JSFunction> function) {
+ if (function.is_null()) {
+ UpdateDispatchTablesInternal(isolate, dispatch_tables, index, nullptr,
+ Handle<Code>::null());
+ } else {
+ UpdateDispatchTablesInternal(
+ isolate, dispatch_tables, index,
+ GetWasmFunctionForImportWrapper(isolate, function),
+ UnwrapImportWrapper(function));
+ }
+}
+
+// A helper class to simplify instantiating a module from a compiled module.
+// It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
+// etc.
+class WasmInstanceBuilder {
+ public:
+ WasmInstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
+ Handle<JSObject> module_object, Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory)
+ : isolate_(isolate),
+ thrower_(thrower),
+ module_object_(module_object),
+ ffi_(ffi),
+ memory_(memory) {}
+
+ // Build an instance, in all of its glory.
+ MaybeHandle<JSObject> Build() {
+ MaybeHandle<JSObject> nothing;
+ HistogramTimerScope wasm_instantiate_module_time_scope(
+ isolate_->counters()->wasm_instantiate_module_time());
+ Factory* factory = isolate_->factory();
+
+ //--------------------------------------------------------------------------
+ // Reuse the compiled module (if no owner), otherwise clone.
+ //--------------------------------------------------------------------------
+ Handle<FixedArray> code_table;
+ Handle<FixedArray> old_code_table;
+ MaybeHandle<WasmInstanceObject> owner;
+
+ TRACE("Starting new module instantiation\n");
+ {
+ // Root the owner, if any, before doing any allocations, which
+ // may trigger GC.
+ // Both owner and original template need to be in sync. Even
+ // after we lose the original template handle, the code
+ // objects we copied from it have data relative to the
+ // instance - such as globals addresses.
+ Handle<WasmCompiledModule> original;
+ {
+ DisallowHeapAllocation no_gc;
+ original = handle(
+ WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+ if (original->has_weak_owning_instance()) {
+ owner = handle(WasmInstanceObject::cast(
+ original->weak_owning_instance()->value()));
+ }
+ }
+ DCHECK(!original.is_null());
+ // Always make a new copy of the code_table, since the old_code_table
+ // may still have placeholders for imports.
+ old_code_table = original->code_table();
+ code_table = factory->CopyFixedArray(old_code_table);
+
+ if (original->has_weak_owning_instance()) {
+ // Clone, but don't insert yet the clone in the instances chain.
+ // We do that last. Since we are holding on to the owner instance,
+ // the owner + original state used for cloning and patching
+ // won't be mutated by possible finalizer runs.
+ DCHECK(!owner.is_null());
+ TRACE("Cloning from %d\n", original->instance_id());
+ compiled_module_ = WasmCompiledModule::Clone(isolate_, original);
+ // Avoid creating too many handles in the outer scope.
+ HandleScope scope(isolate_);
+
+ // Clone the code for WASM functions and exports.
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> orig_code =
+ code_table->GetValueChecked<Code>(isolate_, i);
+ switch (orig_code->kind()) {
+ case Code::WASM_TO_JS_FUNCTION:
+ // Imports will be overwritten with newly compiled wrappers.
+ break;
+ case Code::JS_TO_WASM_FUNCTION:
+ case Code::WASM_FUNCTION: {
+ Handle<Code> code = factory->CopyCode(orig_code);
+ code_table->set(i, *code);
+ break;
+ }
+ default:
+ UNREACHABLE();
}
- default:
- UNREACHABLE();
}
+ RecordStats(isolate_, code_table);
+ } else {
+ // There was no owner, so we can reuse the original.
+ compiled_module_ = original;
+ TRACE("Reusing existing instance %d\n",
+ compiled_module_->instance_id());
+ }
+ compiled_module_->set_code_table(code_table);
+ }
+ module_ = reinterpret_cast<WasmModuleWrapper*>(
+ *compiled_module_->module_wrapper())
+ ->get();
+
+ //--------------------------------------------------------------------------
+ // Allocate the instance object.
+ //--------------------------------------------------------------------------
+ Handle<WasmInstanceObject> instance =
+ WasmInstanceObject::New(isolate_, compiled_module_);
+
+ //--------------------------------------------------------------------------
+ // Set up the globals for the new instance.
+ //--------------------------------------------------------------------------
+ MaybeHandle<JSArrayBuffer> old_globals;
+ uint32_t globals_size = module_->globals_size;
+ if (globals_size > 0) {
+ Handle<JSArrayBuffer> global_buffer =
+ NewArrayBuffer(isolate_, globals_size);
+ globals_ = global_buffer;
+ if (globals_.is_null()) {
+ thrower_->RangeError("Out of memory: wasm globals");
+ return nothing;
}
- RecordStats(isolate, code_table);
+ Address old_address =
+ owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
+ isolate_->heap()->undefined_value(),
+ *owner.ToHandleChecked());
+ RelocateGlobals(code_table, old_address,
+ static_cast<Address>(global_buffer->backing_store()));
+ instance->set_globals_buffer(*global_buffer);
+ }
+
+ //--------------------------------------------------------------------------
+ // Prepare for initialization of function tables.
+ //--------------------------------------------------------------------------
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ table_instances_.reserve(module_->function_tables.size());
+ for (int index = 0; index < function_table_count; ++index) {
+ table_instances_.push_back({Handle<WasmTableObject>::null(),
+ Handle<FixedArray>::null(),
+ Handle<FixedArray>::null()});
+ }
+
+ //--------------------------------------------------------------------------
+ // Process the imports for the module.
+ //--------------------------------------------------------------------------
+ int num_imported_functions = ProcessImports(code_table, instance);
+ if (num_imported_functions < 0) return nothing;
+
+ //--------------------------------------------------------------------------
+ // Process the initialization for the module's globals.
+ //--------------------------------------------------------------------------
+ InitGlobals();
+
+ //--------------------------------------------------------------------------
+ // Set up the memory for the new instance.
+ //--------------------------------------------------------------------------
+ MaybeHandle<JSArrayBuffer> old_memory;
+
+ uint32_t min_mem_pages = module_->min_mem_pages;
+ isolate_->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
+
+ if (!memory_.is_null()) {
+ // Set externally passed ArrayBuffer non neuterable.
+ memory_->set_is_neuterable(false);
+ } else if (min_mem_pages > 0) {
+ memory_ = AllocateMemory(min_mem_pages);
+ if (memory_.is_null()) return nothing; // failed to allocate memory
+ }
+
+ if (!memory_.is_null()) {
+ instance->set_memory_buffer(*memory_);
+ Address mem_start = static_cast<Address>(memory_->backing_store());
+ uint32_t mem_size =
+ static_cast<uint32_t>(memory_->byte_length()->Number());
+ LoadDataSegments(mem_start, mem_size);
+
+ uint32_t old_mem_size = compiled_module_->mem_size();
+ Address old_mem_start =
+ compiled_module_->has_memory()
+ ? static_cast<Address>(
+ compiled_module_->memory()->backing_store())
+ : nullptr;
+ RelocateMemoryReferencesInCode(code_table, old_mem_start, mem_start,
+ old_mem_size, mem_size);
+ compiled_module_->set_memory(memory_);
} else {
- // There was no owner, so we can reuse the original.
- compiled_module = original;
- TRACE("Reusing existing instance %d\n", compiled_module->instance_id());
+ LoadDataSegments(nullptr, 0);
}
- compiled_module->set_code_table(code_table);
- }
-
- //--------------------------------------------------------------------------
- // Allocate the instance object.
- //--------------------------------------------------------------------------
- Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE,
- JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
- Handle<JSObject> instance = factory->NewJSObjectFromMap(map, TENURED);
- instance->SetInternalField(kWasmModuleCodeTable, *code_table);
-
- //--------------------------------------------------------------------------
- // Set up the memory for the new instance.
- //--------------------------------------------------------------------------
- MaybeHandle<JSArrayBuffer> old_memory;
- // TODO(titzer): handle imported memory properly.
-
- uint32_t min_mem_pages = compiled_module->min_memory_pages();
- isolate->counters()->wasm_min_mem_pages_count()->AddSample(min_mem_pages);
- // TODO(wasm): re-enable counter for max_mem_pages when we use that field.
-
- if (memory.is_null() && min_mem_pages > 0) {
- memory = AllocateMemory(thrower, isolate, min_mem_pages);
- if (memory.is_null()) return nothing; // failed to allocate memory
- }
-
- if (!memory.is_null()) {
- instance->SetInternalField(kWasmMemArrayBuffer, *memory);
- Address mem_start = static_cast<Address>(memory->backing_store());
- uint32_t mem_size = static_cast<uint32_t>(memory->byte_length()->Number());
- LoadDataSegments(compiled_module, mem_start, mem_size);
-
- uint32_t old_mem_size = compiled_module->has_heap()
- ? compiled_module->mem_size()
- : compiled_module->default_mem_size();
- Address old_mem_start =
- compiled_module->has_heap()
- ? static_cast<Address>(compiled_module->heap()->backing_store())
- : nullptr;
- RelocateInstanceCode(instance, old_mem_start, mem_start, old_mem_size,
- mem_size);
- compiled_module->set_heap(memory);
- }
-
- //--------------------------------------------------------------------------
- // Set up the globals for the new instance.
- //--------------------------------------------------------------------------
- MaybeHandle<JSArrayBuffer> old_globals;
- MaybeHandle<JSArrayBuffer> globals;
- uint32_t globals_size = compiled_module->globals_size();
- if (globals_size > 0) {
- Handle<JSArrayBuffer> global_buffer = NewArrayBuffer(isolate, globals_size);
- globals = global_buffer;
- if (globals.is_null()) {
- thrower->Error("Out of memory: wasm globals");
- return nothing;
+
+ //--------------------------------------------------------------------------
+ // Set up the runtime support for the new instance.
+ //--------------------------------------------------------------------------
+ Handle<WeakCell> weak_link = factory->NewWeakCell(instance);
+
+ for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
+ i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
+ if (code->kind() == Code::WASM_FUNCTION) {
+ Handle<FixedArray> deopt_data = factory->NewFixedArray(2, TENURED);
+ deopt_data->set(0, *weak_link);
+ deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
+ deopt_data->set_length(2);
+ code->set_deoptimization_data(*deopt_data);
+ }
}
- Address old_address =
- owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
- *isolate->factory()->undefined_value(),
- JSObject::cast(*owner));
- RelocateGlobals(instance, old_address,
- static_cast<Address>(global_buffer->backing_store()));
- instance->SetInternalField(kWasmGlobalsArrayBuffer, *global_buffer);
- }
-
- //--------------------------------------------------------------------------
- // Compile the import wrappers for the new instance.
- //--------------------------------------------------------------------------
- // TODO(titzer): handle imported globals and function tables.
- int num_imported_functions = 0;
- if (compiled_module->has_import_data()) {
- Handle<FixedArray> import_data = compiled_module->import_data();
- num_imported_functions = import_data->length();
- for (int index = 0; index < num_imported_functions; index++) {
- Handle<Code> import_wrapper =
- CompileImportWrapper(isolate, ffi, index, import_data, thrower);
- if (thrower->error()) return nothing;
- code_table->set(index, *import_wrapper);
- RecordStats(isolate, *import_wrapper);
+
+ //--------------------------------------------------------------------------
+ // Set up the exports object for the new instance.
+ //--------------------------------------------------------------------------
+ ProcessExports(code_table, instance);
+
+ //--------------------------------------------------------------------------
+ // Set up the indirect function tables for the new instance.
+ //--------------------------------------------------------------------------
+ if (function_table_count > 0) InitializeTables(code_table, instance);
+
+ if (num_imported_functions > 0 || !owner.is_null()) {
+ // If the code was cloned, or new imports were compiled, patch.
+ PatchDirectCalls(old_code_table, code_table, num_imported_functions);
}
- }
- //--------------------------------------------------------------------------
- // Set up the debug support for the new instance.
- //--------------------------------------------------------------------------
- // TODO(wasm): avoid referencing this stuff from the instance, use it off
- // the compiled module instead. See the following 3 assignments:
- if (compiled_module->has_module_bytes()) {
- instance->SetInternalField(kWasmModuleBytesString,
- compiled_module->ptr_to_module_bytes());
- }
+ FlushICache(isolate_, code_table);
- if (compiled_module->has_function_names()) {
- instance->SetInternalField(kWasmFunctionNamesArray,
- compiled_module->ptr_to_function_names());
- }
+ //--------------------------------------------------------------------------
+ // Set up and link the new instance.
+ //--------------------------------------------------------------------------
+ {
+ Handle<Object> global_handle =
+ isolate_->global_handles()->Create(*instance);
+ Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module_);
+ Handle<WeakCell> link_to_owning_instance = factory->NewWeakCell(instance);
+ MaybeHandle<WeakCell> link_to_original;
+ MaybeHandle<WasmCompiledModule> original;
+ if (!owner.is_null()) {
+ // prepare the data needed for publishing in a chain, but don't link
+ // just yet, because
+ // we want all the publishing to happen free from GC interruptions, and
+ // so we do it in
+ // one GC-free scope afterwards.
+ original = handle(owner.ToHandleChecked()->get_compiled_module());
+ link_to_original = factory->NewWeakCell(original.ToHandleChecked());
+ }
+ // Publish the new instance to the instances chain.
+ {
+ DisallowHeapAllocation no_gc;
+ if (!link_to_original.is_null()) {
+ compiled_module_->set_weak_next_instance(
+ link_to_original.ToHandleChecked());
+ original.ToHandleChecked()->set_weak_prev_instance(link_to_clone);
+ compiled_module_->set_weak_wasm_module(
+ original.ToHandleChecked()->weak_wasm_module());
+ }
+ module_object_->SetInternalField(0, *compiled_module_);
+ compiled_module_->set_weak_owning_instance(link_to_owning_instance);
+ GlobalHandles::MakeWeak(global_handle.location(),
+ global_handle.location(), &InstanceFinalizer,
+ v8::WeakCallbackType::kFinalizer);
+ }
+ }
- {
- Handle<Object> handle = factory->NewNumber(num_imported_functions);
- instance->SetInternalField(kWasmNumImportedFunctions, *handle);
+ DCHECK(wasm::IsWasmInstance(*instance));
+ if (instance->has_memory_object()) {
+ instance->get_memory_object()->AddInstance(*instance);
+ }
+
+ //--------------------------------------------------------------------------
+ // Run the start function if one was specified.
+ //--------------------------------------------------------------------------
+ if (module_->start_function_index >= 0) {
+ HandleScope scope(isolate_);
+ ModuleEnv module_env;
+ module_env.module = module_;
+ module_env.instance = nullptr;
+ module_env.origin = module_->origin;
+ int start_index = module_->start_function_index;
+ Handle<Code> startup_code =
+ code_table->GetValueChecked<Code>(isolate_, start_index);
+ FunctionSig* sig = module_->functions[start_index].sig;
+ Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+ isolate_, &module_env, startup_code, start_index);
+ Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
+ isolate_, instance, factory->InternalizeUtf8String("start"),
+ wrapper_code, static_cast<int>(sig->parameter_count()), start_index);
+ RecordStats(isolate_, *startup_code);
+ // Call the JS function.
+ Handle<Object> undefined = factory->undefined_value();
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate_, startup_fct, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ DCHECK(isolate_->has_pending_exception());
+ isolate_->OptionalRescheduleException(false);
+ // It's unfortunate that the new instance is already linked in the
+ // chain. However, we need to set up everything before executing the
+ // start function, such that stack trace information can be generated
+ // correctly already in the start function.
+ return nothing;
+ }
+ }
+
+ DCHECK(!isolate_->has_pending_exception());
+ TRACE("Finishing instance %d\n", compiled_module_->instance_id());
+ TRACE_CHAIN(WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+ return instance;
}
- //--------------------------------------------------------------------------
- // Set up the runtime support for the new instance.
- //--------------------------------------------------------------------------
- Handle<WeakCell> weak_link = isolate->factory()->NewWeakCell(instance);
+ private:
+ // Represents the initialized state of a table.
+ struct TableInstance {
+ Handle<WasmTableObject> table_object; // WebAssembly.Table instance
+ Handle<FixedArray> js_wrappers; // JSFunctions exported
+ Handle<FixedArray> dispatch_table; // internal (code, sig) pairs
+ };
- for (int i = num_imported_functions + FLAG_skip_compiling_wasm_funcs;
- i < code_table->length(); ++i) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
- if (code->kind() == Code::WASM_FUNCTION) {
- Handle<FixedArray> deopt_data =
- isolate->factory()->NewFixedArray(2, TENURED);
- deopt_data->set(0, *weak_link);
- deopt_data->set(1, Smi::FromInt(static_cast<int>(i)));
- deopt_data->set_length(2);
- code->set_deoptimization_data(*deopt_data);
+ Isolate* isolate_;
+ WasmModule* module_;
+ ErrorThrower* thrower_;
+ Handle<JSObject> module_object_;
+ Handle<JSReceiver> ffi_;
+ Handle<JSArrayBuffer> memory_;
+ Handle<JSArrayBuffer> globals_;
+ Handle<WasmCompiledModule> compiled_module_;
+ std::vector<TableInstance> table_instances_;
+ std::vector<Handle<JSFunction>> js_wrappers_;
+
+ // Helper routine to print out errors with imports (FFI).
+ MaybeHandle<JSFunction> ReportFFIError(const char* error, uint32_t index,
+ Handle<String> module_name,
+ MaybeHandle<String> function_name) {
+ Handle<String> function_name_handle;
+ if (function_name.ToHandle(&function_name_handle)) {
+ thrower_->TypeError(
+ "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
+ module_name->length(), module_name->ToCString().get(),
+ function_name_handle->length(),
+ function_name_handle->ToCString().get(), error);
+ } else {
+ thrower_->TypeError("Import #%d module=\"%.*s\" error: %s", index,
+ module_name->length(), module_name->ToCString().get(),
+ error);
}
+ thrower_->TypeError("Import ");
+ return MaybeHandle<JSFunction>();
}
- //--------------------------------------------------------------------------
- // Set up the indirect function tables for the new instance.
- //--------------------------------------------------------------------------
- {
- std::vector<Handle<Code>> functions(
- static_cast<size_t>(code_table->length()));
- for (int i = 0; i < code_table->length(); ++i) {
- functions[i] = code_table->GetValueChecked<Code>(isolate, i);
+ // Look up an import value in the {ffi_} object.
+ MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
+ MaybeHandle<String> import_name) {
+ if (ffi_.is_null()) {
+ return ReportFFIError("FFI is not an object", index, module_name,
+ import_name);
+ }
+
+ // Look up the module first.
+ MaybeHandle<Object> result = Object::GetProperty(ffi_, module_name);
+ if (result.is_null()) {
+ return ReportFFIError("module not found", index, module_name,
+ import_name);
}
- if (compiled_module->has_indirect_function_tables()) {
- Handle<FixedArray> indirect_tables_template =
- compiled_module->indirect_function_tables();
- Handle<FixedArray> to_replace =
- owner.is_null() ? indirect_tables_template
- : handle(FixedArray::cast(owner->GetInternalField(
- kWasmModuleFunctionTable)));
- Handle<FixedArray> indirect_tables = SetupIndirectFunctionTable(
- isolate, code_table, indirect_tables_template, to_replace);
- for (int i = 0; i < indirect_tables->length(); ++i) {
- Handle<FixedArray> metadata =
- indirect_tables->GetValueChecked<FixedArray>(isolate, i);
- uint32_t size = Smi::cast(metadata->get(kSize))->value();
- Handle<FixedArray> table =
- metadata->GetValueChecked<FixedArray>(isolate, kTable);
- PopulateFunctionTable(table, size, &functions);
+ Handle<Object> module = result.ToHandleChecked();
+
+ if (!import_name.is_null()) {
+ // Look up the value in the module.
+ if (!module->IsJSReceiver()) {
+ return ReportFFIError("module is not an object or function", index,
+ module_name, import_name);
}
- instance->SetInternalField(kWasmModuleFunctionTable, *indirect_tables);
+
+ result = Object::GetProperty(module, import_name.ToHandleChecked());
+ if (result.is_null()) {
+ return ReportFFIError("import not found", index, module_name,
+ import_name);
+ }
+ } else {
+ // No function specified. Use the "default export".
+ result = module;
}
+
+ return result;
}
- //--------------------------------------------------------------------------
- // Set up the exports object for the new instance.
- //--------------------------------------------------------------------------
- bool mem_export = compiled_module->export_memory();
- ModuleOrigin origin = compiled_module->origin();
+ uint32_t EvalUint32InitExpr(const WasmInitExpr& expr) {
+ switch (expr.kind) {
+ case WasmInitExpr::kI32Const:
+ return expr.val.i32_const;
+ case WasmInitExpr::kGlobalIndex: {
+ uint32_t offset = module_->globals[expr.val.global_index].offset;
+ return *reinterpret_cast<uint32_t*>(raw_buffer_ptr(globals_, offset));
+ }
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ }
- if (compiled_module->has_exports() || mem_export) {
- PropertyDescriptor desc;
- desc.set_writable(false);
+ // Load data segments into the memory.
+ void LoadDataSegments(Address mem_addr, size_t mem_size) {
+ Handle<SeqOneByteString> module_bytes = compiled_module_->module_bytes();
+ for (const WasmDataSegment& segment : module_->data_segments) {
+ uint32_t source_size = segment.source_size;
+ // Segments of size == 0 are just nops.
+ if (source_size == 0) continue;
+ uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
+ if (dest_offset >= mem_size || source_size >= mem_size ||
+ dest_offset > (mem_size - source_size)) {
+ thrower_->TypeError("data segment (start = %" PRIu32 ", size = %" PRIu32
+ ") does not fit into memory (size = %" PRIuS ")",
+ dest_offset, source_size, mem_size);
+ return;
+ }
+ byte* dest = mem_addr + dest_offset;
+ const byte* src = reinterpret_cast<const byte*>(
+ module_bytes->GetCharsAddress() + segment.source_offset);
+ memcpy(dest, src, source_size);
+ }
+ }
- Handle<JSObject> exports_object = instance;
- if (origin == kWasmOrigin) {
- // Create the "exports" object.
- Handle<JSFunction> object_function = Handle<JSFunction>(
- isolate->native_context()->object_function(), isolate);
- exports_object = factory->NewJSObject(object_function, TENURED);
- Handle<String> exports_name = factory->InternalizeUtf8String("exports");
- JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+ void WriteGlobalValue(WasmGlobal& global, Handle<Object> value) {
+ double num = 0;
+ if (value->IsSmi()) {
+ num = Smi::cast(*value)->value();
+ } else if (value->IsHeapNumber()) {
+ num = HeapNumber::cast(*value)->value();
+ } else {
+ UNREACHABLE();
}
- int first_export = -1;
- // TODO(wasm): another iteration over the code objects.
- for (int i = 0; i < code_table->length(); i++) {
- Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
- if (code->kind() == Code::JS_TO_WASM_FUNCTION) {
- first_export = i;
+ TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
+ WasmOpcodes::TypeName(global.type));
+ switch (global.type) {
+ case kAstI32:
+ *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
break;
- }
+ case kAstI64:
+ // TODO(titzer): initialization of imported i64 globals.
+ UNREACHABLE();
+ break;
+ case kAstF32:
+ *GetRawGlobalPtr<float>(global) = static_cast<float>(num);
+ break;
+ case kAstF64:
+ *GetRawGlobalPtr<double>(global) = static_cast<double>(num);
+ break;
+ default:
+ UNREACHABLE();
}
- if (compiled_module->has_exports()) {
- Handle<FixedArray> exports = compiled_module->exports();
- int export_size = exports->length();
- for (int i = 0; i < export_size; ++i) {
- Handle<FixedArray> export_data =
- exports->GetValueChecked<FixedArray>(isolate, i);
- Handle<String> name =
- export_data->GetValueChecked<String>(isolate, kExportName);
- int arity = Smi::cast(export_data->get(kExportArity))->value();
- MaybeHandle<ByteArray> signature =
- export_data->GetValue<ByteArray>(isolate, kExportedSignature);
- Handle<Code> export_code =
- code_table->GetValueChecked<Code>(isolate, first_export + i);
- Handle<JSFunction> function = WrapExportCodeAsJSFunction(
- isolate, export_code, name, arity, signature, instance);
- desc.set_value(function);
- Maybe<bool> status = JSReceiver::DefineOwnProperty(
- isolate, exports_object, name, &desc, Object::THROW_ON_ERROR);
- if (!status.IsJust()) {
- thrower->Error("export of %.*s failed.", name->length(),
- name->ToCString().get());
- return nothing;
+ }
+
+ // Process the imports, including functions, tables, globals, and memory, in
+ // order, loading them from the {ffi_} object. Returns the number of imported
+ // functions.
+ int ProcessImports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance) {
+ int num_imported_functions = 0;
+ int num_imported_tables = 0;
+ for (int index = 0; index < static_cast<int>(module_->import_table.size());
+ ++index) {
+ WasmImport& import = module_->import_table[index];
+ Handle<String> module_name =
+ ExtractStringFromModuleBytes(isolate_, compiled_module_,
+ import.module_name_offset,
+ import.module_name_length)
+ .ToHandleChecked();
+ Handle<String> function_name = Handle<String>::null();
+ if (import.field_name_length > 0) {
+ function_name = ExtractStringFromModuleBytes(isolate_, compiled_module_,
+ import.field_name_offset,
+ import.field_name_length)
+ .ToHandleChecked();
+ }
+
+ MaybeHandle<Object> result =
+ LookupImport(index, module_name, function_name);
+ if (thrower_->error()) return -1;
+
+ switch (import.kind) {
+ case kExternalFunction: {
+ // Function imports must be callable.
+ Handle<Object> function = result.ToHandleChecked();
+ if (!function->IsCallable()) {
+ ReportFFIError("function import requires a callable", index,
+ module_name, function_name);
+ return -1;
+ }
+
+ Handle<Code> import_wrapper = CompileImportWrapper(
+ isolate_, index, module_->functions[import.index].sig,
+ Handle<JSReceiver>::cast(function), module_name, function_name);
+ if (import_wrapper.is_null()) {
+ ReportFFIError("imported function does not match the expected type",
+ index, module_name, function_name);
+ return -1;
+ }
+ code_table->set(num_imported_functions, *import_wrapper);
+ RecordStats(isolate_, *import_wrapper);
+ num_imported_functions++;
+ break;
+ }
+ case kExternalTable: {
+ Handle<Object> value = result.ToHandleChecked();
+ if (!WasmJs::IsWasmTableObject(isolate_, value)) {
+ ReportFFIError("table import requires a WebAssembly.Table", index,
+ module_name, function_name);
+ return -1;
+ }
+ WasmIndirectFunctionTable& table =
+ module_->function_tables[num_imported_tables];
+ TableInstance& table_instance = table_instances_[num_imported_tables];
+ table_instance.table_object = Handle<WasmTableObject>::cast(value);
+ table_instance.js_wrappers = Handle<FixedArray>(
+ table_instance.table_object->get_functions(), isolate_);
+
+ // TODO(titzer): import table size must match exactly for now.
+ int table_size = table_instance.js_wrappers->length();
+ if (table_size != static_cast<int>(table.min_size)) {
+ thrower_->TypeError(
+ "table import %d is wrong size (%d), expected %u", index,
+ table_size, table.min_size);
+ return -1;
+ }
+
+ // Allocate a new dispatch table.
+ table_instance.dispatch_table =
+ isolate_->factory()->NewFixedArray(table_size * 2);
+ for (int i = 0; i < table_size * 2; ++i) {
+ table_instance.dispatch_table->set(i,
+ Smi::FromInt(kInvalidSigIndex));
+ }
+ // Initialize the dispatch table with the (foreign) JS functions
+ // that are already in the table.
+ for (int i = 0; i < table_size; ++i) {
+ Handle<Object> val(table_instance.js_wrappers->get(i), isolate_);
+ if (!val->IsJSFunction()) continue;
+ WasmFunction* function =
+ GetWasmFunctionForImportWrapper(isolate_, val);
+ if (function == nullptr) {
+ thrower_->TypeError("table import %d[%d] is not a WASM function",
+ index, i);
+ return -1;
+ }
+ int sig_index = table.map.FindOrInsert(function->sig);
+ table_instance.dispatch_table->set(i, Smi::FromInt(sig_index));
+ table_instance.dispatch_table->set(i + table_size,
+ *UnwrapImportWrapper(val));
+ }
+
+ num_imported_tables++;
+ break;
+ }
+ case kExternalMemory: {
+ Handle<Object> object = result.ToHandleChecked();
+ if (!WasmJs::IsWasmMemoryObject(isolate_, object)) {
+ ReportFFIError("memory import must be a WebAssembly.Memory object",
+ index, module_name, function_name);
+ return -1;
+ }
+ auto memory = Handle<WasmMemoryObject>::cast(object);
+ instance->set_memory_object(*memory);
+ memory_ = Handle<JSArrayBuffer>(memory->get_buffer(), isolate_);
+ break;
}
+ case kExternalGlobal: {
+ // Global imports are converted to numbers and written into the
+ // {globals_} array buffer.
+ Handle<Object> object = result.ToHandleChecked();
+ MaybeHandle<Object> number = Object::ToNumber(object);
+ if (number.is_null()) {
+ ReportFFIError("global import could not be converted to number",
+ index, module_name, function_name);
+ return -1;
+ }
+ Handle<Object> val = number.ToHandleChecked();
+ WriteGlobalValue(module_->globals[import.index], val);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
}
- if (mem_export) {
- // Export the memory as a named property.
- Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>(
- JSArrayBuffer::cast(instance->GetInternalField(kWasmMemArrayBuffer)));
- Handle<Object> memory_object =
- WasmJs::CreateWasmMemoryObject(isolate, buffer, false, 0);
- // TODO(titzer): export the memory with the correct name.
- Handle<String> name = factory->InternalizeUtf8String("memory");
- JSObject::AddProperty(exports_object, name, memory_object, READ_ONLY);
+ return num_imported_functions;
+ }
+
+ template <typename T>
+ T* GetRawGlobalPtr(WasmGlobal& global) {
+ return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
+ }
+
+ // Process initialization of globals.
+ void InitGlobals() {
+ for (auto global : module_->globals) {
+ switch (global.init.kind) {
+ case WasmInitExpr::kI32Const:
+ *GetRawGlobalPtr<int32_t>(global) = global.init.val.i32_const;
+ break;
+ case WasmInitExpr::kI64Const:
+ *GetRawGlobalPtr<int64_t>(global) = global.init.val.i64_const;
+ break;
+ case WasmInitExpr::kF32Const:
+ *GetRawGlobalPtr<float>(global) = global.init.val.f32_const;
+ break;
+ case WasmInitExpr::kF64Const:
+ *GetRawGlobalPtr<double>(global) = global.init.val.f64_const;
+ break;
+ case WasmInitExpr::kGlobalIndex: {
+ // Initialize with another global.
+ uint32_t new_offset = global.offset;
+ uint32_t old_offset =
+ module_->globals[global.init.val.global_index].offset;
+ TRACE("init [globals+%u] = [globals+%d]\n", global.offset,
+ old_offset);
+ size_t size = (global.type == kAstI64 || global.type == kAstF64)
+ ? sizeof(double)
+ : sizeof(int32_t);
+ memcpy(raw_buffer_ptr(globals_, new_offset),
+ raw_buffer_ptr(globals_, old_offset), size);
+ break;
+ }
+ case WasmInitExpr::kNone:
+ // Happens with imported globals.
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
}
- if (num_imported_functions > 0 || !owner.is_null()) {
- // If the code was cloned, or new imports were compiled, patch.
- PatchDirectCalls(old_code_table, code_table, num_imported_functions);
- }
-
- FlushICache(isolate, code_table);
-
- //--------------------------------------------------------------------------
- // Run the start function if one was specified.
- //--------------------------------------------------------------------------
- if (compiled_module->has_startup_function()) {
- Handle<FixedArray> startup_data = compiled_module->startup_function();
- HandleScope scope(isolate);
- int32_t start_index =
- startup_data->GetValueChecked<Smi>(isolate, kExportedFunctionIndex)
- ->value();
- Handle<Code> startup_code =
- code_table->GetValueChecked<Code>(isolate, start_index);
- int arity = Smi::cast(startup_data->get(kExportArity))->value();
- MaybeHandle<ByteArray> startup_signature =
- startup_data->GetValue<ByteArray>(isolate, kExportedSignature);
- Handle<JSFunction> startup_fct = WrapExportCodeAsJSFunction(
- isolate, startup_code, factory->InternalizeUtf8String("start"), arity,
- startup_signature, instance);
- RecordStats(isolate, *startup_code);
- // Call the JS function.
- Handle<Object> undefined = isolate->factory()->undefined_value();
- MaybeHandle<Object> retval =
- Execution::Call(isolate, startup_fct, undefined, 0, nullptr);
-
- if (retval.is_null()) {
- thrower->Error("WASM.instantiateModule(): start function failed");
- return nothing;
+ // Allocate memory for a module instance as a new JSArrayBuffer.
+ Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages) {
+ if (min_mem_pages > WasmModule::kV8MaxPages) {
+ thrower_->RangeError("Out of memory: wasm memory too large");
+ return Handle<JSArrayBuffer>::null();
+ }
+ Handle<JSArrayBuffer> mem_buffer =
+ NewArrayBuffer(isolate_, min_mem_pages * WasmModule::kPageSize);
+
+ if (mem_buffer.is_null()) {
+ thrower_->RangeError("Out of memory: wasm memory");
}
+ return mem_buffer;
}
- DCHECK(wasm::IsWasmObject(*instance));
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance) {
+ bool needs_wrappers = module_->num_exported_functions > 0;
+ for (auto table_instance : table_instances_) {
+ if (!table_instance.js_wrappers.is_null()) {
+ needs_wrappers = true;
+ break;
+ }
+ }
+ for (auto table : module_->function_tables) {
+ if (table.exported) {
+ needs_wrappers = true;
+ break;
+ }
+ }
+ if (needs_wrappers) {
+ // Fill the table to cache the exported JSFunction wrappers.
+ js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
+ Handle<JSFunction>::null());
+ }
- {
- Handle<WeakCell> link_to_owner = factory->NewWeakCell(instance);
+ Handle<JSObject> exports_object = instance;
+ if (module_->export_table.size() > 0 && module_->origin == kWasmOrigin) {
+ // Create the "exports" object.
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate_->native_context()->object_function(), isolate_);
+ exports_object =
+ isolate_->factory()->NewJSObject(object_function, TENURED);
+ Handle<String> exports_name =
+ isolate_->factory()->InternalizeUtf8String("exports");
+ JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+ }
- Handle<Object> global_handle = isolate->global_handles()->Create(*instance);
- Handle<WeakCell> link_to_clone = factory->NewWeakCell(compiled_module);
- {
- DisallowHeapAllocation no_gc;
- compiled_module->set_weak_owning_instance(link_to_owner);
- Handle<WeakCell> next;
- if (link_to_original.ToHandle(&next) && !next->cleared()) {
- WasmCompiledModule* original = WasmCompiledModule::cast(next->value());
- DCHECK(original->has_weak_owning_instance());
- DCHECK(!original->weak_owning_instance()->cleared());
- compiled_module->set_weak_next_instance(next);
- original->set_weak_prev_instance(link_to_clone);
+ PropertyDescriptor desc;
+ desc.set_writable(false);
+
+ // Process each export in the export table.
+ int export_index = 0;
+ for (auto exp : module_->export_table) {
+ Handle<String> name =
+ ExtractStringFromModuleBytes(isolate_, compiled_module_,
+ exp.name_offset, exp.name_length)
+ .ToHandleChecked();
+ switch (exp.kind) {
+ case kExternalFunction: {
+ // Wrap and export the code as a JSFunction.
+ WasmFunction& function = module_->functions[exp.index];
+ int func_index =
+ static_cast<int>(module_->functions.size() + export_index);
+ Handle<JSFunction> js_function = js_wrappers_[exp.index];
+ if (js_function.is_null()) {
+ // Wrap the exported code as a JSFunction.
+ Handle<Code> export_code =
+ code_table->GetValueChecked<Code>(isolate_, func_index);
+ js_function = WasmExportedFunction::New(
+ isolate_, instance, name, export_code,
+ static_cast<int>(function.sig->parameter_count()),
+ function.func_index);
+ js_wrappers_[exp.index] = js_function;
+ }
+ desc.set_value(js_function);
+ export_index++;
+ break;
+ }
+ case kExternalTable: {
+ // Export a table as a WebAssembly.Table object.
+ TableInstance& table_instance = table_instances_[exp.index];
+ WasmIndirectFunctionTable& table =
+ module_->function_tables[exp.index];
+ if (table_instance.table_object.is_null()) {
+ uint32_t maximum =
+ table.has_max ? table.max_size : WasmModule::kV8MaxTableSize;
+ table_instance.table_object = WasmTableObject::New(
+ isolate_, table.min_size, maximum, &table_instance.js_wrappers);
+ }
+ desc.set_value(table_instance.table_object);
+ break;
+ }
+ case kExternalMemory: {
+ // Export the memory as a WebAssembly.Memory object.
+ Handle<WasmMemoryObject> memory_object;
+ if (!instance->has_memory_object()) {
+ // If there was no imported WebAssembly.Memory object, create one.
+ Handle<JSArrayBuffer> buffer(instance->get_memory_buffer(),
+ isolate_);
+ memory_object = WasmMemoryObject::New(
+ isolate_, buffer,
+ (module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
+ instance->set_memory_object(*memory_object);
+ } else {
+ memory_object = Handle<WasmMemoryObject>(
+ instance->get_memory_object(), isolate_);
+ }
+
+ desc.set_value(memory_object);
+ break;
+ }
+ case kExternalGlobal: {
+ // Export the value of the global variable as a number.
+ WasmGlobal& global = module_->globals[exp.index];
+ double num = 0;
+ switch (global.type) {
+ case kAstI32:
+ num = *GetRawGlobalPtr<int32_t>(global);
+ break;
+ case kAstF32:
+ num = *GetRawGlobalPtr<float>(global);
+ break;
+ case kAstF64:
+ num = *GetRawGlobalPtr<double>(global);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ desc.set_value(isolate_->factory()->NewNumber(num));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- compiled_module->set_weak_owning_instance(link_to_owner);
- instance->SetInternalField(kWasmCompiledModule, *compiled_module);
- GlobalHandles::MakeWeak(global_handle.location(),
- global_handle.location(), &InstanceFinalizer,
- v8::WeakCallbackType::kFinalizer);
+ v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
+ isolate_, exports_object, name, &desc, Object::THROW_ON_ERROR);
+ if (!status.IsJust()) {
+ thrower_->TypeError("export of %.*s failed.", name->length(),
+ name->ToCString().get());
+ return;
+ }
}
}
- TRACE("Finishing instance %d\n", compiled_module->instance_id());
- TRACE_CHAIN(WasmCompiledModule::cast(module_object->GetInternalField(0)));
- return instance;
-}
-#if DEBUG
-uint32_t WasmCompiledModule::instance_id_counter_ = 0;
-#endif
+ void InitializeTables(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> old_function_tables =
+ compiled_module_->function_tables();
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ Handle<FixedArray> new_function_tables =
+ isolate_->factory()->NewFixedArray(function_table_count);
+ for (int index = 0; index < function_table_count; ++index) {
+ WasmIndirectFunctionTable& table = module_->function_tables[index];
+ TableInstance& table_instance = table_instances_[index];
+ int table_size = static_cast<int>(table.min_size);
+
+ if (table_instance.dispatch_table.is_null()) {
+ // Create a new dispatch table if necessary.
+ table_instance.dispatch_table =
+ isolate_->factory()->NewFixedArray(table_size * 2);
+ for (int i = 0; i < table_size; ++i) {
+ // Fill the table with invalid signature indexes so that
+ // uninitialized entries will always fail the signature check.
+ table_instance.dispatch_table->set(i, Smi::FromInt(kInvalidSigIndex));
+ }
+ }
-Handle<WasmCompiledModule> WasmCompiledModule::New(Isolate* isolate,
- uint32_t min_memory_pages,
- uint32_t globals_size,
- bool export_memory,
- ModuleOrigin origin) {
- Handle<FixedArray> ret =
- isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
- // Globals size is expected to fit into an int without overflow. This is not
- // supported by the spec at the moment, however, we don't support array
- // buffer sizes over 1g, so, for now, we avoid alocating a HeapNumber for
- // the globals size. The CHECK guards this assumption.
- CHECK_GE(static_cast<int>(globals_size), 0);
- ret->set(kID_min_memory_pages,
- Smi::FromInt(static_cast<int>(min_memory_pages)));
- ret->set(kID_globals_size, Smi::FromInt(static_cast<int>(globals_size)));
- ret->set(kID_export_memory, Smi::FromInt(static_cast<int>(export_memory)));
- ret->set(kID_origin, Smi::FromInt(static_cast<int>(origin)));
- WasmCompiledModule::cast(*ret)->Init();
- return handle(WasmCompiledModule::cast(*ret));
-}
+ new_function_tables->set(static_cast<int>(index),
+ *table_instance.dispatch_table);
-void WasmCompiledModule::Init() {
-#if DEBUG
- set(kID_instance_id, Smi::FromInt(instance_id_counter_++));
- TRACE("New compiled module id: %d\n", instance_id());
-#endif
-}
+ Handle<FixedArray> all_dispatch_tables;
+ if (!table_instance.table_object.is_null()) {
+ // Get the existing dispatch table(s) with the WebAssembly.Table object.
+ all_dispatch_tables = WasmTableObject::AddDispatchTable(
+ isolate_, table_instance.table_object,
+ Handle<WasmInstanceObject>::null(), index,
+ Handle<FixedArray>::null());
+ }
-void WasmCompiledModule::PrintInstancesChain() {
-#if DEBUG
- if (!FLAG_trace_wasm_instances) return;
- for (WasmCompiledModule* current = this; current != nullptr;) {
- PrintF("->%d", current->instance_id());
- if (current->ptr_to_weak_next_instance() == nullptr) break;
- CHECK(!current->ptr_to_weak_next_instance()->cleared());
- current =
- WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
- }
- PrintF("\n");
-#endif
-}
+ // TODO(titzer): this does redundant work if there are multiple tables,
+ // since initializations are not sorted by table index.
+ for (auto table_init : module_->table_inits) {
+ uint32_t base = EvalUint32InitExpr(table_init.offset);
+ if (base > static_cast<uint32_t>(table_size) ||
+ (base + table_init.entries.size() >
+ static_cast<uint32_t>(table_size))) {
+ thrower_->CompileError("table initializer is out of bounds");
+ continue;
+ }
+ for (int i = 0; i < static_cast<int>(table_init.entries.size()); ++i) {
+ uint32_t func_index = table_init.entries[i];
+ WasmFunction* function = &module_->functions[func_index];
+ int table_index = static_cast<int>(i + base);
+ int32_t sig_index = table.map.Find(function->sig);
+ DCHECK_GE(sig_index, 0);
+ table_instance.dispatch_table->set(table_index,
+ Smi::FromInt(sig_index));
+ table_instance.dispatch_table->set(table_index + table_size,
+ code_table->get(func_index));
+
+ if (!all_dispatch_tables.is_null()) {
+ Handle<Code> wasm_code(Code::cast(code_table->get(func_index)),
+ isolate_);
+ if (js_wrappers_[func_index].is_null()) {
+ // No JSFunction entry yet exists for this function. Create one.
+ // TODO(titzer): We compile JS->WASM wrappers for functions are
+ // not exported but are in an exported table. This should be done
+ // at module compile time and cached instead.
+ WasmInstance temp_instance(module_);
+ temp_instance.context = isolate_->native_context();
+ temp_instance.mem_size = 0;
+ temp_instance.mem_start = nullptr;
+ temp_instance.globals_start = nullptr;
+
+ ModuleEnv module_env;
+ module_env.module = module_;
+ module_env.instance = &temp_instance;
+ module_env.origin = module_->origin;
+
+ Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
+ isolate_, &module_env, wasm_code, func_index);
+ Handle<WasmExportedFunction> js_function =
+ WasmExportedFunction::New(
+ isolate_, instance, isolate_->factory()->empty_string(),
+ wrapper_code,
+ static_cast<int>(function->sig->parameter_count()),
+ func_index);
+ js_wrappers_[func_index] = js_function;
+ }
+ table_instance.js_wrappers->set(table_index,
+ *js_wrappers_[func_index]);
+
+ UpdateDispatchTablesInternal(isolate_, all_dispatch_tables,
+ table_index, function, wasm_code);
+ }
+ }
+ }
-Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
- uint32_t func_index) {
- if (!wasm->IsUndefined(isolate)) {
- Handle<ByteArray> func_names_arr_obj(
- ByteArray::cast(Handle<JSObject>::cast(wasm)->GetInternalField(
- kWasmFunctionNamesArray)),
- isolate);
- // TODO(clemens): Extract this from the module bytes; skip whole function
- // name table.
- Handle<Object> name;
- if (GetWasmFunctionNameFromTable(func_names_arr_obj, func_index)
- .ToHandle(&name)) {
- return name;
+ // TODO(titzer): we add the new dispatch table at the end to avoid
+ // redundant work and also because the new instance is not yet fully
+ // initialized.
+ if (!table_instance.table_object.is_null()) {
+ // Add the new dispatch table to the WebAssembly.Table object.
+ all_dispatch_tables = WasmTableObject::AddDispatchTable(
+ isolate_, table_instance.table_object, instance, index,
+ table_instance.dispatch_table);
+ }
+ }
+ // Patch all code that has references to the old indirect tables.
+ for (int i = 0; i < code_table->length(); ++i) {
+ if (!code_table->get(i)->IsCode()) continue;
+ Handle<Code> code(Code::cast(code_table->get(i)), isolate_);
+ for (int j = 0; j < function_table_count; ++j) {
+ ReplaceReferenceInCode(
+ code, Handle<Object>(old_function_tables->get(j), isolate_),
+ Handle<Object>(new_function_tables->get(j), isolate_));
+ }
}
+ compiled_module_->set_function_tables(new_function_tables);
}
- return isolate->factory()->null_value();
+};
+
+// Instantiates a WASM module, creating a WebAssembly.Instance from a
+// WebAssembly.Module.
+MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
+ ErrorThrower* thrower,
+ Handle<JSObject> wasm_module,
+ Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory) {
+ WasmInstanceBuilder builder(isolate, thrower, wasm_module, ffi, memory);
+ return builder.Build();
}
-Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
- uint32_t func_index) {
- Handle<Object> name_or_null =
- GetWasmFunctionNameOrNull(isolate, wasm, func_index);
- if (!name_or_null->IsNull(isolate)) {
- return Handle<String>::cast(name_or_null);
+Handle<String> wasm::GetWasmFunctionName(Isolate* isolate,
+ Handle<Object> instance_or_undef,
+ uint32_t func_index) {
+ if (!instance_or_undef->IsUndefined(isolate)) {
+ Handle<WasmCompiledModule> compiled_module(
+ Handle<WasmInstanceObject>::cast(instance_or_undef)
+ ->get_compiled_module());
+ MaybeHandle<String> maybe_name =
+ WasmCompiledModule::GetFunctionName(compiled_module, func_index);
+ if (!maybe_name.is_null()) return maybe_name.ToHandleChecked();
}
return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
}
-bool IsWasmObject(Object* object) {
- if (!object->IsJSObject()) return false;
-
- JSObject* obj = JSObject::cast(object);
- Isolate* isolate = obj->GetIsolate();
- if (obj->GetInternalFieldCount() != kWasmModuleInternalFieldCount) {
- return false;
- }
-
- Object* mem = obj->GetInternalField(kWasmMemArrayBuffer);
- if (obj->GetInternalField(kWasmModuleCodeTable)->IsFixedArray() &&
- (mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) &&
- obj->GetInternalField(kWasmFunctionNamesArray)->IsByteArray()) {
- Object* debug_bytes = obj->GetInternalField(kWasmModuleBytesString);
- if (!debug_bytes->IsUndefined(isolate)) {
- if (!debug_bytes->IsSeqOneByteString()) {
- return false;
- }
- DisallowHeapAllocation no_gc;
- SeqOneByteString* bytes = SeqOneByteString::cast(debug_bytes);
- if (bytes->length() < 4) return false;
- if (memcmp(bytes->GetChars(), "\0asm", 4)) return false;
- // All checks passed.
- }
- return true;
- }
- return false;
+bool wasm::IsWasmInstance(Object* object) {
+ return WasmInstanceObject::IsWasmInstanceObject(object);
}
-SeqOneByteString* GetWasmBytes(JSObject* wasm) {
- return SeqOneByteString::cast(wasm->GetInternalField(kWasmModuleBytesString));
+WasmCompiledModule* wasm::GetCompiledModule(Object* object) {
+ return WasmInstanceObject::cast(object)->get_compiled_module();
}
-Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm) {
- Handle<Object> info(wasm->GetInternalField(kWasmDebugInfo),
- wasm->GetIsolate());
- if (!info->IsUndefined(wasm->GetIsolate()))
- return Handle<WasmDebugInfo>::cast(info);
- Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(wasm);
- wasm->SetInternalField(kWasmDebugInfo, *new_info);
- return new_info;
+bool wasm::WasmIsAsmJs(Object* instance, Isolate* isolate) {
+ if (instance->IsUndefined(isolate)) return false;
+ DCHECK(IsWasmInstance(instance));
+ WasmCompiledModule* compiled_module =
+ GetCompiledModule(JSObject::cast(instance));
+ DCHECK_EQ(compiled_module->has_asm_js_offset_tables(),
+ compiled_module->script()->type() == Script::TYPE_NORMAL);
+ return compiled_module->has_asm_js_offset_tables();
}
-bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,
- Address new_start, uint32_t old_size,
- uint32_t new_size) {
- DisallowHeapAllocation no_allocation;
- if (!IsWasmObject(*object)) {
- return false;
- }
-
- // Get code table associated with the module js_object
- Object* obj = object->GetInternalField(kWasmModuleCodeTable);
- Handle<FixedArray> code_table(FixedArray::cast(obj));
-
- // Iterate through the code objects in the code table and update relocation
- // information
- for (int i = 0; i < code_table->length(); i++) {
- obj = code_table->get(i);
- Handle<Code> code(Code::cast(obj));
-
- int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
- for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsWasmMemoryReference(mode) ||
- RelocInfo::IsWasmMemorySizeReference(mode)) {
- it.rinfo()->update_wasm_memory_reference(old_start, new_start, old_size,
- new_size);
- }
- }
- }
- return true;
+Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
+ DCHECK(IsWasmInstance(*instance));
+ WasmCompiledModule* compiled_module = GetCompiledModule(*instance);
+ DCHECK(compiled_module->has_script());
+ return compiled_module->script();
}
-Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
- const WasmModule* module) {
- const WasmIndirectFunctionTable* table = &module->function_tables[index];
- DCHECK_EQ(table->size, table->values.size());
- DCHECK_GE(table->max_size, table->size);
- Handle<FixedArray> values =
- isolate->factory()->NewFixedArray(2 * table->max_size, TENURED);
- for (uint32_t i = 0; i < table->size; ++i) {
- const WasmFunction* function = &module->functions[table->values[i]];
- values->set(i, Smi::FromInt(function->sig_index));
- values->set(i + table->max_size, Smi::FromInt(table->values[i]));
- }
- // Set the remaining elements to -1 (instead of "undefined"). These
- // elements are accessed directly as SMIs (without a check). On 64-bit
- // platforms, it is possible to have the top bits of "undefined" take
- // small integer values (or zero), which are more likely to be equal to
- // the signature index we check against.
- for (uint32_t i = table->size; i < table->max_size; i++) {
- values->set(i, Smi::FromInt(-1));
- }
- return values;
+int wasm::GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
+ int byte_offset) {
+ return WasmDebugInfo::GetAsmJsSourcePosition(GetDebugInfo(instance),
+ func_index, byte_offset);
}
-void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
- const std::vector<Handle<Code>>* code_table) {
- uint32_t max_size = table->length() / 2;
- for (uint32_t i = max_size; i < max_size + table_size; ++i) {
- int index = Smi::cast(table->get(static_cast<int>(i)))->value();
- DCHECK_GE(index, 0);
- DCHECK_LT(static_cast<size_t>(index), code_table->size());
- table->set(static_cast<int>(i), *(*code_table)[index]);
- }
+Handle<SeqOneByteString> wasm::GetWasmBytes(Handle<JSObject> object) {
+ return Handle<WasmInstanceObject>::cast(object)
+ ->get_compiled_module()
+ ->module_bytes();
}
-int GetNumberOfFunctions(JSObject* wasm) {
- Object* func_names_obj = wasm->GetInternalField(kWasmFunctionNamesArray);
- // TODO(clemensh): this looks inside an array constructed elsewhere. Refactor.
- return ByteArray::cast(func_names_obj)->get_int(0);
+Handle<WasmDebugInfo> wasm::GetDebugInfo(Handle<JSObject> object) {
+ auto instance = Handle<WasmInstanceObject>::cast(object);
+ if (instance->has_debug_info()) {
+ Handle<WasmDebugInfo> info(instance->get_debug_info(),
+ instance->GetIsolate());
+ return info;
+ }
+ Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
+ instance->set_debug_info(*new_info);
+ return new_info;
}
-Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
- Handle<FixedArray> compiled_module,
- ModuleOrigin origin) {
- Handle<JSObject> module_obj;
- if (origin == ModuleOrigin::kWasmOrigin) {
- Handle<JSFunction> module_cons(
- isolate->native_context()->wasm_module_constructor());
- module_obj = isolate->factory()->NewJSObject(module_cons);
- } else {
- DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
- Handle<Map> map = isolate->factory()->NewMap(
- JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
- module_obj = isolate->factory()->NewJSObjectFromMap(map, TENURED);
- }
- module_obj->SetInternalField(0, *compiled_module);
- if (origin == ModuleOrigin::kWasmOrigin) {
- Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
- Object::SetProperty(module_obj, module_sym, module_obj, STRICT).Check();
- }
- Handle<WeakCell> link_to_module = isolate->factory()->NewWeakCell(module_obj);
- WasmCompiledModule::cast(*compiled_module)
- ->set_weak_module_object(link_to_module);
- return module_obj;
+int wasm::GetNumberOfFunctions(Handle<JSObject> object) {
+ return static_cast<int>(
+ Handle<WasmInstanceObject>::cast(object)->module()->functions.size());
}
-MaybeHandle<JSObject> CreateModuleObjectFromBytes(Isolate* isolate,
- const byte* start,
- const byte* end,
- ErrorThrower* thrower,
- ModuleOrigin origin) {
- MaybeHandle<JSObject> nothing;
- Zone zone(isolate->allocator());
- ModuleResult result =
- DecodeWasmModule(isolate, &zone, start, end, false, origin);
- std::unique_ptr<const WasmModule> decoded_module(result.val);
+// TODO(clemensh): origin can be inferred from asm_js_script; remove it.
+MaybeHandle<WasmModuleObject> wasm::CreateModuleObjectFromBytes(
+ Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
+ ModuleOrigin origin, Handle<Script> asm_js_script,
+ const byte* asm_js_offset_tables_start,
+ const byte* asm_js_offset_tables_end) {
+ MaybeHandle<WasmModuleObject> nothing;
+ ModuleResult result = DecodeWasmModule(isolate, start, end, false, origin);
if (result.failed()) {
- thrower->Failed("Wasm decoding failed", result);
+ if (result.val) delete result.val;
+ thrower->CompileFailed("Wasm decoding failed", result);
return nothing;
}
- MaybeHandle<FixedArray> compiled_module =
- decoded_module->CompileFunctions(isolate, thrower);
- if (compiled_module.is_null()) return nothing;
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::New(isolate, const_cast<WasmModule*>(result.val));
+
+ // Compile the functions of the module, producing a compiled module.
+ MaybeHandle<WasmCompiledModule> maybe_compiled_module =
+ result.val->CompileFunctions(isolate, module_wrapper, thrower);
+
+ if (maybe_compiled_module.is_null()) return nothing;
+
+ Handle<WasmCompiledModule> compiled_module =
+ maybe_compiled_module.ToHandleChecked();
+
+ DCHECK_EQ(origin == kAsmJsOrigin, !asm_js_script.is_null());
+ DCHECK(!compiled_module->has_script());
+ DCHECK(!compiled_module->has_asm_js_offset_tables());
+ if (origin == kAsmJsOrigin) {
+ // Set script for the asm.js source, and the offset table mapping wasm byte
+ // offsets to source positions.
+ compiled_module->set_script(asm_js_script);
+ size_t offset_tables_len =
+ asm_js_offset_tables_end - asm_js_offset_tables_start;
+ DCHECK_GE(static_cast<size_t>(kMaxInt), offset_tables_len);
+ Handle<ByteArray> offset_tables =
+ isolate->factory()->NewByteArray(static_cast<int>(offset_tables_len));
+ memcpy(offset_tables->GetDataStartAddress(), asm_js_offset_tables_start,
+ offset_tables_len);
+ compiled_module->set_asm_js_offset_tables(offset_tables);
+ } else {
+ // Create a new Script object representing this wasm module, store it in the
+ // compiled wasm module, and register it at the debugger.
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ script->set_type(Script::TYPE_WASM);
+
+ DCHECK_GE(kMaxInt, end - start);
+ int hash = StringHasher::HashSequentialString(
+ reinterpret_cast<const char*>(start), static_cast<int>(end - start),
+ kZeroHashSeed);
+
+ const int kBufferSize = 50;
+ char buffer[kBufferSize];
+ int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+ DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+ MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+ TENURED);
+ script->set_source_url(*url_str.ToHandleChecked());
+
+ int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ TENURED);
+ script->set_name(*name_str.ToHandleChecked());
+
+ script->set_wasm_compiled_module(*compiled_module);
+ compiled_module->set_script(script);
+ isolate->debug()->OnAfterCompile(script);
+ }
- return CreateCompiledModuleObject(isolate, compiled_module.ToHandleChecked(),
- origin);
+ return WasmModuleObject::New(isolate, compiled_module);
}
-bool ValidateModuleBytes(Isolate* isolate, const byte* start, const byte* end,
- ErrorThrower* thrower, ModuleOrigin origin) {
- Zone zone(isolate->allocator());
- ModuleResult result =
- DecodeWasmModule(isolate, &zone, start, end, false, origin);
- if (result.ok()) {
- DCHECK_NOT_NULL(result.val);
+bool wasm::ValidateModuleBytes(Isolate* isolate, const byte* start,
+ const byte* end, ErrorThrower* thrower,
+ ModuleOrigin origin) {
+ ModuleResult result = DecodeWasmModule(isolate, start, end, true, origin);
+ if (result.val) {
delete result.val;
- return true;
+ } else {
+ DCHECK(!result.ok());
}
- return false;
+ return result.ok();
}
-MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
- Handle<JSObject> instance) {
- Object* mem = instance->GetInternalField(kWasmMemArrayBuffer);
- DCHECK(IsWasmObject(*instance));
- if (mem->IsUndefined(isolate)) return MaybeHandle<JSArrayBuffer>();
- return Handle<JSArrayBuffer>(JSArrayBuffer::cast(mem));
+MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(Isolate* isolate,
+ Handle<JSObject> object) {
+ auto instance = Handle<WasmInstanceObject>::cast(object);
+ if (instance->has_memory_buffer()) {
+ return Handle<JSArrayBuffer>(instance->get_memory_buffer(), isolate);
+ }
+ return MaybeHandle<JSArrayBuffer>();
}
-void SetInstanceMemory(Handle<JSObject> instance, JSArrayBuffer* buffer) {
+void SetInstanceMemory(Handle<JSObject> object, JSArrayBuffer* buffer) {
DisallowHeapAllocation no_gc;
- DCHECK(IsWasmObject(*instance));
- instance->SetInternalField(kWasmMemArrayBuffer, buffer);
- WasmCompiledModule* module =
- WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
- module->set_ptr_to_heap(buffer);
+ auto instance = Handle<WasmInstanceObject>::cast(object);
+ instance->set_memory_buffer(buffer);
+ instance->get_compiled_module()->set_ptr_to_memory(buffer);
}
-int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance) {
+int32_t wasm::GetInstanceMemorySize(Isolate* isolate,
+ Handle<JSObject> instance) {
MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
GetInstanceMemory(isolate, instance);
Handle<JSArrayBuffer> buffer;
@@ -1820,35 +2028,55 @@ int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance) {
}
}
-int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
- uint32_t pages) {
+uint32_t GetMaxInstanceMemorySize(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
+ if (instance->has_memory_object()) {
+ Handle<WasmMemoryObject> memory_object(instance->get_memory_object(),
+ isolate);
+
+ int maximum = memory_object->maximum_pages();
+ if (maximum > 0) return static_cast<uint32_t>(maximum);
+ }
+ uint32_t compiled_max_pages =
+ instance->get_compiled_module()->max_mem_pages();
+ isolate->counters()->wasm_max_mem_pages_count()->AddSample(
+ compiled_max_pages);
+ if (compiled_max_pages != 0) return compiled_max_pages;
+ return WasmModule::kV8MaxPages;
+}
+
+int32_t wasm::GrowInstanceMemory(Isolate* isolate, Handle<JSObject> object,
+ uint32_t pages) {
+ if (!IsWasmInstance(*object)) return -1;
+ auto instance = Handle<WasmInstanceObject>::cast(object);
+ if (pages == 0) return GetInstanceMemorySize(isolate, instance);
+ uint32_t max_pages = GetMaxInstanceMemorySize(isolate, instance);
+
Address old_mem_start = nullptr;
uint32_t old_size = 0, new_size = 0;
MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
GetInstanceMemory(isolate, instance);
Handle<JSArrayBuffer> old_buffer;
- if (!maybe_mem_buffer.ToHandle(&old_buffer)) {
+ if (!maybe_mem_buffer.ToHandle(&old_buffer) ||
+ old_buffer->backing_store() == nullptr) {
// If module object does not have linear memory associated with it,
// Allocate new array buffer of given size.
- // TODO(gdeepti): Fix bounds check to take into account size of memtype.
new_size = pages * WasmModule::kPageSize;
- // The code generated in the wasm compiler guarantees this precondition.
- DCHECK(pages <= WasmModule::kMaxMemPages);
+ if (max_pages < pages) return -1;
} else {
old_mem_start = static_cast<Address>(old_buffer->backing_store());
old_size = old_buffer->byte_length()->Number();
// If the old memory was zero-sized, we should have been in the
// "undefined" case above.
DCHECK_NOT_NULL(old_mem_start);
- DCHECK_NE(0, old_size);
DCHECK(old_size + pages * WasmModule::kPageSize <=
std::numeric_limits<uint32_t>::max());
new_size = old_size + pages * WasmModule::kPageSize;
}
- if (new_size <= old_size ||
- WasmModule::kMaxMemPages * WasmModule::kPageSize <= new_size) {
+ if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
+ WasmModule::kV8MaxPages * WasmModule::kPageSize < new_size) {
return -1;
}
Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
@@ -1858,35 +2086,36 @@ int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
memcpy(new_mem_start, old_mem_start, old_size);
}
SetInstanceMemory(instance, *buffer);
- if (!UpdateWasmModuleMemory(instance, old_mem_start, new_mem_start, old_size,
- new_size)) {
- return -1;
+ Handle<FixedArray> code_table = instance->get_compiled_module()->code_table();
+ RelocateMemoryReferencesInCode(code_table, old_mem_start, new_mem_start,
+ old_size, new_size);
+ if (instance->has_memory_object()) {
+ instance->get_memory_object()->set_buffer(*buffer);
}
+
DCHECK(old_size % WasmModule::kPageSize == 0);
return (old_size / WasmModule::kPageSize);
}
-namespace testing {
-
-void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
- int instance_count) {
+void testing::ValidateInstancesChain(Isolate* isolate,
+ Handle<JSObject> wasm_module,
+ int instance_count) {
CHECK_GE(instance_count, 0);
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module =
- WasmCompiledModule::cast(module_obj->GetInternalField(0));
- CHECK_EQ(
- JSObject::cast(compiled_module->ptr_to_weak_module_object()->value()),
- *module_obj);
+ WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+ CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
+ *wasm_module);
Object* prev = nullptr;
int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
WasmCompiledModule* current_instance = compiled_module;
while (current_instance->has_weak_next_instance()) {
CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
current_instance->ptr_to_weak_prev_instance()->value() == prev);
- CHECK_EQ(current_instance->ptr_to_weak_module_object()->value(),
- *module_obj);
- CHECK(
- IsWasmObject(current_instance->ptr_to_weak_owning_instance()->value()));
+ CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(),
+ *wasm_module);
+ CHECK(IsWasmInstance(
+ current_instance->ptr_to_weak_owning_instance()->value()));
prev = current_instance;
current_instance = WasmCompiledModule::cast(
current_instance->ptr_to_weak_next_instance()->value());
@@ -1896,27 +2125,64 @@ void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
CHECK_EQ(found_instances, instance_count);
}
-void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj) {
+void testing::ValidateModuleState(Isolate* isolate,
+ Handle<JSObject> wasm_module) {
DisallowHeapAllocation no_gc;
WasmCompiledModule* compiled_module =
- WasmCompiledModule::cast(module_obj->GetInternalField(0));
- CHECK(compiled_module->has_weak_module_object());
- CHECK_EQ(compiled_module->ptr_to_weak_module_object()->value(), *module_obj);
+ WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+ CHECK(compiled_module->has_weak_wasm_module());
+ CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *wasm_module);
CHECK(!compiled_module->has_weak_prev_instance());
CHECK(!compiled_module->has_weak_next_instance());
CHECK(!compiled_module->has_weak_owning_instance());
}
-void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance) {
+void testing::ValidateOrphanedInstance(Isolate* isolate,
+ Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
- CHECK(IsWasmObject(*instance));
- WasmCompiledModule* compiled_module =
- WasmCompiledModule::cast(instance->GetInternalField(kWasmCompiledModule));
- CHECK(compiled_module->has_weak_module_object());
- CHECK(compiled_module->ptr_to_weak_module_object()->cleared());
+ WasmInstanceObject* instance = WasmInstanceObject::cast(*object);
+ WasmCompiledModule* compiled_module = instance->get_compiled_module();
+ CHECK(compiled_module->has_weak_wasm_module());
+ CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
}
-} // namespace testing
-} // namespace wasm
-} // namespace internal
-} // namespace v8
+void WasmCompiledModule::RecreateModuleWrapper(Isolate* isolate,
+ Handle<FixedArray> array) {
+ Handle<WasmCompiledModule> compiled_module(
+ reinterpret_cast<WasmCompiledModule*>(*array), isolate);
+
+ WasmModule* module = nullptr;
+ {
+ Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
+ // We parse the module again directly from the module bytes, so
+ // the underlying storage must not be moved meanwhile.
+ DisallowHeapAllocation no_allocation;
+ const byte* start =
+ reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+ const byte* end = start + module_bytes->length();
+ // TODO(titzer): remember the module origin in the compiled_module
+ // For now, we assume serialized modules did not originate from asm.js.
+ ModuleResult result =
+ DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+ CHECK(result.ok());
+ CHECK_NOT_NULL(result.val);
+ module = const_cast<WasmModule*>(result.val);
+ }
+
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::New(isolate, module);
+
+ compiled_module->set_module_wrapper(module_wrapper);
+ DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
+}
+
+MaybeHandle<String> WasmCompiledModule::GetFunctionName(
+ Handle<WasmCompiledModule> compiled_module, uint32_t func_index) {
+ DCHECK_LT(func_index, compiled_module->module()->functions.size());
+ WasmFunction& function = compiled_module->module()->functions[func_index];
+ Isolate* isolate = compiled_module->GetIsolate();
+ MaybeHandle<String> string = ExtractStringFromModuleBytes(
+ isolate, compiled_module, function.name_offset, function.name_length);
+ if (!string.is_null()) return string.ToHandleChecked();
+ return {};
+}
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index ac75042392..2ad46e21b6 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -8,29 +8,37 @@
#include <memory>
#include "src/api.h"
+#include "src/globals.h"
#include "src/handles.h"
#include "src/parsing/preparse-data.h"
+#include "src/wasm/managed.h"
+#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
+class WasmCompiledModule;
+class WasmDebugInfo;
+class WasmModuleObject;
+
namespace compiler {
class CallDescriptor;
class WasmCompilationUnit;
}
namespace wasm {
+class ErrorThrower;
+
const size_t kMaxModuleSize = 1024 * 1024 * 1024;
const size_t kMaxFunctionSize = 128 * 1024;
const size_t kMaxStringSize = 256;
const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0c;
+const uint32_t kWasmVersion = 0x0d;
-const uint8_t kWasmFunctionTypeForm = 0x40;
-const uint8_t kWasmAnyFunctionTypeForm = 0x20;
+const uint8_t kWasmFunctionTypeForm = 0x60;
+const uint8_t kWasmAnyFunctionTypeForm = 0x70;
enum WasmSectionCode {
kUnknownSectionCode = 0, // code for unknown sections
@@ -54,8 +62,6 @@ inline bool IsValidSectionCode(uint8_t byte) {
const char* SectionName(WasmSectionCode code);
-class WasmDebugInfo;
-
// Constants for fixed-size elements within a module.
static const uint32_t kMaxReturnCount = 1;
static const uint8_t kResizableMaximumFlag = 1;
@@ -86,12 +92,16 @@ struct WasmInitExpr {
double f64_const;
uint32_t global_index;
} val;
-};
-#define NO_INIT \
- { \
- WasmInitExpr::kNone, { 0u } \
+ WasmInitExpr() : kind(kNone) {}
+ explicit WasmInitExpr(int32_t v) : kind(kI32Const) { val.i32_const = v; }
+ explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
+ explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
+ explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+ WasmInitExpr(WasmInitKind kind, uint32_t global_index) : kind(kGlobalIndex) {
+ val.global_index = global_index;
}
+};
// Static representation of a WASM function.
struct WasmFunction {
@@ -125,11 +135,14 @@ struct WasmDataSegment {
// Static representation of a wasm indirect call table.
struct WasmIndirectFunctionTable {
- uint32_t size; // initial table size.
+ uint32_t min_size; // minimum table size.
uint32_t max_size; // maximum table size.
+ bool has_max; // true if there is a maximum size.
+ // TODO(titzer): Move this to WasmInstance. Needed by interpreter only.
std::vector<int32_t> values; // function table, -1 indicating invalid.
bool imported; // true if imported.
bool exported; // true if exported.
+ SignatureMap map; // canonicalizing map for sig indexes.
};
// Static representation of how to initialize a table.
@@ -159,31 +172,32 @@ struct WasmExport {
enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
-class WasmCompiledModule;
-
// Static representation of a module.
-struct WasmModule {
+struct V8_EXPORT_PRIVATE WasmModule {
static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
- static const uint32_t kMaxLegalPages = 65536; // Maximum legal pages
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
- static const uint32_t kMaxMemPages = 16384; // Maximum memory size = 1gb
-
- const byte* module_start; // starting address for the module bytes.
- const byte* module_end; // end address for the module bytes.
- uint32_t min_mem_pages; // minimum size of the memory in 64k pages.
- uint32_t max_mem_pages; // maximum size of the memory in 64k pages.
- bool mem_export; // true if the memory is exported.
+ static const size_t kV8MaxPages = 16384; // Maximum memory size = 1gb
+ static const size_t kSpecMaxPages = 65536; // Maximum according to the spec
+ static const size_t kV8MaxTableSize = 16 * 1024 * 1024;
+
+ Zone* owned_zone;
+ const byte* module_start = nullptr; // starting address for the module bytes
+ const byte* module_end = nullptr; // end address for the module bytes
+ uint32_t min_mem_pages = 0; // minimum size of the memory in 64k pages
+ uint32_t max_mem_pages = 0; // maximum size of the memory in 64k pages
+ bool has_memory = false; // true if the memory was defined or imported
+ bool mem_export = false; // true if the memory is exported
// TODO(wasm): reconcile start function index being an int with
// the fact that we index on uint32_t, so we may technically not be
// able to represent some start_function_index -es.
- int start_function_index; // start function, if any.
- ModuleOrigin origin; // origin of the module
+ int start_function_index = -1; // start function, if any
+ ModuleOrigin origin = kWasmOrigin; // origin of the module
std::vector<WasmGlobal> globals; // globals in this module.
- uint32_t globals_size; // size of globals table.
- uint32_t num_imported_functions; // number of imported functions.
- uint32_t num_declared_functions; // number of declared functions.
- uint32_t num_exported_functions; // number of exported functions.
+ uint32_t globals_size = 0; // size of globals table.
+ uint32_t num_imported_functions = 0; // number of imported functions.
+ uint32_t num_declared_functions = 0; // number of declared functions.
+ uint32_t num_exported_functions = 0; // number of exported functions.
std::vector<FunctionSig*> signatures; // signatures in this module.
std::vector<WasmFunction> functions; // functions in this module.
std::vector<WasmDataSegment> data_segments; // data segments in this module.
@@ -200,8 +214,11 @@ struct WasmModule {
// switch to libc-2.21 or higher.
std::unique_ptr<base::Semaphore> pending_tasks;
- WasmModule() : WasmModule(nullptr) {}
- explicit WasmModule(byte* module_start);
+ WasmModule() : WasmModule(nullptr, nullptr) {}
+ WasmModule(Zone* owned_zone, const byte* module_start);
+ ~WasmModule() {
+ if (owned_zone) delete owned_zone;
+ }
// Get a string stored in the module bytes representing a name.
WasmName GetName(uint32_t offset, uint32_t length) const {
@@ -238,19 +255,21 @@ struct WasmModule {
}
// Creates a new instantiation of the module in the given isolate.
- V8_EXPORT_PRIVATE static MaybeHandle<JSObject> Instantiate(
- Isolate* isolate, ErrorThrower* thrower, Handle<JSObject> module_object,
- Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory);
-
- MaybeHandle<WasmCompiledModule> CompileFunctions(Isolate* isolate,
- ErrorThrower* thrower) const;
+ static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
+ ErrorThrower* thrower,
+ Handle<JSObject> wasm_module,
+ Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory);
- private:
- DISALLOW_COPY_AND_ASSIGN(WasmModule);
+ MaybeHandle<WasmCompiledModule> CompileFunctions(
+ Isolate* isolate, Handle<Managed<WasmModule>> module_wrapper,
+ ErrorThrower* thrower) const;
};
+typedef Managed<WasmModule> WasmModuleWrapper;
+
// An instantiated WASM module, including memory, function table, etc.
-struct WasmModuleInstance {
+struct WasmInstance {
const WasmModule* module; // static representation of the module.
// -- Heap allocated --------------------------------------------------------
Handle<JSObject> js_object; // JavaScript module object.
@@ -260,25 +279,22 @@ struct WasmModuleInstance {
std::vector<Handle<FixedArray>> function_tables; // indirect function tables.
std::vector<Handle<Code>> function_code; // code objects for each function.
// -- raw memory ------------------------------------------------------------
- byte* mem_start; // start of linear memory.
- uint32_t mem_size; // size of the linear memory.
+ byte* mem_start = nullptr; // start of linear memory.
+ uint32_t mem_size = 0; // size of the linear memory.
// -- raw globals -----------------------------------------------------------
- byte* globals_start; // start of the globals area.
+ byte* globals_start = nullptr; // start of the globals area.
- explicit WasmModuleInstance(const WasmModule* m)
+ explicit WasmInstance(const WasmModule* m)
: module(m),
function_tables(m->function_tables.size()),
- function_code(m->functions.size()),
- mem_start(nullptr),
- mem_size(0),
- globals_start(nullptr) {}
+ function_code(m->functions.size()) {}
};
// Interface provided to the decoder/graph builder which contains only
// minimal information about the globals, functions, and function tables.
-struct ModuleEnv {
+struct V8_EXPORT_PRIVATE ModuleEnv {
const WasmModule* module;
- WasmModuleInstance* instance;
+ WasmInstance* instance;
ModuleOrigin origin;
bool IsValidGlobal(uint32_t index) const {
@@ -321,6 +337,8 @@ struct ModuleEnv {
FunctionSig* sig);
static compiler::CallDescriptor* GetI32WasmCallDescriptor(
Zone* zone, compiler::CallDescriptor* descriptor);
+ static compiler::CallDescriptor* GetI32WasmCallDescriptorForSimd(
+ Zone* zone, compiler::CallDescriptor* descriptor);
};
// A helper for printing out the names of functions.
@@ -335,215 +353,93 @@ std::ostream& operator<<(std::ostream& os, const WasmModule& module);
std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
-typedef Result<const WasmModule*> ModuleResult;
-typedef Result<WasmFunction*> FunctionResult;
-typedef std::vector<std::pair<int, int>> FunctionOffsets;
-typedef Result<FunctionOffsets> FunctionOffsetsResult;
-
-class WasmCompiledModule : public FixedArray {
- public:
- static WasmCompiledModule* cast(Object* fixed_array) {
- return reinterpret_cast<WasmCompiledModule*>(fixed_array);
- }
-
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID) \
- Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
- \
- MaybeHandle<TYPE> maybe_##NAME() const { \
- if (has_##NAME()) return NAME(); \
- return MaybeHandle<TYPE>(); \
- } \
- \
- TYPE* ptr_to_##NAME() const { \
- Object* obj = get(ID); \
- if (!obj->Is##TYPE()) return nullptr; \
- return TYPE::cast(obj); \
- } \
- \
- void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
- \
- void set_ptr_to_##NAME(TYPE* value) { set(ID, value); } \
- \
- bool has_##NAME() const { return get(ID)->Is##TYPE(); } \
- \
- void reset_##NAME() { set_undefined(ID); }
-
-#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
-
-#define WCM_SMALL_NUMBER(TYPE, NAME) \
- TYPE NAME() const { \
- return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
- }
-
-#define WCM_WEAK_LINK(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
- \
- Handle<TYPE> NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
- }
-
-#define CORE_WCM_PROPERTY_TABLE(MACRO) \
- MACRO(OBJECT, FixedArray, code_table) \
- MACRO(OBJECT, FixedArray, import_data) \
- MACRO(OBJECT, FixedArray, exports) \
- MACRO(OBJECT, FixedArray, startup_function) \
- MACRO(OBJECT, FixedArray, indirect_function_tables) \
- MACRO(OBJECT, String, module_bytes) \
- MACRO(OBJECT, ByteArray, function_names) \
- MACRO(SMALL_NUMBER, uint32_t, min_memory_pages) \
- MACRO(OBJECT, FixedArray, data_segments_info) \
- MACRO(OBJECT, ByteArray, data_segments) \
- MACRO(SMALL_NUMBER, uint32_t, globals_size) \
- MACRO(OBJECT, JSArrayBuffer, heap) \
- MACRO(SMALL_NUMBER, bool, export_memory) \
- MACRO(SMALL_NUMBER, ModuleOrigin, origin) \
- MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
- MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
- MACRO(WEAK_LINK, JSObject, owning_instance) \
- MACRO(WEAK_LINK, JSObject, module_object)
-
-#if DEBUG
-#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
-#else
-#define DEBUG_ONLY_TABLE(IGNORE)
- uint32_t instance_id() const { return -1; }
-#endif
-
-#define WCM_PROPERTY_TABLE(MACRO) \
- CORE_WCM_PROPERTY_TABLE(MACRO) \
- DEBUG_ONLY_TABLE(MACRO)
-
- private:
- enum PropertyIndices {
-#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
- WCM_PROPERTY_TABLE(INDICES) Count
-#undef INDICES
- };
-
- public:
- static Handle<WasmCompiledModule> New(Isolate* isolate,
- uint32_t min_memory_pages,
- uint32_t globals_size,
- bool export_memory,
- ModuleOrigin origin);
-
- static Handle<WasmCompiledModule> Clone(Isolate* isolate,
- Handle<WasmCompiledModule> module) {
- Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
- isolate->factory()->CopyFixedArray(module));
- ret->Init();
- ret->reset_weak_owning_instance();
- ret->reset_weak_next_instance();
- ret->reset_weak_prev_instance();
- return ret;
- }
-
- uint32_t mem_size() const {
- DCHECK(has_heap());
- return heap()->byte_length()->Number();
- }
-
- uint32_t default_mem_size() const {
- return min_memory_pages() * WasmModule::kPageSize;
- }
-
-#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
- WCM_PROPERTY_TABLE(DECLARATION)
-#undef DECLARATION
-
- void PrintInstancesChain();
-
- private:
-#if DEBUG
- static uint32_t instance_id_counter_;
-#endif
- void Init();
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
-};
-
-// Extract a function name from the given wasm object.
-// Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
-// valid UTF-8 string.
-Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> wasm,
+// Extract a function name from the given wasm instance.
+// Returns "<WASM UNNAMED>" if no instance is passed, the function is unnamed or
+// the name is not a valid UTF-8 string.
+// TODO(5620): Refactor once we always get a wasm instance.
+Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> instance,
uint32_t func_index);
-// Extract a function name from the given wasm object.
-// Returns a null handle if the function is unnamed or the name is not a valid
-// UTF-8 string.
-Handle<Object> GetWasmFunctionNameOrNull(Isolate* isolate, Handle<Object> wasm,
- uint32_t func_index);
-
// Return the binary source bytes of a wasm module.
-SeqOneByteString* GetWasmBytes(JSObject* wasm);
+Handle<SeqOneByteString> GetWasmBytes(Handle<JSObject> wasm);
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
// Return the number of functions in the given wasm object.
-int GetNumberOfFunctions(JSObject* wasm);
+int GetNumberOfFunctions(Handle<JSObject> wasm);
// Create and export JSFunction
Handle<JSFunction> WrapExportCodeAsJSFunction(Isolate* isolate,
Handle<Code> export_code,
- Handle<String> name, int arity,
- MaybeHandle<ByteArray> signature,
- Handle<JSObject> module_instance);
+ Handle<String> name,
+ FunctionSig* sig, int func_index,
+ Handle<JSObject> instance);
-// Check whether the given object is a wasm object.
+// Check whether the given object represents a WebAssembly.Instance instance.
// This checks the number and type of internal fields, so it's not 100 percent
// secure. If it turns out that we need more complete checks, we could add a
// special marker as internal field, which will definitely never occur anywhere
// else.
-bool IsWasmObject(Object* object);
+bool IsWasmInstance(Object* instance);
-// Update memory references of code objects associated with the module
-bool UpdateWasmModuleMemory(Handle<JSObject> object, Address old_start,
- Address new_start, uint32_t old_size,
- uint32_t new_size);
+// Return the compiled module object for this WASM instance.
+WasmCompiledModule* GetCompiledModule(Object* wasm_instance);
-// Constructs a single function table as a FixedArray of double size,
-// populating it with function signature indices and function indices.
-Handle<FixedArray> BuildFunctionTable(Isolate* isolate, uint32_t index,
- const WasmModule* module);
+// Check whether the wasm module was generated from asm.js code.
+bool WasmIsAsmJs(Object* instance, Isolate* isolate);
-// Populates a function table by replacing function indices with handles to
-// the compiled code.
-void PopulateFunctionTable(Handle<FixedArray> table, uint32_t table_size,
- const std::vector<Handle<Code>>* code_table);
+// Get the script of the wasm module. If the origin of the module is asm.js, the
+// returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
+// it's of type TYPE_WASM.
+Handle<Script> GetScript(Handle<JSObject> instance);
-Handle<JSObject> CreateCompiledModuleObject(Isolate* isolate,
- Handle<FixedArray> compiled_module,
- ModuleOrigin origin);
+// Get the asm.js source position for the given byte offset in the given
+// function.
+int GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
+ int byte_offset);
-V8_EXPORT_PRIVATE MaybeHandle<JSObject> CreateModuleObjectFromBytes(
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
- ModuleOrigin origin);
+ ModuleOrigin origin, Handle<Script> asm_js_script,
+ const byte* asm_offset_tables_start, const byte* asm_offset_tables_end);
V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
const byte* end,
ErrorThrower* thrower,
ModuleOrigin origin);
-// Get the number of imported functions for a WASM instance.
-uint32_t GetNumImportedFunctions(Handle<JSObject> wasm_object);
+// Get the offset of the code of a function within a module.
+int GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
+ int func_index);
+
+// Translate from byte offset in the module to function number and byte offset
+// within that function, encoded as line and column in the position info.
+bool GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
+ uint32_t position, Script::PositionInfo* info);
// Assumed to be called with a code object associated to a wasm module instance.
// Intended to be called from runtime functions.
// Returns nullptr on failing to get owning instance.
Object* GetOwningWasmInstance(Code* code);
+MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
+ Handle<JSObject> instance);
+
int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance);
int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
uint32_t pages);
+void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
+ int index, Handle<JSFunction> js_function);
+
namespace testing {
-void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> module_obj,
+void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> wasm_module,
int instance_count);
-void ValidateModuleState(Isolate* isolate, Handle<JSObject> module_obj);
+void ValidateModuleState(Isolate* isolate, Handle<JSObject> wasm_module);
void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance);
} // namespace testing
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
new file mode 100644
index 0000000000..68f66d246d
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -0,0 +1,359 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-module.h"
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_wasm_instances) PrintF(__VA_ARGS__); \
+ } while (false)
+
+#define TRACE_CHAIN(instance) \
+ do { \
+ instance->PrintInstancesChain(); \
+ } while (false)
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+#define DEFINE_ACCESSORS(Container, name, field, type) \
+ type* Container::get_##name() { \
+ return type::cast(GetInternalField(field)); \
+ } \
+ void Container::set_##name(type* value) { \
+ return SetInternalField(field, value); \
+ }
+
+#define DEFINE_OPTIONAL_ACCESSORS(Container, name, field, type) \
+ bool Container::has_##name() { \
+ return !GetInternalField(field)->IsUndefined(GetIsolate()); \
+ } \
+ type* Container::get_##name() { \
+ return type::cast(GetInternalField(field)); \
+ } \
+ void Container::set_##name(type* value) { \
+ return SetInternalField(field, value); \
+ }
+
+#define DEFINE_GETTER(Container, name, field, type) \
+ type* Container::get_##name() { return type::cast(GetInternalField(field)); }
+
+static uint32_t SafeUint32(Object* value) {
+ if (value->IsSmi()) {
+ int32_t val = Smi::cast(value)->value();
+ CHECK_GE(val, 0);
+ return static_cast<uint32_t>(val);
+ }
+ DCHECK(value->IsHeapNumber());
+ HeapNumber* num = HeapNumber::cast(value);
+ CHECK_GE(num->value(), 0.0);
+ CHECK_LE(num->value(), static_cast<double>(kMaxUInt32));
+ return static_cast<uint32_t>(num->value());
+}
+
+static int32_t SafeInt32(Object* value) {
+ if (value->IsSmi()) {
+ return Smi::cast(value)->value();
+ }
+ DCHECK(value->IsHeapNumber());
+ HeapNumber* num = HeapNumber::cast(value);
+ CHECK_GE(num->value(), static_cast<double>(Smi::kMinValue));
+ CHECK_LE(num->value(), static_cast<double>(Smi::kMaxValue));
+ return static_cast<int32_t>(num->value());
+}
+
+Handle<WasmModuleObject> WasmModuleObject::New(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ ModuleOrigin origin = compiled_module->module()->origin;
+
+ Handle<JSObject> module_object;
+ if (origin == ModuleOrigin::kWasmOrigin) {
+ Handle<JSFunction> module_cons(
+ isolate->native_context()->wasm_module_constructor());
+ module_object = isolate->factory()->NewJSObject(module_cons);
+ Handle<Symbol> module_sym(isolate->native_context()->wasm_module_sym());
+ Object::SetProperty(module_object, module_sym, module_object, STRICT)
+ .Check();
+ } else {
+ DCHECK(origin == ModuleOrigin::kAsmJsOrigin);
+ Handle<Map> map = isolate->factory()->NewMap(
+ JS_OBJECT_TYPE,
+ JSObject::kHeaderSize + WasmModuleObject::kFieldCount * kPointerSize);
+ module_object = isolate->factory()->NewJSObjectFromMap(map, TENURED);
+ }
+ module_object->SetInternalField(WasmModuleObject::kCompiledModule,
+ *compiled_module);
+ Handle<WeakCell> link_to_module =
+ isolate->factory()->NewWeakCell(module_object);
+ compiled_module->set_weak_wasm_module(link_to_module);
+ return Handle<WasmModuleObject>::cast(module_object);
+}
+
+WasmModuleObject* WasmModuleObject::cast(Object* object) {
+ DCHECK(object->IsJSObject());
+ // TODO(titzer): brand check for WasmModuleObject.
+ return reinterpret_cast<WasmModuleObject*>(object);
+}
+
+Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
+ uint32_t maximum,
+ Handle<FixedArray>* js_functions) {
+ Handle<JSFunction> table_ctor(
+ isolate->native_context()->wasm_table_constructor());
+ Handle<JSObject> table_obj = isolate->factory()->NewJSObject(table_ctor);
+ *js_functions = isolate->factory()->NewFixedArray(initial);
+ Object* null = isolate->heap()->null_value();
+ for (int i = 0; i < static_cast<int>(initial); ++i) {
+ (*js_functions)->set(i, null);
+ }
+ table_obj->SetInternalField(kFunctions, *(*js_functions));
+ table_obj->SetInternalField(kMaximum,
+ static_cast<Object*>(Smi::FromInt(maximum)));
+
+ Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
+ table_obj->SetInternalField(kDispatchTables, *dispatch_tables);
+ Handle<Symbol> table_sym(isolate->native_context()->wasm_table_sym());
+ Object::SetProperty(table_obj, table_sym, table_obj, STRICT).Check();
+ return Handle<WasmTableObject>::cast(table_obj);
+}
+
+DEFINE_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
+
+Handle<FixedArray> WasmTableObject::AddDispatchTable(
+ Isolate* isolate, Handle<WasmTableObject> table_obj,
+ Handle<WasmInstanceObject> instance, int table_index,
+ Handle<FixedArray> dispatch_table) {
+ Handle<FixedArray> dispatch_tables(
+ FixedArray::cast(table_obj->GetInternalField(kDispatchTables)), isolate);
+ DCHECK_EQ(0, dispatch_tables->length() % 3);
+
+ if (instance.is_null()) return dispatch_tables;
+ // TODO(titzer): use weak cells here to avoid leaking instances.
+
+ // Grow the dispatch table and add a new triple at the end.
+ Handle<FixedArray> new_dispatch_tables =
+ isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 3);
+
+ new_dispatch_tables->set(dispatch_tables->length() + 0, *instance);
+ new_dispatch_tables->set(dispatch_tables->length() + 1,
+ Smi::FromInt(table_index));
+ new_dispatch_tables->set(dispatch_tables->length() + 2, *dispatch_table);
+
+ table_obj->SetInternalField(WasmTableObject::kDispatchTables,
+ *new_dispatch_tables);
+
+ return new_dispatch_tables;
+}
+
+DEFINE_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
+
+uint32_t WasmTableObject::current_length() { return get_functions()->length(); }
+
+uint32_t WasmTableObject::maximum_length() {
+ return SafeUint32(GetInternalField(kMaximum));
+}
+
+WasmTableObject* WasmTableObject::cast(Object* object) {
+ DCHECK(object && object->IsJSObject());
+ // TODO(titzer): brand check for WasmTableObject.
+ return reinterpret_cast<WasmTableObject*>(object);
+}
+
+Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer,
+ int maximum) {
+ Handle<JSFunction> memory_ctor(
+ isolate->native_context()->wasm_memory_constructor());
+ Handle<JSObject> memory_obj = isolate->factory()->NewJSObject(memory_ctor);
+ memory_obj->SetInternalField(kArrayBuffer, *buffer);
+ memory_obj->SetInternalField(kMaximum,
+ static_cast<Object*>(Smi::FromInt(maximum)));
+ Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
+ Object::SetProperty(memory_obj, memory_sym, memory_obj, STRICT).Check();
+ return Handle<WasmMemoryObject>::cast(memory_obj);
+}
+
+DEFINE_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+
+uint32_t WasmMemoryObject::current_pages() {
+ return SafeUint32(get_buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+}
+
+int32_t WasmMemoryObject::maximum_pages() {
+ return SafeInt32(GetInternalField(kMaximum));
+}
+
+WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
+ DCHECK(object && object->IsJSObject());
+ // TODO(titzer): brand check for WasmMemoryObject.
+ return reinterpret_cast<WasmMemoryObject*>(object);
+}
+
+void WasmMemoryObject::AddInstance(WasmInstanceObject* instance) {
+ // TODO(gdeepti): This should be a weak list of instance objects
+ // for instances that share memory.
+ SetInternalField(kInstance, instance);
+}
+
+DEFINE_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
+ WasmCompiledModule)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, globals_buffer,
+ kGlobalsArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, kMemoryArrayBuffer,
+ JSArrayBuffer)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
+ WasmMemoryObject)
+DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
+ WasmDebugInfo)
+
+WasmModuleObject* WasmInstanceObject::module_object() {
+ return WasmModuleObject::cast(*get_compiled_module()->wasm_module());
+}
+
+WasmModule* WasmInstanceObject::module() {
+ return reinterpret_cast<WasmModuleWrapper*>(
+ *get_compiled_module()->module_wrapper())
+ ->get();
+}
+
+WasmInstanceObject* WasmInstanceObject::cast(Object* object) {
+ DCHECK(IsWasmInstanceObject(object));
+ return reinterpret_cast<WasmInstanceObject*>(object);
+}
+
+bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
+ if (!object->IsObject()) return false;
+ if (!object->IsJSObject()) return false;
+
+ JSObject* obj = JSObject::cast(object);
+ Isolate* isolate = obj->GetIsolate();
+ if (obj->GetInternalFieldCount() != kFieldCount) {
+ return false;
+ }
+
+ Object* mem = obj->GetInternalField(kMemoryArrayBuffer);
+ if (!(mem->IsUndefined(isolate) || mem->IsJSArrayBuffer()) ||
+ !WasmCompiledModule::IsWasmCompiledModule(
+ obj->GetInternalField(kCompiledModule))) {
+ return false;
+ }
+
+ // All checks passed.
+ return true;
+}
+
+Handle<WasmInstanceObject> WasmInstanceObject::New(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ Handle<Map> map = isolate->factory()->NewMap(
+ JS_OBJECT_TYPE, JSObject::kHeaderSize + kFieldCount * kPointerSize);
+ Handle<WasmInstanceObject> instance(
+ reinterpret_cast<WasmInstanceObject*>(
+ *isolate->factory()->NewJSObjectFromMap(map, TENURED)),
+ isolate);
+
+ instance->SetInternalField(kCompiledModule, *compiled_module);
+ instance->SetInternalField(kMemoryObject, isolate->heap()->undefined_value());
+ return instance;
+}
+
+WasmInstanceObject* WasmExportedFunction::instance() {
+ return WasmInstanceObject::cast(GetInternalField(kInstance));
+}
+
+int WasmExportedFunction::function_index() {
+ return SafeInt32(GetInternalField(kIndex));
+}
+
+WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
+ DCHECK(object && object->IsJSFunction());
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
+ JSFunction::cast(object)->code()->kind());
+ // TODO(titzer): brand check for WasmExportedFunction.
+ return reinterpret_cast<WasmExportedFunction*>(object);
+}
+
+Handle<WasmExportedFunction> WasmExportedFunction::New(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<String> name,
+ Handle<Code> export_wrapper, int arity, int func_index) {
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
+ shared->set_length(arity);
+ shared->set_internal_formal_parameter_count(arity);
+ Handle<JSFunction> function = isolate->factory()->NewFunction(
+ isolate->wasm_function_map(), name, export_wrapper);
+ function->set_shared(*shared);
+
+ function->SetInternalField(kInstance, *instance);
+ function->SetInternalField(kIndex, Smi::FromInt(func_index));
+ return Handle<WasmExportedFunction>::cast(function);
+}
+
+Handle<WasmCompiledModule> WasmCompiledModule::New(
+ Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper) {
+ Handle<FixedArray> ret =
+ isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
+ // WasmCompiledModule::cast would fail since module bytes are not set yet.
+ Handle<WasmCompiledModule> compiled_module(
+ reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
+ compiled_module->InitId();
+ compiled_module->set_module_wrapper(module_wrapper);
+ return compiled_module;
+}
+
+wasm::WasmModule* WasmCompiledModule::module() const {
+ return reinterpret_cast<WasmModuleWrapper*>(*module_wrapper())->get();
+}
+
+void WasmCompiledModule::InitId() {
+#if DEBUG
+ static uint32_t instance_id_counter = 0;
+ set(kID_instance_id, Smi::FromInt(instance_id_counter++));
+ TRACE("New compiled module id: %d\n", instance_id());
+#endif
+}
+
+bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
+ if (!obj->IsFixedArray()) return false;
+ FixedArray* arr = FixedArray::cast(obj);
+ if (arr->length() != PropertyIndices::Count) return false;
+ Isolate* isolate = arr->GetIsolate();
+#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
+ if (!arr->get(kID_##NAME)->IsSmi()) return false;
+#define WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME) \
+ if (!arr->get(kID_##NAME)->IsUndefined(isolate) && \
+ !arr->get(kID_##NAME)->Is##TYPE()) \
+ return false;
+#define WCM_CHECK_OBJECT(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME)
+#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(WeakCell, NAME)
+#define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
+ WCM_PROPERTY_TABLE(WCM_CHECK)
+#undef WCM_CHECK
+
+ // All checks passed.
+ return true;
+}
+
+void WasmCompiledModule::PrintInstancesChain() {
+#if DEBUG
+ if (!FLAG_trace_wasm_instances) return;
+ for (WasmCompiledModule* current = this; current != nullptr;) {
+ PrintF("->%d", current->instance_id());
+ if (current->ptr_to_weak_next_instance() == nullptr) break;
+ CHECK(!current->ptr_to_weak_next_instance()->cleared());
+ current =
+ WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
+ }
+ PrintF("\n");
+#endif
+}
+
+uint32_t WasmCompiledModule::mem_size() const {
+ return has_memory() ? memory()->byte_length()->Number() : default_mem_size();
+}
+
+uint32_t WasmCompiledModule::default_mem_size() const {
+ return min_mem_pages() * WasmModule::kPageSize;
+}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
new file mode 100644
index 0000000000..f74661f652
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -0,0 +1,308 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_OBJECTS_H_
+#define V8_WASM_OBJECTS_H_
+
+#include "src/objects-inl.h"
+#include "src/wasm/managed.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+struct WasmModule;
+}
+
+class WasmCompiledModule;
+class WasmDebugInfo;
+class WasmInstanceObject;
+
+#define DECLARE_CASTS(name) \
+ static bool Is##name(Object* object); \
+ static name* cast(Object* object)
+
+#define DECLARE_ACCESSORS(name, type) \
+ type* get_##name(); \
+ void set_##name(type* value)
+
+#define DECLARE_OPTIONAL_ACCESSORS(name, type) \
+ bool has_##name(); \
+ type* get_##name(); \
+ void set_##name(type* value)
+
+// Representation of a WebAssembly.Module JavaScript-level object.
+class WasmModuleObject : public JSObject {
+ public:
+ // TODO(titzer): add the brand as an internal field instead of a property.
+ enum Fields { kCompiledModule, kFieldCount };
+
+ DECLARE_CASTS(WasmModuleObject);
+
+ WasmCompiledModule* compiled_module();
+ wasm::WasmModule* module();
+ int num_functions();
+ bool is_asm_js();
+ int GetAsmWasmSourcePosition(int func_index, int byte_offset);
+ WasmDebugInfo* debug_info();
+ void set_debug_info(WasmDebugInfo* debug_info);
+ MaybeHandle<String> GetFunctionName(Isolate* isolate, int func_index);
+
+ static Handle<WasmModuleObject> New(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+};
+
+// Representation of a WebAssembly.Table JavaScript-level object.
+class WasmTableObject : public JSObject {
+ public:
+ // TODO(titzer): add the brand as an internal field instead of a property.
+ enum Fields { kFunctions, kMaximum, kDispatchTables, kFieldCount };
+
+ DECLARE_CASTS(WasmTableObject);
+ DECLARE_ACCESSORS(functions, FixedArray);
+
+ FixedArray* get_dispatch_tables();
+ uint32_t current_length();
+ uint32_t maximum_length();
+
+ static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
+ uint32_t maximum,
+ Handle<FixedArray>* js_functions);
+ static bool Grow(Handle<WasmTableObject> table, uint32_t count);
+ static Handle<FixedArray> AddDispatchTable(
+ Isolate* isolate, Handle<WasmTableObject> table,
+ Handle<WasmInstanceObject> instance, int table_index,
+ Handle<FixedArray> dispatch_table);
+};
+
+// Representation of a WebAssembly.Memory JavaScript-level object.
+class WasmMemoryObject : public JSObject {
+ public:
+ // TODO(titzer): add the brand as an internal field instead of a property.
+ enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstance, kFieldCount };
+
+ DECLARE_CASTS(WasmMemoryObject);
+ DECLARE_ACCESSORS(buffer, JSArrayBuffer);
+
+ void AddInstance(WasmInstanceObject* object);
+ uint32_t current_pages();
+ int32_t maximum_pages(); // returns < 0 if there is no maximum
+
+ static Handle<WasmMemoryObject> New(Isolate* isolate,
+ Handle<JSArrayBuffer> buffer,
+ int maximum);
+
+ static bool Grow(Handle<WasmMemoryObject> memory, uint32_t count);
+};
+
+// Representation of a WebAssembly.Instance JavaScript-level object.
+class WasmInstanceObject : public JSObject {
+ public:
+ // TODO(titzer): add the brand as an internal field instead of a property.
+ enum Fields {
+ kCompiledModule,
+ kMemoryObject,
+ kMemoryArrayBuffer,
+ kGlobalsArrayBuffer,
+ kDebugInfo,
+ kFieldCount
+ };
+
+ DECLARE_CASTS(WasmInstanceObject);
+
+ DECLARE_ACCESSORS(compiled_module, WasmCompiledModule);
+ DECLARE_OPTIONAL_ACCESSORS(globals_buffer, JSArrayBuffer);
+ DECLARE_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer);
+ DECLARE_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject);
+ DECLARE_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo);
+
+ WasmModuleObject* module_object();
+ wasm::WasmModule* module();
+
+ static Handle<WasmInstanceObject> New(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
+};
+
+// Representation of an exported WASM function.
+class WasmExportedFunction : public JSFunction {
+ public:
+ enum Fields { kInstance, kIndex, kFieldCount };
+
+ DECLARE_CASTS(WasmExportedFunction);
+
+ WasmInstanceObject* instance();
+ int function_index();
+
+ static Handle<WasmExportedFunction> New(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Handle<String> name,
+ Handle<Code> export_wrapper,
+ int arity, int func_index);
+};
+
+class WasmCompiledModule : public FixedArray {
+ public:
+ enum Fields { kFieldCount };
+
+ static WasmCompiledModule* cast(Object* fixed_array) {
+ SLOW_DCHECK(IsWasmCompiledModule(fixed_array));
+ return reinterpret_cast<WasmCompiledModule*>(fixed_array);
+ }
+
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID) \
+ Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
+ \
+ MaybeHandle<TYPE> maybe_##NAME() const { \
+ if (has_##NAME()) return NAME(); \
+ return MaybeHandle<TYPE>(); \
+ } \
+ \
+ TYPE* ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ if (!obj->Is##TYPE()) return nullptr; \
+ return TYPE::cast(obj); \
+ } \
+ \
+ void set_##NAME(Handle<TYPE> value) { set_ptr_to_##NAME(*value); } \
+ \
+ void set_ptr_to_##NAME(TYPE* value) { set(ID, value); } \
+ \
+ bool has_##NAME() const { return get(ID)->Is##TYPE(); } \
+ \
+ void reset_##NAME() { set_undefined(ID); }
+
+#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
+
+#define WCM_SMALL_NUMBER(TYPE, NAME) \
+ TYPE NAME() const { \
+ return static_cast<TYPE>(Smi::cast(get(kID_##NAME))->value()); \
+ } \
+ void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
+
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
+ \
+ Handle<TYPE> NAME() const { \
+ return handle(TYPE::cast(weak_##NAME()->value())); \
+ }
+
+#define CORE_WCM_PROPERTY_TABLE(MACRO) \
+ MACRO(OBJECT, FixedArray, code_table) \
+ MACRO(OBJECT, Foreign, module_wrapper) \
+ /* For debugging: */ \
+ MACRO(OBJECT, SeqOneByteString, module_bytes) \
+ MACRO(OBJECT, Script, script) \
+ MACRO(OBJECT, ByteArray, asm_js_offset_tables) \
+ /* End of debugging stuff */ \
+ MACRO(OBJECT, FixedArray, function_tables) \
+ MACRO(OBJECT, FixedArray, empty_function_tables) \
+ MACRO(OBJECT, JSArrayBuffer, memory) \
+ MACRO(SMALL_NUMBER, uint32_t, min_mem_pages) \
+ MACRO(SMALL_NUMBER, uint32_t, max_mem_pages) \
+ MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
+ MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
+ MACRO(WEAK_LINK, JSObject, owning_instance) \
+ MACRO(WEAK_LINK, JSObject, wasm_module)
+
+#if DEBUG
+#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
+#else
+#define DEBUG_ONLY_TABLE(IGNORE)
+ uint32_t instance_id() const { return -1; }
+#endif
+
+#define WCM_PROPERTY_TABLE(MACRO) \
+ CORE_WCM_PROPERTY_TABLE(MACRO) \
+ DEBUG_ONLY_TABLE(MACRO)
+
+ private:
+ enum PropertyIndices {
+#define INDICES(IGNORE1, IGNORE2, NAME) kID_##NAME,
+ WCM_PROPERTY_TABLE(INDICES) Count
+#undef INDICES
+ };
+
+ public:
+ static Handle<WasmCompiledModule> New(
+ Isolate* isolate, Handle<Managed<wasm::WasmModule>> module_wrapper);
+
+ static Handle<WasmCompiledModule> Clone(Isolate* isolate,
+ Handle<WasmCompiledModule> module) {
+ Handle<WasmCompiledModule> ret = Handle<WasmCompiledModule>::cast(
+ isolate->factory()->CopyFixedArray(module));
+ ret->InitId();
+ ret->reset_weak_owning_instance();
+ ret->reset_weak_next_instance();
+ ret->reset_weak_prev_instance();
+ return ret;
+ }
+
+ uint32_t mem_size() const;
+ uint32_t default_mem_size() const;
+
+ wasm::WasmModule* module() const;
+
+#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
+ WCM_PROPERTY_TABLE(DECLARATION)
+#undef DECLARATION
+
+ static bool IsWasmCompiledModule(Object* obj);
+
+ void PrintInstancesChain();
+
+ static void RecreateModuleWrapper(Isolate* isolate,
+ Handle<FixedArray> compiled_module);
+
+ // Extract a function name from the given wasm instance.
+ // Returns a null handle if the function is unnamed or the name is not a valid
+ // UTF-8 string.
+ static MaybeHandle<String> GetFunctionName(
+ Handle<WasmCompiledModule> compiled_module, uint32_t func_index);
+
+ private:
+ void InitId();
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WasmCompiledModule);
+};
+
+class WasmDebugInfo : public FixedArray {
+ public:
+ enum class Fields { kFieldCount };
+
+ static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
+
+ static bool IsDebugInfo(Object* object);
+ static WasmDebugInfo* cast(Object* object);
+
+ JSObject* wasm_instance();
+
+ bool SetBreakPoint(int byte_offset);
+
+ // Get the Script for the specified function.
+ static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
+ int func_index);
+
+ // Disassemble the specified function from this module.
+ static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
+ int func_index);
+
+ // Get the offset table for the specified function, mapping from byte offsets
+ // to position in the disassembly.
+ // Returns an array with three entries per instruction: byte offset, line and
+ // column.
+ static Handle<FixedArray> GetFunctionOffsetTable(
+ Handle<WasmDebugInfo> debug_info, int func_index);
+
+ // Get the asm.js source position from a byte offset.
+ // Must only be called if the associated wasm object was created from asm.js.
+ static int GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
+ int func_index, int byte_offset);
+};
+
+#undef DECLARE_ACCESSORS
+#undef DECLARE_OPTIONAL_ACCESSORS
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_OBJECTS_H_
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index cd2dde4748..8f81b81a50 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -88,6 +88,7 @@ static const FunctionSig* kSimdExprSigs[] = {
static byte kSimpleExprSigTable[256];
static byte kSimpleAsmjsExprSigTable[256];
static byte kSimdExprSigTable[256];
+static byte kAtomicExprSigTable[256];
// Initialize the signature table.
static void InitSigTables() {
@@ -105,6 +106,12 @@ static void InitSigTables() {
kSimdExprSigTable[simd_index] = static_cast<int>(kSigEnum_##sig) + 1;
FOREACH_SIMD_0_OPERAND_OPCODE(SET_SIG_TABLE)
#undef SET_SIG_TABLE
+ byte atomic_index;
+#define SET_ATOMIC_SIG_TABLE(name, opcode, sig) \
+ atomic_index = opcode & 0xff; \
+ kAtomicExprSigTable[atomic_index] = static_cast<int>(kSigEnum_##sig) + 1;
+ FOREACH_ATOMIC_OPCODE(SET_ATOMIC_SIG_TABLE)
+#undef SET_ATOMIC_SIG_TABLE
}
class SigTable {
@@ -125,6 +132,10 @@ class SigTable {
return const_cast<FunctionSig*>(
kSimdExprSigs[kSimdExprSigTable[static_cast<byte>(opcode & 0xff)]]);
}
+ FunctionSig* AtomicSignature(WasmOpcode opcode) const {
+ return const_cast<FunctionSig*>(
+ kSimpleExprSigs[kAtomicExprSigTable[static_cast<byte>(opcode & 0xff)]]);
+ }
};
static base::LazyInstance<SigTable>::type sig_table = LAZY_INSTANCE_INITIALIZER;
@@ -141,6 +152,10 @@ FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
return sig_table.Get().AsmjsSignature(opcode);
}
+FunctionSig* WasmOpcodes::AtomicSignature(WasmOpcode opcode) {
+ return sig_table.Get().AtomicSignature(opcode);
+}
+
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
#define WASM_64 1
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 03827b2035..ec22579bd7 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -5,6 +5,7 @@
#ifndef V8_WASM_OPCODES_H_
#define V8_WASM_OPCODES_H_
+#include "src/globals.h"
#include "src/machine-type.h"
#include "src/signature.h"
@@ -14,12 +15,12 @@ namespace wasm {
// Binary encoding of local types.
enum LocalTypeCode {
- kLocalVoid = 0,
- kLocalI32 = 1,
- kLocalI64 = 2,
- kLocalF32 = 3,
- kLocalF64 = 4,
- kLocalS128 = 5
+ kLocalVoid = 0x40,
+ kLocalI32 = 0x7f,
+ kLocalI64 = 0x7e,
+ kLocalF32 = 0x7d,
+ kLocalF64 = 0x7c,
+ kLocalS128 = 0x7b
};
// Type code for multi-value block types.
@@ -46,199 +47,198 @@ typedef int WasmCodePosition;
const WasmCodePosition kNoCodePosition = -1;
// Control expressions and blocks.
-#define FOREACH_CONTROL_OPCODE(V) \
- V(Unreachable, 0x00, _) \
- V(Block, 0x01, _) \
- V(Loop, 0x02, _) \
- V(If, 0x03, _) \
- V(Else, 0x04, _) \
- V(Select, 0x05, _) \
- V(Br, 0x06, _) \
- V(BrIf, 0x07, _) \
- V(BrTable, 0x08, _) \
- V(Return, 0x09, _) \
- V(Nop, 0x0a, _) \
- V(Throw, 0xfa, _) \
- V(Try, 0xfb, _) \
- V(Catch, 0xfe, _) \
- V(End, 0x0F, _)
+#define FOREACH_CONTROL_OPCODE(V) \
+ V(Unreachable, 0x00, _) \
+ V(Nop, 0x01, _) \
+ V(Block, 0x02, _) \
+ V(Loop, 0x03, _) \
+ V(If, 0x004, _) \
+ V(Else, 0x05, _) \
+ V(Try, 0x06, _ /* eh_prototype */) \
+ V(Catch, 0x07, _ /* eh_prototype */) \
+ V(Throw, 0x08, _ /* eh_prototype */) \
+ V(End, 0x0b, _) \
+ V(Br, 0x0c, _) \
+ V(BrIf, 0x0d, _) \
+ V(BrTable, 0x0e, _) \
+ V(Return, 0x0f, _)
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
- V(I32Const, 0x10, _) \
- V(I64Const, 0x11, _) \
- V(F64Const, 0x12, _) \
- V(F32Const, 0x13, _) \
- V(GetLocal, 0x14, _) \
- V(SetLocal, 0x15, _) \
- V(TeeLocal, 0x19, _) \
- V(Drop, 0x0b, _) \
- V(CallFunction, 0x16, _) \
- V(CallIndirect, 0x17, _) \
- V(I8Const, 0xcb, _) \
- V(GetGlobal, 0xbb, _) \
- V(SetGlobal, 0xbc, _)
+ V(CallFunction, 0x10, _) \
+ V(CallIndirect, 0x11, _) \
+ V(Drop, 0x1a, _) \
+ V(Select, 0x1b, _) \
+ V(GetLocal, 0x20, _) \
+ V(SetLocal, 0x21, _) \
+ V(TeeLocal, 0x22, _) \
+ V(GetGlobal, 0x23, _) \
+ V(SetGlobal, 0x24, _) \
+ V(I32Const, 0x41, _) \
+ V(I64Const, 0x42, _) \
+ V(F32Const, 0x43, _) \
+ V(F64Const, 0x44, _) \
+ V(I8Const, 0xcb, _ /* TODO(titzer): V8 specific, remove */)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
- V(I32LoadMem8S, 0x20, i_i) \
- V(I32LoadMem8U, 0x21, i_i) \
- V(I32LoadMem16S, 0x22, i_i) \
- V(I32LoadMem16U, 0x23, i_i) \
- V(I64LoadMem8S, 0x24, l_i) \
- V(I64LoadMem8U, 0x25, l_i) \
- V(I64LoadMem16S, 0x26, l_i) \
- V(I64LoadMem16U, 0x27, l_i) \
- V(I64LoadMem32S, 0x28, l_i) \
- V(I64LoadMem32U, 0x29, l_i) \
- V(I32LoadMem, 0x2a, i_i) \
- V(I64LoadMem, 0x2b, l_i) \
- V(F32LoadMem, 0x2c, f_i) \
- V(F64LoadMem, 0x2d, d_i)
+ V(I32LoadMem, 0x28, i_i) \
+ V(I64LoadMem, 0x29, l_i) \
+ V(F32LoadMem, 0x2a, f_i) \
+ V(F64LoadMem, 0x2b, d_i) \
+ V(I32LoadMem8S, 0x2c, i_i) \
+ V(I32LoadMem8U, 0x2d, i_i) \
+ V(I32LoadMem16S, 0x2e, i_i) \
+ V(I32LoadMem16U, 0x2f, i_i) \
+ V(I64LoadMem8S, 0x30, l_i) \
+ V(I64LoadMem8U, 0x31, l_i) \
+ V(I64LoadMem16S, 0x32, l_i) \
+ V(I64LoadMem16U, 0x33, l_i) \
+ V(I64LoadMem32S, 0x34, l_i) \
+ V(I64LoadMem32U, 0x35, l_i)
// Store memory expressions.
#define FOREACH_STORE_MEM_OPCODE(V) \
- V(I32StoreMem8, 0x2e, i_ii) \
- V(I32StoreMem16, 0x2f, i_ii) \
- V(I64StoreMem8, 0x30, l_il) \
- V(I64StoreMem16, 0x31, l_il) \
- V(I64StoreMem32, 0x32, l_il) \
- V(I32StoreMem, 0x33, i_ii) \
- V(I64StoreMem, 0x34, l_il) \
- V(F32StoreMem, 0x35, f_if) \
- V(F64StoreMem, 0x36, d_id)
-
-#define FOREACH_SIMPLE_MEM_OPCODE(V) V(GrowMemory, 0x39, i_i)
-
-// Load memory expressions.
+ V(I32StoreMem, 0x36, i_ii) \
+ V(I64StoreMem, 0x37, l_il) \
+ V(F32StoreMem, 0x38, f_if) \
+ V(F64StoreMem, 0x39, d_id) \
+ V(I32StoreMem8, 0x3a, i_ii) \
+ V(I32StoreMem16, 0x3b, i_ii) \
+ V(I64StoreMem8, 0x3c, l_il) \
+ V(I64StoreMem16, 0x3d, l_il) \
+ V(I64StoreMem32, 0x3e, l_il)
+
+// Miscellaneous memory expressions
#define FOREACH_MISC_MEM_OPCODE(V) \
- V(MemorySize, 0x3b, i_v)
+ V(MemorySize, 0x3f, i_v) \
+ V(GrowMemory, 0x40, i_i)
// Expressions with signatures.
#define FOREACH_SIMPLE_OPCODE(V) \
- V(I32Add, 0x40, i_ii) \
- V(I32Sub, 0x41, i_ii) \
- V(I32Mul, 0x42, i_ii) \
- V(I32DivS, 0x43, i_ii) \
- V(I32DivU, 0x44, i_ii) \
- V(I32RemS, 0x45, i_ii) \
- V(I32RemU, 0x46, i_ii) \
- V(I32And, 0x47, i_ii) \
- V(I32Ior, 0x48, i_ii) \
- V(I32Xor, 0x49, i_ii) \
- V(I32Shl, 0x4a, i_ii) \
- V(I32ShrU, 0x4b, i_ii) \
- V(I32ShrS, 0x4c, i_ii) \
- V(I32Eq, 0x4d, i_ii) \
- V(I32Ne, 0x4e, i_ii) \
- V(I32LtS, 0x4f, i_ii) \
- V(I32LeS, 0x50, i_ii) \
- V(I32LtU, 0x51, i_ii) \
- V(I32LeU, 0x52, i_ii) \
- V(I32GtS, 0x53, i_ii) \
- V(I32GeS, 0x54, i_ii) \
- V(I32GtU, 0x55, i_ii) \
- V(I32GeU, 0x56, i_ii) \
- V(I32Clz, 0x57, i_i) \
- V(I32Ctz, 0x58, i_i) \
- V(I32Popcnt, 0x59, i_i) \
- V(I32Eqz, 0x5a, i_i) \
- V(I64Add, 0x5b, l_ll) \
- V(I64Sub, 0x5c, l_ll) \
- V(I64Mul, 0x5d, l_ll) \
- V(I64DivS, 0x5e, l_ll) \
- V(I64DivU, 0x5f, l_ll) \
- V(I64RemS, 0x60, l_ll) \
- V(I64RemU, 0x61, l_ll) \
- V(I64And, 0x62, l_ll) \
- V(I64Ior, 0x63, l_ll) \
- V(I64Xor, 0x64, l_ll) \
- V(I64Shl, 0x65, l_ll) \
- V(I64ShrU, 0x66, l_ll) \
- V(I64ShrS, 0x67, l_ll) \
- V(I64Eq, 0x68, i_ll) \
- V(I64Ne, 0x69, i_ll) \
- V(I64LtS, 0x6a, i_ll) \
- V(I64LeS, 0x6b, i_ll) \
- V(I64LtU, 0x6c, i_ll) \
- V(I64LeU, 0x6d, i_ll) \
- V(I64GtS, 0x6e, i_ll) \
- V(I64GeS, 0x6f, i_ll) \
- V(I64GtU, 0x70, i_ll) \
- V(I64GeU, 0x71, i_ll) \
- V(I64Clz, 0x72, l_l) \
- V(I64Ctz, 0x73, l_l) \
- V(I64Popcnt, 0x74, l_l) \
- V(I64Eqz, 0xba, i_l) \
- V(F32Add, 0x75, f_ff) \
- V(F32Sub, 0x76, f_ff) \
- V(F32Mul, 0x77, f_ff) \
- V(F32Div, 0x78, f_ff) \
- V(F32Min, 0x79, f_ff) \
- V(F32Max, 0x7a, f_ff) \
- V(F32Abs, 0x7b, f_f) \
- V(F32Neg, 0x7c, f_f) \
- V(F32CopySign, 0x7d, f_ff) \
- V(F32Ceil, 0x7e, f_f) \
- V(F32Floor, 0x7f, f_f) \
- V(F32Trunc, 0x80, f_f) \
- V(F32NearestInt, 0x81, f_f) \
- V(F32Sqrt, 0x82, f_f) \
- V(F32Eq, 0x83, i_ff) \
- V(F32Ne, 0x84, i_ff) \
- V(F32Lt, 0x85, i_ff) \
- V(F32Le, 0x86, i_ff) \
- V(F32Gt, 0x87, i_ff) \
- V(F32Ge, 0x88, i_ff) \
- V(F64Add, 0x89, d_dd) \
- V(F64Sub, 0x8a, d_dd) \
- V(F64Mul, 0x8b, d_dd) \
- V(F64Div, 0x8c, d_dd) \
- V(F64Min, 0x8d, d_dd) \
- V(F64Max, 0x8e, d_dd) \
- V(F64Abs, 0x8f, d_d) \
- V(F64Neg, 0x90, d_d) \
- V(F64CopySign, 0x91, d_dd) \
- V(F64Ceil, 0x92, d_d) \
- V(F64Floor, 0x93, d_d) \
- V(F64Trunc, 0x94, d_d) \
- V(F64NearestInt, 0x95, d_d) \
- V(F64Sqrt, 0x96, d_d) \
- V(F64Eq, 0x97, i_dd) \
- V(F64Ne, 0x98, i_dd) \
- V(F64Lt, 0x99, i_dd) \
- V(F64Le, 0x9a, i_dd) \
- V(F64Gt, 0x9b, i_dd) \
- V(F64Ge, 0x9c, i_dd) \
- V(I32SConvertF32, 0x9d, i_f) \
- V(I32SConvertF64, 0x9e, i_d) \
- V(I32UConvertF32, 0x9f, i_f) \
- V(I32UConvertF64, 0xa0, i_d) \
- V(I32ConvertI64, 0xa1, i_l) \
- V(I64SConvertF32, 0xa2, l_f) \
- V(I64SConvertF64, 0xa3, l_d) \
- V(I64UConvertF32, 0xa4, l_f) \
- V(I64UConvertF64, 0xa5, l_d) \
- V(I64SConvertI32, 0xa6, l_i) \
- V(I64UConvertI32, 0xa7, l_i) \
- V(F32SConvertI32, 0xa8, f_i) \
- V(F32UConvertI32, 0xa9, f_i) \
- V(F32SConvertI64, 0xaa, f_l) \
- V(F32UConvertI64, 0xab, f_l) \
- V(F32ConvertF64, 0xac, f_d) \
- V(F32ReinterpretI32, 0xad, f_i) \
- V(F64SConvertI32, 0xae, d_i) \
- V(F64UConvertI32, 0xaf, d_i) \
- V(F64SConvertI64, 0xb0, d_l) \
- V(F64UConvertI64, 0xb1, d_l) \
- V(F64ConvertF32, 0xb2, d_f) \
- V(F64ReinterpretI64, 0xb3, d_l) \
- V(I32ReinterpretF32, 0xb4, i_f) \
- V(I64ReinterpretF64, 0xb5, l_d) \
- V(I32Ror, 0xb6, i_ii) \
- V(I32Rol, 0xb7, i_ii) \
- V(I64Ror, 0xb8, l_ll) \
- V(I64Rol, 0xb9, l_ll)
+ V(I32Eqz, 0x45, i_i) \
+ V(I32Eq, 0x46, i_ii) \
+ V(I32Ne, 0x47, i_ii) \
+ V(I32LtS, 0x48, i_ii) \
+ V(I32LtU, 0x49, i_ii) \
+ V(I32GtS, 0x4a, i_ii) \
+ V(I32GtU, 0x4b, i_ii) \
+ V(I32LeS, 0x4c, i_ii) \
+ V(I32LeU, 0x4d, i_ii) \
+ V(I32GeS, 0x4e, i_ii) \
+ V(I32GeU, 0x4f, i_ii) \
+ V(I64Eqz, 0x50, i_l) \
+ V(I64Eq, 0x51, i_ll) \
+ V(I64Ne, 0x52, i_ll) \
+ V(I64LtS, 0x53, i_ll) \
+ V(I64LtU, 0x54, i_ll) \
+ V(I64GtS, 0x55, i_ll) \
+ V(I64GtU, 0x56, i_ll) \
+ V(I64LeS, 0x57, i_ll) \
+ V(I64LeU, 0x58, i_ll) \
+ V(I64GeS, 0x59, i_ll) \
+ V(I64GeU, 0x5a, i_ll) \
+ V(F32Eq, 0x5b, i_ff) \
+ V(F32Ne, 0x5c, i_ff) \
+ V(F32Lt, 0x5d, i_ff) \
+ V(F32Gt, 0x5e, i_ff) \
+ V(F32Le, 0x5f, i_ff) \
+ V(F32Ge, 0x60, i_ff) \
+ V(F64Eq, 0x61, i_dd) \
+ V(F64Ne, 0x62, i_dd) \
+ V(F64Lt, 0x63, i_dd) \
+ V(F64Gt, 0x64, i_dd) \
+ V(F64Le, 0x65, i_dd) \
+ V(F64Ge, 0x66, i_dd) \
+ V(I32Clz, 0x67, i_i) \
+ V(I32Ctz, 0x68, i_i) \
+ V(I32Popcnt, 0x69, i_i) \
+ V(I32Add, 0x6a, i_ii) \
+ V(I32Sub, 0x6b, i_ii) \
+ V(I32Mul, 0x6c, i_ii) \
+ V(I32DivS, 0x6d, i_ii) \
+ V(I32DivU, 0x6e, i_ii) \
+ V(I32RemS, 0x6f, i_ii) \
+ V(I32RemU, 0x70, i_ii) \
+ V(I32And, 0x71, i_ii) \
+ V(I32Ior, 0x72, i_ii) \
+ V(I32Xor, 0x73, i_ii) \
+ V(I32Shl, 0x74, i_ii) \
+ V(I32ShrS, 0x75, i_ii) \
+ V(I32ShrU, 0x76, i_ii) \
+ V(I32Rol, 0x77, i_ii) \
+ V(I32Ror, 0x78, i_ii) \
+ V(I64Clz, 0x79, l_l) \
+ V(I64Ctz, 0x7a, l_l) \
+ V(I64Popcnt, 0x7b, l_l) \
+ V(I64Add, 0x7c, l_ll) \
+ V(I64Sub, 0x7d, l_ll) \
+ V(I64Mul, 0x7e, l_ll) \
+ V(I64DivS, 0x7f, l_ll) \
+ V(I64DivU, 0x80, l_ll) \
+ V(I64RemS, 0x81, l_ll) \
+ V(I64RemU, 0x82, l_ll) \
+ V(I64And, 0x83, l_ll) \
+ V(I64Ior, 0x84, l_ll) \
+ V(I64Xor, 0x85, l_ll) \
+ V(I64Shl, 0x86, l_ll) \
+ V(I64ShrS, 0x87, l_ll) \
+ V(I64ShrU, 0x88, l_ll) \
+ V(I64Rol, 0x89, l_ll) \
+ V(I64Ror, 0x8a, l_ll) \
+ V(F32Abs, 0x8b, f_f) \
+ V(F32Neg, 0x8c, f_f) \
+ V(F32Ceil, 0x8d, f_f) \
+ V(F32Floor, 0x8e, f_f) \
+ V(F32Trunc, 0x8f, f_f) \
+ V(F32NearestInt, 0x90, f_f) \
+ V(F32Sqrt, 0x91, f_f) \
+ V(F32Add, 0x92, f_ff) \
+ V(F32Sub, 0x93, f_ff) \
+ V(F32Mul, 0x94, f_ff) \
+ V(F32Div, 0x95, f_ff) \
+ V(F32Min, 0x96, f_ff) \
+ V(F32Max, 0x97, f_ff) \
+ V(F32CopySign, 0x98, f_ff) \
+ V(F64Abs, 0x99, d_d) \
+ V(F64Neg, 0x9a, d_d) \
+ V(F64Ceil, 0x9b, d_d) \
+ V(F64Floor, 0x9c, d_d) \
+ V(F64Trunc, 0x9d, d_d) \
+ V(F64NearestInt, 0x9e, d_d) \
+ V(F64Sqrt, 0x9f, d_d) \
+ V(F64Add, 0xa0, d_dd) \
+ V(F64Sub, 0xa1, d_dd) \
+ V(F64Mul, 0xa2, d_dd) \
+ V(F64Div, 0xa3, d_dd) \
+ V(F64Min, 0xa4, d_dd) \
+ V(F64Max, 0xa5, d_dd) \
+ V(F64CopySign, 0xa6, d_dd) \
+ V(I32ConvertI64, 0xa7, i_l) \
+ V(I32SConvertF32, 0xa8, i_f) \
+ V(I32UConvertF32, 0xa9, i_f) \
+ V(I32SConvertF64, 0xaa, i_d) \
+ V(I32UConvertF64, 0xab, i_d) \
+ V(I64SConvertI32, 0xac, l_i) \
+ V(I64UConvertI32, 0xad, l_i) \
+ V(I64SConvertF32, 0xae, l_f) \
+ V(I64UConvertF32, 0xaf, l_f) \
+ V(I64SConvertF64, 0xb0, l_d) \
+ V(I64UConvertF64, 0xb1, l_d) \
+ V(F32SConvertI32, 0xb2, f_i) \
+ V(F32UConvertI32, 0xb3, f_i) \
+ V(F32SConvertI64, 0xb4, f_l) \
+ V(F32UConvertI64, 0xb5, f_l) \
+ V(F32ConvertF64, 0xb6, f_d) \
+ V(F64SConvertI32, 0xb7, d_i) \
+ V(F64UConvertI32, 0xb8, d_i) \
+ V(F64SConvertI64, 0xb9, d_l) \
+ V(F64UConvertI64, 0xba, d_l) \
+ V(F64ConvertF32, 0xbb, d_f) \
+ V(I32ReinterpretF32, 0xbc, i_f) \
+ V(I64ReinterpretF64, 0xbd, l_d) \
+ V(F32ReinterpretI32, 0xbe, f_i) \
+ V(F64ReinterpretI64, 0xbf, d_l)
// For compatibility with Asm.js.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
@@ -400,18 +400,55 @@ const WasmCodePosition kNoCodePosition = -1;
V(I16x8ExtractLane, 0xe539, _) \
V(I8x16ExtractLane, 0xe558, _)
+#define FOREACH_ATOMIC_OPCODE(V) \
+ V(I32AtomicAdd8S, 0xe601, i_ii) \
+ V(I32AtomicAdd8U, 0xe602, i_ii) \
+ V(I32AtomicAdd16S, 0xe603, i_ii) \
+ V(I32AtomicAdd16U, 0xe604, i_ii) \
+ V(I32AtomicAdd32, 0xe605, i_ii) \
+ V(I32AtomicAnd8S, 0xe606, i_ii) \
+ V(I32AtomicAnd8U, 0xe607, i_ii) \
+ V(I32AtomicAnd16S, 0xe608, i_ii) \
+ V(I32AtomicAnd16U, 0xe609, i_ii) \
+ V(I32AtomicAnd32, 0xe60a, i_ii) \
+ V(I32AtomicCompareExchange8S, 0xe60b, i_ii) \
+ V(I32AtomicCompareExchange8U, 0xe60c, i_ii) \
+ V(I32AtomicCompareExchange16S, 0xe60d, i_ii) \
+ V(I32AtomicCompareExchange16U, 0xe60e, i_ii) \
+ V(I32AtomicCompareExchange32, 0xe60f, i_ii) \
+ V(I32AtomicExchange8S, 0xe610, i_ii) \
+ V(I32AtomicExchange8U, 0xe611, i_ii) \
+ V(I32AtomicExchange16S, 0xe612, i_ii) \
+ V(I32AtomicExchange16U, 0xe613, i_ii) \
+ V(I32AtomicExchange32, 0xe614, i_ii) \
+ V(I32AtomicOr8S, 0xe615, i_ii) \
+ V(I32AtomicOr8U, 0xe616, i_ii) \
+ V(I32AtomicOr16S, 0xe617, i_ii) \
+ V(I32AtomicOr16U, 0xe618, i_ii) \
+ V(I32AtomicOr32, 0xe619, i_ii) \
+ V(I32AtomicSub8S, 0xe61a, i_ii) \
+ V(I32AtomicSub8U, 0xe61b, i_ii) \
+ V(I32AtomicSub16S, 0xe61c, i_ii) \
+ V(I32AtomicSub16U, 0xe61d, i_ii) \
+ V(I32AtomicSub32, 0xe61e, i_ii) \
+ V(I32AtomicXor8S, 0xe61f, i_ii) \
+ V(I32AtomicXor8U, 0xe620, i_ii) \
+ V(I32AtomicXor16S, 0xe621, i_ii) \
+ V(I32AtomicXor16U, 0xe622, i_ii) \
+ V(I32AtomicXor32, 0xe623, i_ii)
+
// All opcodes.
#define FOREACH_OPCODE(V) \
FOREACH_CONTROL_OPCODE(V) \
FOREACH_MISC_OPCODE(V) \
FOREACH_SIMPLE_OPCODE(V) \
- FOREACH_SIMPLE_MEM_OPCODE(V) \
FOREACH_STORE_MEM_OPCODE(V) \
FOREACH_LOAD_MEM_OPCODE(V) \
FOREACH_MISC_MEM_OPCODE(V) \
FOREACH_ASMJS_COMPAT_OPCODE(V) \
FOREACH_SIMD_0_OPERAND_OPCODE(V) \
- FOREACH_SIMD_1_OPERAND_OPCODE(V)
+ FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+ FOREACH_ATOMIC_OPCODE(V)
// All signatures.
#define FOREACH_SIGNATURE(V) \
@@ -454,7 +491,9 @@ const WasmCodePosition kNoCodePosition = -1;
V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32) \
V(s_si, kAstS128, kAstS128, kAstI32)
-#define FOREACH_PREFIX(V) V(Simd, 0xe5)
+#define FOREACH_PREFIX(V) \
+ V(Simd, 0xe5) \
+ V(Atomic, 0xe6)
enum WasmOpcode {
// Declare expression opcodes.
@@ -486,12 +525,13 @@ enum TrapReason {
};
// A collection of opcode-related static methods.
-class WasmOpcodes {
+class V8_EXPORT_PRIVATE WasmOpcodes {
public:
static const char* OpcodeName(WasmOpcode opcode);
static const char* ShortOpcodeName(WasmOpcode opcode);
static FunctionSig* Signature(WasmOpcode opcode);
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
+ static FunctionSig* AtomicSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
static int TrapReasonToMessageId(TrapReason reason);
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 7d251f03df..6d535e3f57 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -46,28 +46,35 @@ void ErrorThrower::Format(i::Handle<i::JSFunction> constructor,
exception_ = isolate_->factory()->NewError(constructor, message);
}
-void ErrorThrower::Error(const char* format, ...) {
+void ErrorThrower::TypeError(const char* format, ...) {
if (error()) return;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->error_function(), format, arguments);
+ Format(isolate_->type_error_function(), format, arguments);
va_end(arguments);
}
-void ErrorThrower::TypeError(const char* format, ...) {
+void ErrorThrower::RangeError(const char* format, ...) {
if (error()) return;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->type_error_function(), format, arguments);
+ Format(isolate_->range_error_function(), format, arguments);
va_end(arguments);
}
-void ErrorThrower::RangeError(const char* format, ...) {
+void ErrorThrower::CompileError(const char* format, ...) {
if (error()) return;
va_list arguments;
va_start(arguments, format);
- CHECK(*isolate_->range_error_function() != *isolate_->type_error_function());
- Format(isolate_->range_error_function(), format, arguments);
+ Format(isolate_->wasm_compile_error_function(), format, arguments);
+ va_end(arguments);
+}
+
+void ErrorThrower::RuntimeError(const char* format, ...) {
+ if (error()) return;
+ va_list arguments;
+ va_start(arguments, format);
+ Format(isolate_->wasm_runtime_error_function(), format, arguments);
va_end(arguments);
}
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index ecc54e5b7a..53c6b8dcf9 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -82,7 +82,8 @@ std::ostream& operator<<(std::ostream& os, const Result<T>& result) {
return os;
}
-std::ostream& operator<<(std::ostream& os, const ErrorCode& error_code);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const ErrorCode& error_code);
// A helper for generating error messages that bubble up to JS exceptions.
class V8_EXPORT_PRIVATE ErrorThrower {
@@ -91,15 +92,16 @@ class V8_EXPORT_PRIVATE ErrorThrower {
: isolate_(isolate), context_(context) {}
~ErrorThrower();
- PRINTF_FORMAT(2, 3) void Error(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void RangeError(const char* fmt, ...);
+ PRINTF_FORMAT(2, 3) void CompileError(const char* fmt, ...);
+ PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
template <typename T>
- void Failed(const char* error, Result<T>& result) {
+ void CompileFailed(const char* error, Result<T>& result) {
std::ostringstream str;
str << error << result;
- Error("%s", str.str().c_str());
+ CompileError("%s", str.str().c_str());
}
i::Handle<i::Object> Reify() {
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index d202aadf7a..5402a8ce87 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -402,8 +402,9 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate()->heap()->MaxOldGenerationSize()) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 5de891cf36..e8ee9e4bdd 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1993,7 +1993,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 2a962b32f9..d62aafe573 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -693,13 +693,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ leal(rdx, Operand(rax, rax, times_1, 2));
// rdx: Number of capture registers
- // Check that the fourth object is a JSObject.
- __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
- __ JumpIfSmi(r15, &runtime);
- __ CmpObjectType(r15, JS_OBJECT_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
+ // Check that the last match info is a FixedArray.
+ __ movp(rbx, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
+ __ JumpIfSmi(rbx, &runtime);
// Check that the object has fast elements.
- __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
__ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &runtime);
@@ -707,43 +704,37 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information. Ensure no overflow in add.
STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
__ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ subl(rax, Immediate(RegExpMatchInfo::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
- // rbx: last_match_info backing store (FixedArray)
+ // rbx: last_match_info (FixedArray)
// rdx: number of capture registers
// Store the capture count.
__ Integer32ToSmi(kScratchRegister, rdx);
- __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
+ __ movp(FieldOperand(rbx, RegExpMatchInfo::kNumberOfCapturesOffset),
kScratchRegister);
// Store last subject and last input.
__ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
- __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movp(FieldOperand(rbx, RegExpMatchInfo::kLastSubjectOffset), rax);
__ movp(rcx, rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastSubjectOffset,
- rax,
- rdi,
+ __ RecordWriteField(rbx, RegExpMatchInfo::kLastSubjectOffset, rax, rdi,
kDontSaveFPRegs);
__ movp(rax, rcx);
- __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
- __ RecordWriteField(rbx,
- RegExpImpl::kLastInputOffset,
- rax,
- rdi,
+ __ movp(FieldOperand(rbx, RegExpMatchInfo::kLastInputOffset), rax);
+ __ RecordWriteField(rbx, RegExpMatchInfo::kLastInputOffset, rax, rdi,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
__ LoadAddress(
rcx, ExternalReference::address_of_static_offsets_vector(isolate()));
- // rbx: last_match_info backing store (FixedArray)
+ // rbx: last_match_info (FixedArray)
// rcx: offsets vector
// rdx: number of capture registers
Label next_capture, done;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
+ // counts down until wrapping after zero.
__ bind(&next_capture);
__ subp(rdx, Immediate(1));
__ j(negative, &done, Label::kNear);
@@ -751,16 +742,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
__ Integer32ToSmi(rdi, rdi);
// Store the smi value in the last match info.
- __ movp(FieldOperand(rbx,
- rdx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size,
+ RegExpMatchInfo::kFirstCaptureOffset),
rdi);
__ jmp(&next_capture);
__ bind(&done);
// Return last match info.
- __ movp(rax, r15);
+ __ movp(rax, rbx);
__ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
__ bind(&exception);
@@ -1324,29 +1313,25 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ cmpp(rdi, r8);
__ j(not_equal, miss);
- __ movp(rax, Immediate(arg_count()));
-
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, rbx, rdx);
__ movp(rbx, rcx);
__ movp(rdx, rdi);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax - number of arguments
// -- rdi - function
// -- rdx - slot id
// -- rbx - vector
// -----------------------------------
Isolate* isolate = masm->isolate();
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- StackArgumentsAccessor args(rsp, argc);
- ParameterCount actual(argc);
// The checks. First, does rdi match the recorded monomorphic target?
__ SmiToInteger32(rdx, rdx);
@@ -1378,7 +1363,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, rbx, rdx);
- __ Set(rax, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1422,7 +1406,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
IncrementCallCount(masm, rbx, rdx);
__ bind(&call_count_incremented);
- __ Set(rax, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1455,7 +1438,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
+ __ Integer32ToSmi(rax, rax);
__ Integer32ToSmi(rdx, rdx);
+ __ Push(rax);
__ Push(rbx);
__ Push(rdx);
__ Push(rdi);
@@ -1465,7 +1450,9 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Pop(rdi);
__ Pop(rdx);
__ Pop(rbx);
+ __ Pop(rax);
__ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(rax, rax);
}
__ jmp(&call_function);
@@ -1484,6 +1471,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve the number of arguments.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+
// Push the receiver and the function and feedback info.
__ Integer32ToSmi(rdx, rdx);
__ Push(rdi);
@@ -1495,6 +1486,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Move result to edi and exit the internal frame.
__ movp(rdi, rax);
+
+ // Restore number of arguments.
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
}
bool CEntryStub::NeedsImmovableCode() {
@@ -2954,21 +2949,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Register scratch3,
@@ -3040,190 +3020,12 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ jmp(handler);
}
-
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
- Register name = LoadWithVectorDescriptor::NameRegister(); // rcx
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
- Register feedback = rdi;
- Register integer_slot = r8;
- Register receiver_map = r9;
-
- __ SmiToInteger32(integer_slot, slot);
- __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
- integer_slot, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &miss);
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
- feedback, no_reg);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
- Register key = LoadWithVectorDescriptor::NameRegister(); // rcx
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
- Register feedback = rdi;
- Register integer_slot = r8;
- Register receiver_map = r9;
-
- __ SmiToInteger32(integer_slot, slot);
- __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
- integer_slot, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmpp(key, feedback);
- __ j(not_equal, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, false,
- &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // rdx
- Register key = StoreWithVectorDescriptor::NameRegister(); // rcx
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // rbx
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // rdi
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax)); // rax
- Register feedback = r8;
- Register integer_slot = r9;
- Register receiver_map = r11;
- DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
-
- __ SmiToInteger32(integer_slot, slot);
- __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
- integer_slot, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &miss);
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key,
- feedback, no_reg);
-
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3539,36 +3341,22 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
}
}
-
void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ testp(rax, rax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmpl(rax, Immediate(1));
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
-}
+ MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+ Label not_zero_case, not_one_case;
+ __ testp(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmpl(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -3621,27 +3409,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE: {
- StackArgumentsAccessor args(rsp, rax);
- __ movp(args.GetReceiverOperand(), rdi);
- __ addp(rax, Immediate(3));
- break;
- }
- case NONE: {
- StackArgumentsAccessor args(rsp, 0);
- __ movp(args.GetReceiverOperand(), rdi);
- __ Set(rax, 3);
- break;
- }
- case ONE: {
- StackArgumentsAccessor args(rsp, 1);
- __ movp(args.GetReceiverOperand(), rdi);
- __ Set(rax, 4);
- break;
- }
- }
+ StackArgumentsAccessor args(rsp, rax);
+ __ movp(args.GetReceiverOperand(), rdi);
+ __ addp(rax, Immediate(3));
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(rbx);
@@ -4388,129 +4158,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = rsi;
- Register slot_reg = rbx;
- Register value_reg = rax;
- Register cell_reg = r8;
- Register cell_details_reg = rdx;
- Register cell_value_reg = r9;
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, kUnexpectedValue);
- }
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = rdi;
- }
-
- // Load the PropertyCell at the specified slot.
- __ movp(cell_reg, ContextOperand(context_reg, slot_reg));
-
- // Load PropertyDetails for the cell (actually only the cell_type, kind and
- // READ_ONLY bit of attributes).
- __ SmiToInteger32(cell_details_reg,
- FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
- __ andl(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ cmpl(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ j(not_equal, &not_mutable_data);
- __ JumpIfSmi(value_reg, &fast_smi_case);
- __ bind(&fast_heapobject_case);
- __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
- __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
- cell_value_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // RecordWriteField clobbers the value register, so we need to reload.
- __ movp(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
- __ Ret();
- __ bind(&not_mutable_data);
-
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ movp(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
- __ cmpp(cell_value_reg, value_reg);
- __ j(not_equal, &not_same_value,
- FLAG_debug_code ? Label::kFar : Label::kNear);
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ testl(cell_details_reg,
- Immediate(PropertyDetails::kAttributesReadOnlyMask));
- __ j(not_zero, &slow_case);
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ cmpl(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ j(equal, &done);
- __ cmpl(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ j(equal, &done);
- __ cmpl(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(equal, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ cmpl(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ j(not_equal, &slow_case, Label::kNear);
-
- // Now either both old and new values must be SMIs or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
- __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
- // Old and new values are SMIs, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
- __ Ret();
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
- Register cell_value_map_reg = cell_value_reg;
- __ movp(cell_value_map_reg,
- FieldOperand(cell_value_reg, HeapObject::kMapOffset));
- __ cmpp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- __ j(equal, &fast_heapobject_case);
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ Integer32ToSmi(slot_reg, slot_reg);
- __ PopReturnAddressTo(kScratchRegister);
- __ Push(slot_reg);
- __ Push(value_reg);
- __ Push(kScratchRegister);
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that fits into int.
@@ -4853,7 +4500,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
__ Push(holder);
- __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(Smi::kZero); // should_throw_on_error -> false
__ Push(FieldOperand(callback, AccessorInfo::kNameOffset));
__ PushReturnAddressFrom(scratch);
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 35da7a2c00..9fbf69e55e 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -66,7 +66,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
- deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+ deopt_data->SetSharedFunctionInfo(Smi::kZero);
// For each LLazyBailout instruction insert a call to the corresponding
// deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 9e486446bc..3ee4412c10 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -29,9 +29,9 @@ const Register LoadDescriptor::ReceiverRegister() { return rdx; }
const Register LoadDescriptor::NameRegister() { return rcx; }
const Register LoadDescriptor::SlotRegister() { return rax; }
-
const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return rdi; }
const Register StoreDescriptor::ReceiverRegister() { return rdx; }
const Register StoreDescriptor::NameRegister() { return rcx; }
@@ -44,10 +44,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return rdi; }
const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
const Register StoreTransitionDescriptor::MapRegister() { return r11; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return rdx; }
const Register StringCompareDescriptor::RightRegister() { return rax; }
@@ -158,7 +154,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi, rdx, rbx};
+ Register registers[] = {rdi, rax, rdx, rbx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -206,13 +202,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rcx, rbx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rax, rbx};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 0fd6333996..8d70f540de 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -676,20 +676,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- if (!hash.is(index)) {
- movl(index, hash);
- }
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f,
int num_arguments,
SaveFPRegsMode save_doubles) {
@@ -2371,7 +2357,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
movl(kScratchRegister, Immediate(kSmiTagMask));
andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
@@ -3677,20 +3663,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
Immediate(static_cast<int8_t>(type)));
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
@@ -4686,82 +4658,6 @@ void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch));
- DCHECK(!scratch.is(kScratchRegister));
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- movp(scratch, rbp);
- bind(&load_context);
- DCHECK(SmiValuesAre32Bits());
- // This is "JumpIfNotSmi" but without loading the value into a register.
- cmpl(MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset),
- Immediate(0));
- j(not_equal, &has_context);
- movp(scratch, MemOperand(scratch, CommonFrameConstants::kCallerFPOffset));
- jmp(&load_context);
- bind(&has_context);
- movp(scratch,
- MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmpp(scratch, Immediate(0));
- Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
- }
- // Load the native context of the current context.
- movp(scratch, ContextOperand(scratch, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- }
-
- // Check if both contexts are the same.
- cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens.
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Preserve original value of holder_reg.
- Push(holder_reg);
- movp(holder_reg,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- CompareRoot(holder_reg, Heap::kNullValueRootIndex);
- Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
-
- // Read the first word and compare to native_context_map(),
- movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- Pop(holder_reg);
- }
-
- movp(kScratchRegister,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- int token_offset =
- Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
- movp(scratch, FieldOperand(scratch, token_offset));
- cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -4800,87 +4696,6 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
andl(r0, Immediate(0x3fffffff));
}
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'result'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- SmiToInteger32(r1, FieldOperand(elements,
- SeededNumberDictionary::kCapacityOffset));
- decl(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- movp(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- andp(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmpp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kNumberDictionaryProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- DCHECK_EQ(DATA, 0);
- Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
@@ -5283,93 +5098,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Destination is incremented by length, source, length and scratch are
-// clobbered.
-// A simpler loop is faster on small copies, but slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length,
- Register scratch) {
- DCHECK(min_length >= 0);
- if (emit_debug_code()) {
- cmpl(length, Immediate(min_length));
- Assert(greater_equal, kInvalidMinLength);
- }
- Label short_loop, len8, len16, len24, done, short_string;
-
- const int kLongStringLimit = 4 * kPointerSize;
- if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(kPointerSize));
- j(below, &short_string, Label::kNear);
- }
-
- DCHECK(source.is(rsi));
- DCHECK(destination.is(rdi));
- DCHECK(length.is(rcx));
-
- if (min_length <= kLongStringLimit) {
- cmpl(length, Immediate(2 * kPointerSize));
- j(below_equal, &len8, Label::kNear);
- cmpl(length, Immediate(3 * kPointerSize));
- j(below_equal, &len16, Label::kNear);
- cmpl(length, Immediate(4 * kPointerSize));
- j(below_equal, &len24, Label::kNear);
- }
-
- // Because source is 8-byte aligned in our uses of this function,
- // we keep source aligned for the rep movs operation by copying the odd bytes
- // at the end of the ranges.
- movp(scratch, length);
- shrl(length, Immediate(kPointerSizeLog2));
- repmovsp();
- // Move remaining bytes of length.
- andl(scratch, Immediate(kPointerSize - 1));
- movp(length, Operand(source, scratch, times_1, -kPointerSize));
- movp(Operand(destination, scratch, times_1, -kPointerSize), length);
- addp(destination, scratch);
-
- if (min_length <= kLongStringLimit) {
- jmp(&done, Label::kNear);
- bind(&len24);
- movp(scratch, Operand(source, 2 * kPointerSize));
- movp(Operand(destination, 2 * kPointerSize), scratch);
- bind(&len16);
- movp(scratch, Operand(source, kPointerSize));
- movp(Operand(destination, kPointerSize), scratch);
- bind(&len8);
- movp(scratch, Operand(source, 0));
- movp(Operand(destination, 0), scratch);
- // Move remaining bytes of length.
- movp(scratch, Operand(source, length, times_1, -kPointerSize));
- movp(Operand(destination, length, times_1, -kPointerSize), scratch);
- addp(destination, length);
- jmp(&done, Label::kNear);
-
- bind(&short_string);
- if (min_length == 0) {
- testl(length, length);
- j(zero, &done, Label::kNear);
- }
-
- bind(&short_loop);
- movb(scratch, Operand(source, 0));
- movb(Operand(destination, 0), scratch);
- incp(source);
- incp(destination);
- decl(length);
- j(not_zero, &short_loop, Label::kNear);
- }
-
- bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -5503,7 +5231,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiCompare(index, FieldOperand(string, String::kLengthOffset));
Check(less, kIndexIsTooLarge);
- SmiCompare(index, Smi::FromInt(0));
+ SmiCompare(index, Smi::kZero);
Check(greater_equal, kIndexIsNegative);
// Restore the index
@@ -5708,7 +5436,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(rdx, rbx);
- Cmp(rdx, Smi::FromInt(0));
+ Cmp(rdx, Smi::kZero);
j(not_equal, call_runtime);
bind(&start);
@@ -5741,20 +5469,21 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
xorp(scratch_reg, ExternalOperand(new_space_allocation_top));
testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
j(zero, &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
xorp(scratch_reg, receiver_reg);
testp(scratch_reg, Immediate(~Page::kPageAlignmentMask));
j(not_zero, no_memento_found);
@@ -5763,9 +5492,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- leap(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ leap(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
- j(greater, no_memento_found);
+ j(greater_equal, no_memento_found);
// Memento map check.
bind(&map_check);
CompareRoot(MemOperand(receiver_reg, kMementoMapOffset),
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index a8d0c60aa1..f085509914 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -1112,12 +1112,6 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
@@ -1295,25 +1289,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register and kScratchRegister,
- // which must be different, are clobbered.
- void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
- Label* miss);
-
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result);
-
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -1430,12 +1407,6 @@ class MacroAssembler: public Assembler {
// clobbered.
void TryGetFunctionPrototype(Register function, Register result, Label* miss);
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
@@ -1549,18 +1520,6 @@ class MacroAssembler: public Assembler {
return code_object_;
}
- // Copy length bytes from source to destination.
- // Uses scratch register internally (if you have a low-eight register
- // free, do use it, otherwise kScratchRegister will be used).
- // The min_length is a minimum limit on the value that length will have.
- // The algorithm has some special cases that might be omitted if the string
- // is known to always be long.
- void CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length = 0,
- Register scratch = kScratchRegister);
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 62b662f285..eb8dafa3b0 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -2052,8 +2052,9 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if ((desc.buffer_size > kMaximalBufferSize) ||
- (desc.buffer_size > isolate()->heap()->MaxOldGenerationSize())) {
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate()->heap()->MaxOldGenerationSize()) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 4111e8df12..160145b5ad 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -969,7 +969,8 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(DeoptimizeReason reason, int raw_position, int id);
+ void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
+ int id);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index e70cbad7ee..0ea919d3b1 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -631,14 +631,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
- // Load last_match_info which is still known to be a fast-elements JSObject.
- // Check that the fourth object is a JSObject.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_OBJECT_TYPE, ebx);
- __ j(not_equal, &runtime);
+ // Check that the last match info is a FixedArray.
+ __ mov(ebx, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(ebx, &runtime);
// Check that the object has fast elements.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
__ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
__ cmp(eax, factory->fixed_array_map());
__ j(not_equal, &runtime);
@@ -646,7 +642,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
- __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ sub(eax, Immediate(RegExpMatchInfo::kLastMatchOverhead));
__ cmp(edx, eax);
__ j(greater, &runtime);
@@ -654,17 +650,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: number of capture registers
// Store the capture count.
__ SmiTag(edx); // Number of capture registers to smi.
- __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ mov(FieldOperand(ebx, RegExpMatchInfo::kNumberOfCapturesOffset), edx);
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
__ mov(eax, Operand(esp, kSubjectOffset));
__ mov(ecx, eax);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
- __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi,
+ __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastSubjectOffset), eax);
+ __ RecordWriteField(ebx, RegExpMatchInfo::kLastSubjectOffset, eax, edi,
kDontSaveFPRegs);
__ mov(eax, ecx);
- __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
- __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi,
+ __ mov(FieldOperand(ebx, RegExpMatchInfo::kLastInputOffset), eax);
+ __ RecordWriteField(ebx, RegExpMatchInfo::kLastInputOffset, eax, edi,
kDontSaveFPRegs);
// Get the static offsets vector filled by the native regexp code.
@@ -677,7 +673,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: number of capture registers
Label next_capture, done;
// Capture register counter starts from number of capture registers and
- // counts down until wraping after zero.
+ // counts down until wrapping after zero.
__ bind(&next_capture);
__ sub(edx, Immediate(1));
__ j(negative, &done, Label::kNear);
@@ -685,16 +681,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
__ SmiTag(edi);
// Store the smi value in the last match info.
- __ mov(FieldOperand(ebx,
- edx,
- times_pointer_size,
- RegExpImpl::kFirstCaptureOffset),
- edi);
+ __ mov(FieldOperand(ebx, edx, times_pointer_size,
+ RegExpMatchInfo::kFirstCaptureOffset),
+ edi);
__ jmp(&next_capture);
__ bind(&done);
// Return last match info.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ mov(eax, ebx);
__ ret(4 * kPointerSize);
// Do the runtime call to execute the regexp.
@@ -877,7 +871,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
STATIC_ASSERT(kSmiTag == 0);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
__ mov(ecx, Immediate(kSmiTagMask));
__ and_(ecx, eax);
__ test(ecx, edx);
@@ -1258,6 +1252,7 @@ static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
+ // eax - number of arguments
// edi - function
// edx - slot id
// ebx - vector
@@ -1265,7 +1260,6 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ cmp(edi, ecx);
__ j(not_equal, miss);
- __ mov(eax, arg_count());
// Reload ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -1275,7 +1269,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ mov(ebx, ecx);
__ mov(edx, edi);
- ArrayConstructorStub stub(masm->isolate(), arg_count());
+ ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
// Unreachable.
@@ -1283,13 +1277,12 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
void CallICStub::Generate(MacroAssembler* masm) {
+ // edi - number of arguments
// edi - function
// edx - slot id
// ebx - vector
Isolate* isolate = masm->isolate();
Label extra_checks_or_miss, call, call_function, call_count_incremented;
- int argc = arg_count();
- ParameterCount actual(argc);
// The checks. First, does edi match the recorded monomorphic target?
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
@@ -1321,7 +1314,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, ebx, edx);
- __ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1367,7 +1359,6 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_count_incremented);
- __ Set(eax, argc);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -1393,12 +1384,15 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &miss);
// Store the function. Use a stub since we need a frame for allocation.
+ // eax - number of arguments
// ebx - vector
// edx - slot
// edi - function
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(isolate);
+ __ SmiTag(eax);
+ __ push(eax);
__ push(ebx);
__ push(edx);
__ push(edi);
@@ -1408,6 +1402,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ pop(edi);
__ pop(edx);
__ pop(ebx);
+ __ pop(eax);
+ __ SmiUntag(eax);
}
__ jmp(&call_function);
@@ -1427,6 +1423,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Preserve the number of arguments.
+ __ SmiTag(eax);
+ __ push(eax);
+
// Push the function and feedback info.
__ push(edi);
__ push(ebx);
@@ -1437,6 +1437,10 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
+
+ // Restore number of arguments.
+ __ pop(eax);
+ __ SmiUntag(eax);
}
@@ -2830,254 +2834,12 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
-
-void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- LoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
- KeyedLoadICStub stub(isolate());
- stub.GenerateForTrampoline(masm);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label start_polymorphic;
-
- __ push(receiver);
- __ push(vector);
-
- Register receiver_map = receiver;
- Register cached_map = vector;
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &load_smi_map);
- __ mov(receiver_map, FieldOperand(receiver, 0));
- __ bind(&compare_map);
- __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
- // A named keyed load might have a 2 element array, all other cases can count
- // on an array with at least 2 {map, handler} pairs, so they can go right
- // into polymorphic array handling.
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
-
- // found, now call handler.
- Register handler = feedback;
- __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ pop(vector);
- __ pop(receiver);
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- if (!is_polymorphic) {
- __ bind(&next);
- __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
- Immediate(Smi::FromInt(2)));
- __ j(not_equal, &start_polymorphic);
- __ pop(vector);
- __ pop(receiver);
- __ jmp(miss);
- }
-
- // Polymorphic, we have to loop from 2 to N
- __ bind(&start_polymorphic);
- __ push(key);
- Register counter = key;
- __ mov(counter, Immediate(Smi::FromInt(2)));
- __ bind(&next_loop);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ pop(key);
- __ pop(vector);
- __ pop(receiver);
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- __ bind(&prepare_next);
- __ add(counter, Immediate(Smi::FromInt(2)));
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ pop(key);
- __ pop(vector);
- __ pop(receiver);
- __ jmp(miss);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register weak_cell, Label* miss) {
- // feedback initially contains the feedback array
- Label compare_smi_map;
-
- // Move the weak map into the weak_cell register.
- Register ic_map = weak_cell;
- __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &compare_smi_map);
- __ cmp(ic_map, FieldOperand(receiver, 0));
- __ j(not_equal, miss);
- Register handler = weak_cell;
- __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- // In microbenchmarks, it made sense to unroll this code so that the call to
- // the handler is duplicated for a HeapObject receiver and a Smi receiver.
- __ bind(&compare_smi_map);
- __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, miss);
- __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-}
-
-
-void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-
-void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
- Register name = LoadWithVectorDescriptor::NameRegister(); // ecx
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
- Register scratch = edi;
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay, miss;
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
-
- __ bind(&not_array);
- __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &miss);
- __ push(slot);
- __ push(vector);
- masm->isolate()->load_stub_cache()->GenerateProbe(masm, receiver, name,
- vector, scratch);
- __ pop(vector);
- __ pop(slot);
-
- __ bind(&miss);
- LoadIC::GenerateMiss(masm);
-}
-
-
-void KeyedLoadICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-
-void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // edx
- Register key = LoadWithVectorDescriptor::NameRegister(); // ecx
- Register vector = LoadWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = LoadWithVectorDescriptor::SlotRegister(); // eax
- Register feedback = edi;
- __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay, miss;
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ j(not_equal, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
-
- __ bind(&miss);
- KeyedLoadIC::GenerateMiss(masm);
-}
-
-void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- StoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
-
-void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
// value is on the stack already.
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
Register key, Register vector,
@@ -3194,63 +2956,6 @@ static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
__ jmp(weak_cell);
}
-void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
- Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
- Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
- Label miss;
-
- if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
- // Current stack layout:
- // - esp[8] -- value
- // - esp[4] -- slot
- // - esp[0] -- return address
- STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- if (in_frame) {
- __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
- // If the vector is not on the stack, then insert the vector beneath
- // return address in order to prepare for calling handler with
- // StoreWithVector calling convention.
- __ push(Operand(esp, 0));
- __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
- __ RecordComment("]");
- } else {
- __ mov(vector, Operand(esp, 1 * kPointerSize));
- }
- __ mov(slot, Operand(esp, 2 * kPointerSize));
- }
-
- Register scratch = value;
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay;
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, true,
- &miss);
-
- __ bind(&not_array);
- __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &miss);
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, slot,
- no_reg);
- __ bind(&miss);
- StoreIC::GenerateMiss(masm);
-}
-
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
@@ -3299,7 +3004,7 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
// - esp[12] -- value
// - receiver, key, handler in registers.
Register counter = key;
- __ mov(counter, Immediate(Smi::FromInt(0)));
+ __ mov(counter, Immediate(Smi::kZero));
__ bind(&next_loop);
__ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
FixedArray::kHeaderSize));
@@ -3634,30 +3339,19 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm, AllocationSiteOverrideMode mode) {
- if (argument_count() == ANY) {
- Label not_zero_case, not_one_case;
- __ test(eax, eax);
- __ j(not_zero, &not_zero_case);
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-
- __ bind(&not_zero_case);
- __ cmp(eax, 1);
- __ j(greater, &not_one_case);
- CreateArrayDispatchOneArgument(masm, mode);
-
- __ bind(&not_one_case);
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else if (argument_count() == NONE) {
- CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
- } else if (argument_count() == ONE) {
- CreateArrayDispatchOneArgument(masm, mode);
- } else if (argument_count() == MORE_THAN_ONE) {
- ArrayNArgumentsConstructorStub stub(masm->isolate());
- __ TailCallStub(&stub);
- } else {
- UNREACHABLE();
- }
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ CreateArrayDispatchOneArgument(masm, mode);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
@@ -3711,21 +3405,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
- switch (argument_count()) {
- case ANY:
- case MORE_THAN_ONE:
- __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
- __ add(eax, Immediate(3));
- break;
- case NONE:
- __ mov(Operand(esp, 1 * kPointerSize), edi);
- __ mov(eax, Immediate(3));
- break;
- case ONE:
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- __ mov(eax, Immediate(4));
- break;
- }
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ __ add(eax, Immediate(3));
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(ebx);
@@ -4004,8 +3685,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(ecx, isolate()->factory()->empty_fixed_array());
__ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
__ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
- __ mov(FieldOperand(eax, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(0)));
+ __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ Ret();
@@ -4046,7 +3726,7 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
{
Label loop, done_loop;
- __ Move(ecx, Smi::FromInt(0));
+ __ Move(ecx, Smi::kZero);
__ bind(&loop);
__ cmp(ecx, eax);
__ j(equal, &done_loop, Label::kNear);
@@ -4433,7 +4113,7 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
{
Label loop, done_loop;
- __ Move(ecx, Smi::FromInt(0));
+ __ Move(ecx, Smi::kZero);
__ bind(&loop);
__ cmp(ecx, eax);
__ j(equal, &done_loop, Label::kNear);
@@ -4495,129 +4175,6 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
-void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
- Register context_reg = esi;
- Register slot_reg = ebx;
- Register value_reg = eax;
- Register cell_reg = edi;
- Register cell_details_reg = edx;
- Register cell_value_reg = ecx;
- Label fast_heapobject_case, fast_smi_case, slow_case;
-
- if (FLAG_debug_code) {
- __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
- __ Check(not_equal, kUnexpectedValue);
- }
-
- // Go up context chain to the script context.
- for (int i = 0; i < depth(); ++i) {
- __ mov(cell_reg, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
- context_reg = cell_reg;
- }
-
- // Load the PropertyCell at the specified slot.
- __ mov(cell_reg, ContextOperand(context_reg, slot_reg));
-
- // Load PropertyDetails for the cell (actually only the cell_type and kind).
- __ mov(cell_details_reg,
- FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
- __ SmiUntag(cell_details_reg);
- __ and_(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::kMask |
- PropertyDetails::KindField::kMask |
- PropertyDetails::kAttributesReadOnlyMask));
-
- // Check if PropertyCell holds mutable data.
- Label not_mutable_data;
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kMutable) |
- PropertyDetails::KindField::encode(kData)));
- __ j(not_equal, &not_mutable_data);
- __ JumpIfSmi(value_reg, &fast_smi_case);
- __ bind(&fast_heapobject_case);
- __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
- __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
- cell_details_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // RecordWriteField clobbers the value register, so we need to reload.
- __ mov(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
- __ Ret();
- __ bind(&not_mutable_data);
-
- // Check if PropertyCell value matches the new value (relevant for Constant,
- // ConstantType and Undefined cells).
- Label not_same_value;
- __ mov(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
- __ cmp(cell_value_reg, value_reg);
- __ j(not_equal, &not_same_value,
- FLAG_debug_code ? Label::kFar : Label::kNear);
- // Make sure the PropertyCell is not marked READ_ONLY.
- __ test(cell_details_reg,
- Immediate(PropertyDetails::kAttributesReadOnlyMask));
- __ j(not_zero, &slow_case);
- if (FLAG_debug_code) {
- Label done;
- // This can only be true for Constant, ConstantType and Undefined cells,
- // because we never store the_hole via this stub.
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstant) |
- PropertyDetails::KindField::encode(kData)));
- __ j(equal, &done);
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ j(equal, &done);
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kUndefined) |
- PropertyDetails::KindField::encode(kData)));
- __ Check(equal, kUnexpectedValue);
- __ bind(&done);
- }
- __ Ret();
- __ bind(&not_same_value);
-
- // Check if PropertyCell contains data with constant type (and is not
- // READ_ONLY).
- __ cmp(cell_details_reg,
- Immediate(PropertyDetails::PropertyCellTypeField::encode(
- PropertyCellType::kConstantType) |
- PropertyDetails::KindField::encode(kData)));
- __ j(not_equal, &slow_case, Label::kNear);
-
- // Now either both old and new values must be SMIs or both must be heap
- // objects with same map.
- Label value_is_heap_object;
- __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
- __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
- // Old and new values are SMIs, no need for a write barrier here.
- __ bind(&fast_smi_case);
- __ mov(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
- __ Ret();
- __ bind(&value_is_heap_object);
- __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
- Register cell_value_map_reg = cell_value_reg;
- __ mov(cell_value_map_reg,
- FieldOperand(cell_value_reg, HeapObject::kMapOffset));
- __ cmp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- __ j(equal, &fast_heapobject_case);
-
- // Fallback to the runtime.
- __ bind(&slow_case);
- __ SmiTag(slot_reg);
- __ Pop(cell_reg); // Pop return address.
- __ Push(slot_reg);
- __ Push(value_reg);
- __ Push(cell_reg); // Push return address.
- __ TailCallRuntime(is_strict(language_mode())
- ? Runtime::kStoreGlobalViaContext_Strict
- : Runtime::kStoreGlobalViaContext_Sloppy);
-}
-
-
// Generates an Operand for saving parameters after PrepareCallApiFunction.
static Operand ApiParameterOperand(int index) {
return Operand(esp, index * kPointerSize);
@@ -4951,7 +4508,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ push(Immediate(ExternalReference::isolate_address(isolate())));
__ push(holder);
- __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
+ __ push(Immediate(Smi::kZero)); // should_throw_on_error -> false
__ push(FieldOperand(callback, AccessorInfo::kNameOffset));
__ push(scratch); // Restore return address.
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 85b26ca1b0..70b110af5b 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -31,6 +31,7 @@ const Register LoadDescriptor::SlotRegister() { return eax; }
const Register LoadWithVectorDescriptor::VectorRegister() { return ebx; }
+const Register LoadICProtoArrayDescriptor::HandlerRegister() { return edi; }
const Register StoreDescriptor::ReceiverRegister() { return edx; }
const Register StoreDescriptor::NameRegister() { return ecx; }
@@ -40,15 +41,9 @@ const Register StoreDescriptor::SlotRegister() { return edi; }
const Register StoreWithVectorDescriptor::VectorRegister() { return ebx; }
const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-
const Register StoreTransitionDescriptor::VectorRegister() { return ebx; }
-
const Register StoreTransitionDescriptor::MapRegister() { return edi; }
-const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
-const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-
-
const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
@@ -159,7 +154,7 @@ void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, edx, ebx};
+ Register registers[] = {edi, eax, edx, ebx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -208,13 +203,6 @@ void ConstructTrampolineDescriptor::InitializePlatformSpecific(
}
-void RegExpConstructResultDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx, ebx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax, ebx};
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index dafe985ff8..ee81a680e4 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -701,20 +701,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-
-void MacroAssembler::CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
void MacroAssembler::CheckFastObjectElements(Register map,
Label* fail,
Label::Distance distance) {
@@ -1238,79 +1224,6 @@ void MacroAssembler::PopStackHandler() {
}
-void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label same_contexts;
-
- DCHECK(!holder_reg.is(scratch1));
- DCHECK(!holder_reg.is(scratch2));
- DCHECK(!scratch1.is(scratch2));
-
- // Load current lexical context from the active StandardFrame, which
- // may require crawling past STUB frames.
- Label load_context;
- Label has_context;
- mov(scratch2, ebp);
- bind(&load_context);
- mov(scratch1,
- MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
- JumpIfNotSmi(scratch1, &has_context);
- mov(scratch2, MemOperand(scratch2, CommonFrameConstants::kCallerFPOffset));
- jmp(&load_context);
- bind(&has_context);
-
- // When generating debug code, make sure the lexical context is set.
- if (emit_debug_code()) {
- cmp(scratch1, Immediate(0));
- Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
- }
- // Load the native context of the current context.
- mov(scratch1, ContextOperand(scratch1, Context::NATIVE_CONTEXT_INDEX));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- // Read the first word and compare to native_context_map.
- cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- }
-
- // Check if both contexts are the same.
- cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
- j(equal, &same_contexts);
-
- // Compare security tokens, save holder_reg on the stack so we can use it
- // as a temporary register.
- //
- // Check that the security token in the calling global object is
- // compatible with the security token in the receiving global
- // object.
- mov(scratch2,
- FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-
- // Check the context is a native context.
- if (emit_debug_code()) {
- cmp(scratch2, isolate()->factory()->null_value());
- Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
-
- // Read the first word and compare to native_context_map(),
- cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
- isolate()->factory()->native_context_map());
- Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- }
-
- int token_offset = Context::kHeaderSize +
- Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch1, FieldOperand(scratch1, token_offset));
- cmp(scratch1, FieldOperand(scratch2, token_offset));
- j(not_equal, miss);
-
- bind(&same_contexts);
-}
-
-
// Compute the hash code from the untagged key. This must be kept in sync with
// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stub-hydrogen.cc
@@ -1355,82 +1268,6 @@ void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
and_(r0, 0x3fffffff);
}
-
-
-void MacroAssembler::LoadFromNumberDictionary(Label* miss,
- Register elements,
- Register key,
- Register r0,
- Register r1,
- Register r2,
- Register result) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver and is unchanged.
- //
- // key - holds the smi key on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - holds the untagged key on entry and holds the hash once computed.
- //
- // r1 - used to hold the capacity mask of the dictionary
- //
- // r2 - used for the index into the dictionary.
- //
- // result - holds the result on exit if the load succeeds and we fall through.
-
- Label done;
-
- GetNumberHash(r0, r1);
-
- // Compute capacity mask.
- mov(r1, FieldOperand(elements, SeededNumberDictionary::kCapacityOffset));
- shr(r1, kSmiTagSize); // convert smi to int
- dec(r1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kNumberDictionaryProbes; i++) {
- // Use r2 for index calculations and keep the hash intact in r0.
- mov(r2, r0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
- }
- and_(r2, r1);
-
- // Scale the index by multiplying by the entry size.
- DCHECK(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
-
- // Check if the key matches.
- cmp(key, FieldOperand(elements,
- r2,
- times_pointer_size,
- SeededNumberDictionary::kElementsStartOffset));
- if (i != (kNumberDictionaryProbes - 1)) {
- j(equal, &done);
- } else {
- j(not_equal, miss);
- }
- }
-
- bind(&done);
- // Check that the value is a field property.
- const int kDetailsOffset =
- SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- DCHECK_EQ(DATA, 0);
- test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- j(not_zero, miss);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset =
- SeededNumberDictionary::kElementsStartOffset + kPointerSize;
- mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
-}
-
-
void MacroAssembler::LoadAllocationTopHelper(Register result,
Register scratch,
AllocationFlags flags) {
@@ -1912,74 +1749,6 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
-
-// Copy memory, byte-by-byte, from source to destination. Not optimized for
-// long or aligned copies. The contents of scratch and length are destroyed.
-// Source and destination are incremented by length.
-// Many variants of movsb, loop unrolling, word moves, and indexed operands
-// have been tried here already, and this is fastest.
-// A simpler loop is faster on small copies, but 30% slower on large ones.
-// The cld() instruction must have been emitted, to set the direction flag(),
-// before calling this function.
-void MacroAssembler::CopyBytes(Register source,
- Register destination,
- Register length,
- Register scratch) {
- Label short_loop, len4, len8, len12, done, short_string;
- DCHECK(source.is(esi));
- DCHECK(destination.is(edi));
- DCHECK(length.is(ecx));
- cmp(length, Immediate(4));
- j(below, &short_string, Label::kNear);
-
- // Because source is 4-byte aligned in our uses of this function,
- // we keep source aligned for the rep_movs call by copying the odd bytes
- // at the end of the ranges.
- mov(scratch, Operand(source, length, times_1, -4));
- mov(Operand(destination, length, times_1, -4), scratch);
-
- cmp(length, Immediate(8));
- j(below_equal, &len4, Label::kNear);
- cmp(length, Immediate(12));
- j(below_equal, &len8, Label::kNear);
- cmp(length, Immediate(16));
- j(below_equal, &len12, Label::kNear);
-
- mov(scratch, ecx);
- shr(ecx, 2);
- rep_movs();
- and_(scratch, Immediate(0x3));
- add(destination, scratch);
- jmp(&done, Label::kNear);
-
- bind(&len12);
- mov(scratch, Operand(source, 8));
- mov(Operand(destination, 8), scratch);
- bind(&len8);
- mov(scratch, Operand(source, 4));
- mov(Operand(destination, 4), scratch);
- bind(&len4);
- mov(scratch, Operand(source, 0));
- mov(Operand(destination, 0), scratch);
- add(destination, length);
- jmp(&done, Label::kNear);
-
- bind(&short_string);
- test(length, length);
- j(zero, &done, Label::kNear);
-
- bind(&short_loop);
- mov_b(scratch, Operand(source, 0));
- mov_b(Operand(destination, 0), scratch);
- inc(source);
- inc(destination);
- dec(length);
- j(not_zero, &short_loop);
-
- bind(&done);
-}
-
-
void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
@@ -2094,20 +1863,6 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
-
-void MacroAssembler::IndexFromHash(Register hash, Register index) {
- // The assert checks that the constants for the maximum number of digits
- // for an array index cached in the hash field and the number of bits
- // reserved for it does not conflict.
- DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
- (1 << String::kArrayIndexValueBits));
- if (!index.is(hash)) {
- mov(index, hash);
- }
- DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
-}
-
-
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
SaveFPRegsMode save_doubles) {
// If the expected number of arguments of the runtime function is
@@ -2937,7 +2692,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
cmp(index, FieldOperand(string, String::kLengthOffset));
Check(less, kIndexIsTooLarge);
- cmp(index, Immediate(Smi::FromInt(0)));
+ cmp(index, Immediate(Smi::kZero));
Check(greater_equal, kIndexIsNegative);
// Restore the index
@@ -3190,7 +2945,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// For all objects but the receiver, check that the cache is empty.
EnumLength(edx, ebx);
- cmp(edx, Immediate(Smi::FromInt(0)));
+ cmp(edx, Immediate(Smi::kZero));
j(not_equal, call_runtime);
bind(&start);
@@ -3222,20 +2977,21 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
- const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
+ const int kMementoLastWordOffset =
+ kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
// If the object is in new space, we need to check whether it is on the same
// page as the current top.
- lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
xor_(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
j(zero, &top_check);
// The object is on a different page than allocation top. Bail out if the
// object sits on the page boundary as no memento can follow and we cannot
// touch the memory following it.
- lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
xor_(scratch_reg, receiver_reg);
test(scratch_reg, Immediate(~Page::kPageAlignmentMask));
j(not_zero, no_memento_found);
@@ -3244,9 +3000,9 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
// If top is on the same page as the current object, we need to check whether
// we are below top.
bind(&top_check);
- lea(scratch_reg, Operand(receiver_reg, kMementoEndOffset));
+ lea(scratch_reg, Operand(receiver_reg, kMementoLastWordOffset));
cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
- j(greater, no_memento_found);
+ j(greater_equal, no_memento_found);
// Memento map check.
bind(&map_check);
mov(scratch_reg, Operand(receiver_reg, kMementoMapOffset));
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index bdd3c037e5..6bb63594a9 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -381,11 +381,6 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map, Label* fail,
- Label::Distance distance = Label::kFar);
-
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Label* fail,
@@ -593,18 +588,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Inline caching support
- // Generate code for checking access rights - used for security checks
- // on access to global objects across environments. The holder register
- // is left untouched, but the scratch register is clobbered.
- void CheckAccessGlobalProxy(Register holder_reg, Register scratch1,
- Register scratch2, Label* miss);
-
void GetNumberHash(Register r0, Register scratch);
- void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
- Register r0, Register r1, Register r2,
- Register result);
-
// ---------------------------------------------------------------------------
// Allocation support
@@ -674,12 +659,6 @@ class MacroAssembler: public Assembler {
void AllocateJSValue(Register result, Register constructor, Register value,
Register scratch, Label* gc_required);
- // Copy memory, byte-by-byte, from source to destination. Not optimized for
- // long or aligned copies.
- // The contents of index and scratch are destroyed.
- void CopyBytes(Register source, Register destination, Register length,
- Register scratch);
-
// Initialize fields with filler values. Fields starting at |current_address|
// not including |end_address| are overwritten with the value in |filler|. At
// the end the loop, |current_address| takes the value of |end_address|.
@@ -712,12 +691,6 @@ class MacroAssembler: public Assembler {
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss);
- // Picks out an array index from the hash field.
- // Register use:
- // hash - holds the index's hash. Clobbered.
- // index - holds the overwritten index on exit.
- void IndexFromHash(Register hash, Register index);
-
// ---------------------------------------------------------------------------
// Runtime calls
@@ -810,7 +783,10 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
- void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+ void Call(Handle<Code> target, RelocInfo::Mode rmode,
+ TypeFeedbackId id = TypeFeedbackId::None()) {
+ call(target, rmode, id);
+ }
void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Push(Register src) { push(src); }
void Push(const Operand& src) { push(src); }
diff --git a/deps/v8/src/zone/accounting-allocator.cc b/deps/v8/src/zone/accounting-allocator.cc
index 663ea321a4..587e09d585 100644
--- a/deps/v8/src/zone/accounting-allocator.cc
+++ b/deps/v8/src/zone/accounting-allocator.cc
@@ -13,6 +13,72 @@
namespace v8 {
namespace internal {
+AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
+ static const size_t kDefaultBucketMaxSize = 5;
+
+ memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
+ std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
+ nullptr);
+ std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
+ std::fill(unused_segments_max_sizes_,
+ unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
+}
+
+AccountingAllocator::~AccountingAllocator() { ClearPool(); }
+
+void AccountingAllocator::MemoryPressureNotification(
+ MemoryPressureLevel level) {
+ memory_pressure_level_.SetValue(level);
+
+ if (level != MemoryPressureLevel::kNone) {
+ ClearPool();
+ }
+}
+
+void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
+ // The sum of the bytes of one segment of each size.
+ static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
+ (size_t(1) << kMinSegmentSizePower);
+ size_t fits_fully = max_pool_size / full_size;
+
+ base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+ // We assume few zones (less than 'fits_fully' many) to be active at the same
+ // time. When zones grow regularly, they will keep requesting segments of
+ // increasing size each time. Therefore we try to get as many segments with an
+ // equal number of segments of each size as possible.
+ // The remaining space is used to make more room for an 'incomplete set' of
+ // segments beginning with the smaller ones.
+ // This code will work best if the max_pool_size is a multiple of the
+ // full_size. If max_pool_size is no sum of segment sizes the actual pool
+ // size might be smaller then max_pool_size. Note that no actual memory gets
+ // wasted though.
+ // TODO(heimbuef): Determine better strategy generating a segment sizes
+ // distribution that is closer to real/benchmark usecases and uses the given
+ // max_pool_size more efficiently.
+ size_t total_size = fits_fully * full_size;
+
+ for (size_t power = 0; power < kNumberBuckets; ++power) {
+ if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
+ max_pool_size) {
+ unused_segments_max_sizes_[power] = fits_fully + 1;
+ total_size += size_t(1) << power;
+ } else {
+ unused_segments_max_sizes_[power] = fits_fully;
+ }
+ }
+}
+
+Segment* AccountingAllocator::GetSegment(size_t bytes) {
+ Segment* result = GetSegmentFromPool(bytes);
+ if (result == nullptr) {
+ result = AllocateSegment(bytes);
+ result->Initialize(bytes);
+ }
+
+ return result;
+}
+
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = malloc(bytes);
if (memory) {
@@ -26,6 +92,16 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
return reinterpret_cast<Segment*>(memory);
}
+void AccountingAllocator::ReturnSegment(Segment* segment) {
+ segment->ZapContents();
+
+ if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
+ FreeSegment(segment);
+ } else if (!AddSegmentToPool(segment)) {
+ FreeSegment(segment);
+ }
+}
+
void AccountingAllocator::FreeSegment(Segment* memory) {
base::NoBarrier_AtomicIncrement(
&current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
@@ -41,5 +117,87 @@ size_t AccountingAllocator::GetMaxMemoryUsage() const {
return base::NoBarrier_Load(&max_memory_usage_);
}
+size_t AccountingAllocator::GetCurrentPoolSize() const {
+ return base::NoBarrier_Load(&current_pool_size_);
+}
+
+Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
+ if (requested_size > (1 << kMaxSegmentSizePower)) {
+ return nullptr;
+ }
+
+ size_t power = kMinSegmentSizePower;
+ while (requested_size > (static_cast<size_t>(1) << power)) power++;
+
+ DCHECK_GE(power, kMinSegmentSizePower + 0);
+ power -= kMinSegmentSizePower;
+
+ Segment* segment;
+ {
+ base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+ segment = unused_segments_heads_[power];
+
+ if (segment != nullptr) {
+ unused_segments_heads_[power] = segment->next();
+ segment->set_next(nullptr);
+
+ unused_segments_sizes_[power]--;
+ base::NoBarrier_AtomicIncrement(
+ &current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
+ }
+ }
+
+ if (segment) {
+ DCHECK_GE(segment->size(), requested_size);
+ }
+ return segment;
+}
+
+bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
+ size_t size = segment->size();
+
+ if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
+
+ if (size < (1 << kMinSegmentSizePower)) return false;
+
+ size_t power = kMaxSegmentSizePower;
+
+ while (size < (static_cast<size_t>(1) << power)) power--;
+
+ DCHECK_GE(power, kMinSegmentSizePower + 0);
+ power -= kMinSegmentSizePower;
+
+ {
+ base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+ if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
+ return false;
+ }
+
+ segment->set_next(unused_segments_heads_[power]);
+ unused_segments_heads_[power] = segment;
+ base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
+ unused_segments_sizes_[power]++;
+ }
+
+ return true;
+}
+
+void AccountingAllocator::ClearPool() {
+ base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);
+
+ for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
+ power++) {
+ Segment* current = unused_segments_heads_[power];
+ while (current) {
+ Segment* next = current->next();
+ FreeSegment(current);
+ current = next;
+ }
+ unused_segments_heads_[power] = nullptr;
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
index 31016a5018..c6bf7a75e3 100644
--- a/deps/v8/src/zone/accounting-allocator.h
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -13,24 +13,77 @@
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/zone/zone-segment.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
namespace v8 {
namespace internal {
class V8_EXPORT_PRIVATE AccountingAllocator {
public:
- AccountingAllocator() = default;
- virtual ~AccountingAllocator() = default;
+ static const size_t kMaxPoolSizeLowMemoryDevice = 8ul * KB;
+ static const size_t kMaxPoolSizeMediumMemoryDevice = 8ul * KB;
+ static const size_t kMaxPoolSizeHighMemoryDevice = 8ul * KB;
+ static const size_t kMaxPoolSizeHugeMemoryDevice = 8ul * KB;
- virtual Segment* AllocateSegment(size_t bytes);
- virtual void FreeSegment(Segment* memory);
+ AccountingAllocator();
+ virtual ~AccountingAllocator();
+
+ // Gets an empty segment from the pool or creates a new one.
+ virtual Segment* GetSegment(size_t bytes);
+ // Return unneeded segments to either insert them into the pool or release
+ // them if the pool is already full or memory pressure is high.
+ virtual void ReturnSegment(Segment* memory);
size_t GetCurrentMemoryUsage() const;
size_t GetMaxMemoryUsage() const;
+ size_t GetCurrentPoolSize() const;
+
+ void MemoryPressureNotification(MemoryPressureLevel level);
+ // Configures the zone segment pool size limits so the pool does not
+ // grow bigger than max_pool_size.
+ // TODO(heimbuef): Do not accept segments to pool that are larger than
+ // their size class requires. Sometimes the zones generate weird segments.
+ void ConfigureSegmentPool(const size_t max_pool_size);
+
+ virtual void ZoneCreation(const Zone* zone) {}
+ virtual void ZoneDestruction(const Zone* zone) {}
+
private:
+ FRIEND_TEST(Zone, SegmentPoolConstraints);
+
+ static const size_t kMinSegmentSizePower = 13;
+ static const size_t kMaxSegmentSizePower = 18;
+
+ STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
+
+ static const size_t kNumberBuckets =
+ 1 + kMaxSegmentSizePower - kMinSegmentSizePower;
+
+ // Allocates a new segment. Returns nullptr on failed allocation.
+ Segment* AllocateSegment(size_t bytes);
+ void FreeSegment(Segment* memory);
+
+ // Returns a segment from the pool of at least the requested size.
+ Segment* GetSegmentFromPool(size_t requested_size);
+ // Trys to add a segment to the pool. Returns false if the pool is full.
+ bool AddSegmentToPool(Segment* segment);
+
+ // Empties the pool and puts all its contents onto the garbage stack.
+ void ClearPool();
+
+ Segment* unused_segments_heads_[kNumberBuckets];
+
+ size_t unused_segments_sizes_[kNumberBuckets];
+ size_t unused_segments_max_sizes_[kNumberBuckets];
+
+ base::Mutex unused_segments_mutex_;
+
base::AtomicWord current_memory_usage_ = 0;
base::AtomicWord max_memory_usage_ = 0;
+ base::AtomicWord current_pool_size_ = 0;
+
+ base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
diff --git a/deps/v8/src/zone/zone-allocator.h b/deps/v8/src/zone/zone-allocator.h
index 8370d73e49..1e2862a2c1 100644
--- a/deps/v8/src/zone/zone-allocator.h
+++ b/deps/v8/src/zone/zone-allocator.h
@@ -26,6 +26,8 @@ class zone_allocator {
typedef zone_allocator<O> other;
};
+ // TODO(bbudge) Remove when V8 updates to MSVS 2015. See crbug.com/603131.
+ zone_allocator() : zone_(nullptr) { UNREACHABLE(); }
explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
explicit zone_allocator(const zone_allocator& other) throw()
: zone_(other.zone_) {}
@@ -62,7 +64,6 @@ class zone_allocator {
Zone* zone() { return zone_; }
private:
- zone_allocator();
Zone* zone_;
};
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
new file mode 100644
index 0000000000..f977a0cb31
--- /dev/null
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -0,0 +1,452 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/globals.h"
+#include "src/zone/zone.h"
+
+#ifndef V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+#define V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class ZoneChunkListIterator;
+template <typename T>
+class ForwardZoneChunkListIterator;
+template <typename T>
+class ReverseZoneChunkListIterator;
+
+// A zone-backed hybrid of a vector and a linked list. Use it if you need a
+// collection that
+// * needs to grow indefinitely,
+// * will mostly grow at the back, but may sometimes grow in front as well
+//   (preferably in batches),
+// * needs to have very low overhead,
+// * offers forward- and backwards-iteration,
+// * offers relatively fast seeking,
+// * offers bidirectional iterators,
+// * can be rewound without freeing the backing store.
+// This list will maintain a doubly-linked list of chunks. When a chunk is
+// filled up, a new one gets appended. New chunks appended at the end will
+// grow in size up to a certain limit to avoid over-allocation and to keep
+// the zone clean.
+template <typename T>
+class ZoneChunkList : public ZoneObject {
+ public:
+ enum class StartMode {
+ // The list will not allocate a starting chunk. Use if you expect your
+ // list to remain empty in many cases.
+ kEmpty = 0,
+ // The list will start with a small initial chunk. Subsequent chunks will
+ // get bigger over time.
+ kSmall = 8,
+ // The list will start with one chunk at maximum size. Use this if you
+ // expect your list to contain many items to avoid growing chunks.
+ kBig = 256
+ };
+
+ explicit ZoneChunkList(Zone* zone, StartMode start_mode = StartMode::kEmpty)
+ : zone_(zone) {
+ if (start_mode != StartMode::kEmpty) {
+ front_ = NewChunk(static_cast<uint32_t>(start_mode));
+ back_ = front_;
+ }
+ }
+
+ size_t size() const;
+
+ T& front() const;
+ T& back() const;
+
+ void push_back(const T& item);
+ void pop_back();
+
+ // Will push a separate chunk to the front of the chunk-list.
+ // Very memory-inefficient. Do only use sparsely! If you have many items to
+ // add in front, consider using 'push_front_many'.
+ void push_front(const T& item);
+ // TODO(heimbuef): Add 'push_front_many'.
+
+ // Cuts the last list elements so at most 'limit' many remain. Does not
+ // free the actual memory, since it is zone allocated.
+ void Rewind(const size_t limit = 0);
+
+ // Quickly scans the list to retrieve the element at the given index. Will
+ // *not* check bounds.
+ ForwardZoneChunkListIterator<T> Find(const size_t index);
+ ForwardZoneChunkListIterator<const T> Find(const size_t index) const;
+ // TODO(heimbuef): Add 'rFind', seeking from the end and returning a
+ // reverse iterator.
+
+ void CopyTo(T* ptr);
+
+ ForwardZoneChunkListIterator<T> begin();
+ ForwardZoneChunkListIterator<T> end();
+ ReverseZoneChunkListIterator<T> rbegin();
+ ReverseZoneChunkListIterator<T> rend();
+ ForwardZoneChunkListIterator<const T> begin() const;
+ ForwardZoneChunkListIterator<const T> end() const;
+ ReverseZoneChunkListIterator<const T> rbegin() const;
+ ReverseZoneChunkListIterator<const T> rend() const;
+
+ private:
+ friend class ZoneChunkListIterator<T>;
+ friend class ForwardZoneChunkListIterator<T>;
+ friend class ReverseZoneChunkListIterator<T>;
+ static const uint32_t kMaxChunkCapacity = 256u;
+
+ STATIC_ASSERT(kMaxChunkCapacity == static_cast<uint32_t>(StartMode::kBig));
+
+ struct Chunk {
+ uint32_t capacity_ = 0;
+ uint32_t position_ = 0;
+ Chunk* next_ = nullptr;
+ Chunk* previous_ = nullptr;
+ T* items() { return reinterpret_cast<T*>(this + 1); }
+ };
+
+ Chunk* NewChunk(const uint32_t capacity) {
+ Chunk* chunk =
+ new (zone_->New(sizeof(Chunk) + capacity * sizeof(T))) Chunk();
+ chunk->capacity_ = capacity;
+ return chunk;
+ }
+
+ struct SeekResult {
+ Chunk* chunk_;
+ uint32_t chunk_index_;
+ };
+
+ // Returns the chunk and relative index of the element at the given global
+ // index. Will skip entire chunks and is therefore faster than iterating.
+ SeekResult SeekIndex(size_t index) const;
+
+ Zone* zone_;
+
+ size_t size_ = 0;
+ Chunk* front_ = nullptr;
+ Chunk* back_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(ZoneChunkList);
+};
+
+template <typename T>
+class ZoneChunkListIterator {
+ public:
+ T& operator*() { return current_->items()[position_]; }
+ bool operator==(const ZoneChunkListIterator& other) {
+ return other.current_ == current_ && other.position_ == position_;
+ }
+ bool operator!=(const ZoneChunkListIterator& other) {
+ return !operator==(other);
+ }
+
+ protected:
+ ZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
+ size_t position)
+ : current_(current), position_(position) {}
+
+ void MoveNext() {
+ ++position_;
+ if (position_ >= current_->capacity_) {
+ current_ = current_->next_;
+ position_ = 0;
+ }
+ }
+
+ void MoveRNext() {
+ if (position_ == 0) {
+ current_ = current_->previous_;
+ position_ = current_ ? current_->capacity_ - 1 : 0;
+ } else {
+ --position_;
+ }
+ }
+
+ typename ZoneChunkList<T>::Chunk* current_;
+ size_t position_;
+};
+
+template <typename T>
+class ForwardZoneChunkListIterator : public ZoneChunkListIterator<T> {
+ using ZoneChunkListIterator<T>::current_;
+ using ZoneChunkListIterator<T>::position_;
+ using ZoneChunkListIterator<T>::MoveNext;
+ using ZoneChunkListIterator<T>::MoveRNext;
+
+ public:
+ ForwardZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
+ size_t position)
+ : ZoneChunkListIterator<T>(current, position) {}
+
+ ForwardZoneChunkListIterator& operator++() {
+ MoveNext();
+ return *this;
+ }
+
+ ForwardZoneChunkListIterator operator++(int) {
+ ForwardZoneChunkListIterator<T> clone(*this);
+ MoveNext();
+ return clone;
+ }
+
+ ForwardZoneChunkListIterator& operator--() {
+ MoveRNext();
+ return *this;
+ }
+
+ ForwardZoneChunkListIterator operator--(int) {
+ ForwardZoneChunkListIterator<T> clone(*this);
+ MoveRNext();
+ return clone;
+ }
+
+ private:
+ friend class ZoneChunkList<T>;
+ static ForwardZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
+ return ForwardZoneChunkListIterator<T>(list->front_, 0);
+ }
+ static ForwardZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
+ if (list->back_ == nullptr) return Begin(list);
+
+ DCHECK_LE(list->back_->position_, list->back_->capacity_);
+ if (list->back_->position_ == list->back_->capacity_) {
+ return ForwardZoneChunkListIterator<T>(nullptr, 0);
+ }
+
+ return ForwardZoneChunkListIterator<T>(list->back_, list->back_->position_);
+ }
+};
+
+template <typename T>
+class ReverseZoneChunkListIterator : public ZoneChunkListIterator<T> {
+  using ZoneChunkListIterator<T>::current_;
+  using ZoneChunkListIterator<T>::position_;
+  using ZoneChunkListIterator<T>::MoveNext;
+  using ZoneChunkListIterator<T>::MoveRNext;
+
+ public:
+  ReverseZoneChunkListIterator(typename ZoneChunkList<T>::Chunk* current,
+                               size_t position)
+      : ZoneChunkListIterator<T>(current, position) {}
+
+  ReverseZoneChunkListIterator& operator++() {
+    MoveRNext();
+    return *this;
+  }
+
+  ReverseZoneChunkListIterator operator++(int) {
+    ReverseZoneChunkListIterator<T> clone(*this);
+    MoveRNext();
+    return clone;
+  }
+
+  ReverseZoneChunkListIterator& operator--() {
+    MoveNext();
+    return *this;
+  }
+
+  ReverseZoneChunkListIterator operator--(int) {
+    ReverseZoneChunkListIterator<T> clone(*this);  // Was Forward...<T>: typo.
+    MoveNext();
+    return clone;
+  }
+
+ private:
+  friend class ZoneChunkList<T>;
+  static ReverseZoneChunkListIterator<T> Begin(ZoneChunkList<T>* list) {
+    if (list->back_ == nullptr) return End(list);
+    if (list->back_->position_ == 0) {
+      if (list->back_->previous_ != nullptr) {
+        return ReverseZoneChunkListIterator<T>(
+            list->back_->previous_, list->back_->previous_->capacity_ - 1);
+      } else {
+        return End(list);
+      }
+    }
+    return ReverseZoneChunkListIterator<T>(list->back_,
+                                           list->back_->position_ - 1);
+  }
+  static ReverseZoneChunkListIterator<T> End(ZoneChunkList<T>* list) {
+    return ReverseZoneChunkListIterator<T>(nullptr, 0);
+  }
+};
+
+template <typename T>
+size_t ZoneChunkList<T>::size() const {
+ return size_;
+}
+
+template <typename T>
+T& ZoneChunkList<T>::front() const {
+ DCHECK_LT(size_t(0), size());
+ return front_->items()[0];
+}
+
+template <typename T>
+T& ZoneChunkList<T>::back() const {
+ DCHECK_LT(size_t(0), size());
+
+ if (back_->position_ == 0) {
+ return back_->previous_->items()[back_->previous_->position_ - 1];
+ } else {
+ return back_->items()[back_->position_ - 1];
+ }
+}
+
+template <typename T>
+void ZoneChunkList<T>::push_back(const T& item) {
+ if (back_ == nullptr) {
+ front_ = NewChunk(static_cast<uint32_t>(StartMode::kSmall));
+ back_ = front_;
+ }
+
+ DCHECK_LE(back_->position_, back_->capacity_);
+ if (back_->position_ == back_->capacity_) {
+ if (back_->next_ == nullptr) {
+ Chunk* chunk = NewChunk(Min(back_->capacity_ << 1, kMaxChunkCapacity));
+ back_->next_ = chunk;
+ chunk->previous_ = back_;
+ }
+ back_ = back_->next_;
+ }
+ back_->items()[back_->position_] = item;
+ ++back_->position_;
+ ++size_;
+}
+
+template <typename T>
+void ZoneChunkList<T>::pop_back() {
+  DCHECK_LT(size_t(0), size());
+  if (back_->position_ == 0) back_ = back_->previous_;
+  --back_->position_;
+  // Keep the element count in sync; size() would otherwise over-report.
+  --size_;
+}
+
+template <typename T>
+void ZoneChunkList<T>::push_front(const T& item) {
+ Chunk* chunk = NewChunk(1); // Yes, this gets really inefficient.
+ chunk->next_ = front_;
+ if (front_) {
+ front_->previous_ = chunk;
+ } else {
+ back_ = chunk;
+ }
+ front_ = chunk;
+
+ chunk->items()[0] = item;
+ chunk->position_ = 1;
+ ++size_;
+}
+
+template <typename T>
+typename ZoneChunkList<T>::SeekResult ZoneChunkList<T>::SeekIndex(
+    size_t index) const {
+  DCHECK_LT(index, size());
+  Chunk* current = front_;
+  while (index >= current->capacity_) {  // >=: index==capacity_ is next chunk.
+    index -= current->capacity_;
+    current = current->next_;
+  }
+  return {current, static_cast<uint32_t>(index)};
+}
+
+template <typename T>
+void ZoneChunkList<T>::Rewind(const size_t limit) {
+ if (limit >= size()) return;
+
+ SeekResult seek_result = SeekIndex(limit);
+ DCHECK_NOT_NULL(seek_result.chunk_);
+
+ // Do a partial rewind of the chunk containing the index.
+ seek_result.chunk_->position_ = seek_result.chunk_index_;
+
+ // Set back_ so iterators will work correctly.
+ back_ = seek_result.chunk_;
+
+ // Do full rewind of all subsequent chunks.
+ for (Chunk* current = seek_result.chunk_->next_; current != nullptr;
+ current = current->next_) {
+ current->position_ = 0;
+ }
+
+ size_ = limit;
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<T> ZoneChunkList<T>::Find(const size_t index) {
+ SeekResult seek_result = SeekIndex(index);
+ return ForwardZoneChunkListIterator<T>(seek_result.chunk_,
+ seek_result.chunk_index_);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::Find(
+ const size_t index) const {
+ SeekResult seek_result = SeekIndex(index);
+ return ForwardZoneChunkListIterator<const T>(seek_result.chunk_,
+ seek_result.chunk_index_);
+}
+
+template <typename T>
+void ZoneChunkList<T>::CopyTo(T* ptr) {
+ for (Chunk* current = front_; current != nullptr; current = current->next_) {
+ void* start = current->items();
+ void* end = current->items() + current->position_;
+ size_t bytes = static_cast<size_t>(reinterpret_cast<uintptr_t>(end) -
+ reinterpret_cast<uintptr_t>(start));
+
+ MemCopy(ptr, current->items(), bytes);
+ ptr += current->position_;
+ }
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<T> ZoneChunkList<T>::begin() {
+ return ForwardZoneChunkListIterator<T>::Begin(this);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<T> ZoneChunkList<T>::end() {
+ return ForwardZoneChunkListIterator<T>::End(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rbegin() {
+ return ReverseZoneChunkListIterator<T>::Begin(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<T> ZoneChunkList<T>::rend() {
+ return ReverseZoneChunkListIterator<T>::End(this);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::begin() const {
+ return ForwardZoneChunkListIterator<const T>::Begin(this);
+}
+
+template <typename T>
+ForwardZoneChunkListIterator<const T> ZoneChunkList<T>::end() const {
+ return ForwardZoneChunkListIterator<const T>::End(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rbegin() const {
+ return ReverseZoneChunkListIterator<const T>::Begin(this);
+}
+
+template <typename T>
+ReverseZoneChunkListIterator<const T> ZoneChunkList<T>::rend() const {
+ return ReverseZoneChunkListIterator<const T>::End(this);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
diff --git a/deps/v8/src/zone/zone-segment.cc b/deps/v8/src/zone/zone-segment.cc
index f63b530667..1fa49d49ba 100644
--- a/deps/v8/src/zone/zone-segment.cc
+++ b/deps/v8/src/zone/zone-segment.cc
@@ -18,5 +18,6 @@ void Segment::ZapHeader() {
memset(this, kZapDeadByte, sizeof(Segment));
#endif
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/zone/zone-segment.h b/deps/v8/src/zone/zone-segment.h
index d37cf5648d..e1c3ed0048 100644
--- a/deps/v8/src/zone/zone-segment.h
+++ b/deps/v8/src/zone/zone-segment.h
@@ -20,11 +20,7 @@ class Zone;
class Segment {
public:
- void Initialize(Segment* next, size_t size, Zone* zone) {
- next_ = next;
- size_ = size;
- zone_ = zone;
- }
+ void Initialize(size_t size) { size_ = size; }
Zone* zone() const { return zone_; }
void set_zone(Zone* const zone) { zone_ = zone; }
@@ -48,6 +44,7 @@ class Segment {
// Constant byte value used for zapping dead memory in debug mode.
static const unsigned char kZapDeadByte = 0xcd;
#endif
+
// Computes the address of the nth byte in this segment.
Address address(size_t n) const { return Address(this) + n; }
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index db46b8ba23..792f555897 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -41,17 +41,21 @@ const size_t kASanRedzoneBytes = 0;
} // namespace
-Zone::Zone(AccountingAllocator* allocator)
+Zone::Zone(AccountingAllocator* allocator, const char* name)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
limit_(0),
allocator_(allocator),
- segment_head_(nullptr) {}
+ segment_head_(nullptr),
+ name_(name) {
+ allocator_->ZoneCreation(this);
+}
Zone::~Zone() {
+ allocator_->ZoneDestruction(this);
+
DeleteAll();
- DeleteKeptSegment();
DCHECK(segment_bytes_allocated_ == 0);
}
@@ -84,73 +88,33 @@ void* Zone::New(size_t size) {
}
void Zone::DeleteAll() {
- // Find a segment with a suitable size to keep around.
- Segment* keep = nullptr;
- // Traverse the chained list of segments, zapping (in debug mode)
- // and freeing every segment except the one we wish to keep.
+ // Traverse the chained list of segments and return them all to the allocator.
for (Segment* current = segment_head_; current;) {
Segment* next = current->next();
- if (!keep && current->size() <= kMaximumKeptSegmentSize) {
- // Unlink the segment we wish to keep from the list.
- keep = current;
- keep->set_next(nullptr);
- } else {
- size_t size = current->size();
-#ifdef DEBUG
- // Un-poison first so the zapping doesn't trigger ASan complaints.
- ASAN_UNPOISON_MEMORY_REGION(current, size);
-#endif
- current->ZapContents();
- segment_bytes_allocated_ -= size;
- allocator_->FreeSegment(current);
- }
- current = next;
- }
-
- // If we have found a segment we want to keep, we must recompute the
- // variables 'position' and 'limit' to prepare for future allocate
- // attempts. Otherwise, we must clear the position and limit to
- // force a new segment to be allocated on demand.
- if (keep) {
- Address start = keep->start();
- position_ = RoundUp(start, kAlignmentInBytes);
- limit_ = keep->end();
- // Un-poison so we can re-use the segment later.
- ASAN_UNPOISON_MEMORY_REGION(start, keep->capacity());
- keep->ZapContents();
- } else {
- position_ = limit_ = 0;
- }
+ size_t size = current->size();
- allocation_size_ = 0;
- // Update the head segment to be the kept segment (if any).
- segment_head_ = keep;
-}
+ // Un-poison the segment content so we can re-use or zap it later.
+ ASAN_UNPOISON_MEMORY_REGION(current->start(), current->capacity());
-void Zone::DeleteKeptSegment() {
- DCHECK(segment_head_ == nullptr || segment_head_->next() == nullptr);
- if (segment_head_ != nullptr) {
- size_t size = segment_head_->size();
-#ifdef DEBUG
- // Un-poison first so the zapping doesn't trigger ASan complaints.
- ASAN_UNPOISON_MEMORY_REGION(segment_head_, size);
-#endif
- segment_head_->ZapContents();
segment_bytes_allocated_ -= size;
- allocator_->FreeSegment(segment_head_);
- segment_head_ = nullptr;
+ allocator_->ReturnSegment(current);
+ current = next;
}
- DCHECK(segment_bytes_allocated_ == 0);
+ position_ = limit_ = 0;
+ allocation_size_ = 0;
+ segment_head_ = nullptr;
}
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
-Segment* Zone::NewSegment(size_t size) {
- Segment* result = allocator_->AllocateSegment(size);
- segment_bytes_allocated_ += size;
+Segment* Zone::NewSegment(size_t requested_size) {
+  Segment* result = allocator_->GetSegment(requested_size);
   if (result != nullptr) {
-    result->Initialize(segment_head_, size, this);
+    DCHECK_GE(result->size(), requested_size);  // Only deref non-null result.
+    segment_bytes_allocated_ += result->size();
+    result->set_zone(this);
+    result->set_next(segment_head_);
segment_head_ = result;
}
return result;
diff --git a/deps/v8/src/zone/zone.h b/deps/v8/src/zone/zone.h
index ca0d7e4437..dbc1dadadd 100644
--- a/deps/v8/src/zone/zone.h
+++ b/deps/v8/src/zone/zone.h
@@ -14,6 +14,12 @@
#include "src/splay-tree.h"
#include "src/zone/accounting-allocator.h"
+#ifndef ZONE_NAME
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+#define ZONE_NAME __FILE__ ":" TOSTRING(__LINE__)
+#endif
+
namespace v8 {
namespace internal {
@@ -25,13 +31,13 @@ namespace internal {
//
// Note: There is no need to initialize the Zone; the first time an
// allocation is attempted, a segment of memory will be requested
-// through a call to malloc().
+// through the allocator.
//
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
class V8_EXPORT_PRIVATE Zone final {
public:
- explicit Zone(AccountingAllocator* allocator);
+ Zone(AccountingAllocator* allocator, const char* name);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -44,20 +50,14 @@ class V8_EXPORT_PRIVATE Zone final {
return static_cast<T*>(New(length * sizeof(T)));
}
- // Deletes all objects and free all memory allocated in the Zone. Keeps one
- // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
- void DeleteAll();
-
- // Deletes the last small segment kept around by DeleteAll(). You
- // may no longer allocate in the Zone after a call to this method.
- void DeleteKeptSegment();
-
// Returns true if more memory has been allocated in zones than
// the limit allows.
bool excess_allocation() const {
return segment_bytes_allocated_ > kExcessLimit;
}
+ const char* name() const { return name_; }
+
size_t allocation_size() const { return allocation_size_; }
AccountingAllocator* allocator() const { return allocator_; }
@@ -72,12 +72,12 @@ class V8_EXPORT_PRIVATE Zone final {
// Never allocate segments larger than this size in bytes.
static const size_t kMaximumSegmentSize = 1 * MB;
- // Never keep segments larger than this size in bytes around.
- static const size_t kMaximumKeptSegmentSize = 64 * KB;
-
// Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB;
+ // Deletes all objects and free all memory allocated in the Zone.
+ void DeleteAll();
+
// The number of bytes allocated in this zone so far.
size_t allocation_size_;
@@ -94,17 +94,18 @@ class V8_EXPORT_PRIVATE Zone final {
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
- inline Segment* NewSegment(size_t size);
+ inline Segment* NewSegment(size_t requested_size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
- // is guaranteed to be aligned as dictated by kAlignmentInBytes.
+ // is guaranteed to be aligned as dictated by kAlignment.
Address position_;
Address limit_;
AccountingAllocator* allocator_;
Segment* segment_head_;
+ const char* name_;
};
// ZoneObject is an abstraction that helps define classes of objects
@@ -126,19 +127,6 @@ class ZoneObject {
void operator delete(void* pointer, Zone* zone) { UNREACHABLE(); }
};
-// The ZoneScope is used to automatically call DeleteAll() on a
-// Zone when the ZoneScope is destroyed (i.e. goes out of scope)
-class ZoneScope final {
- public:
- explicit ZoneScope(Zone* zone) : zone_(zone) {}
- ~ZoneScope() { zone_->DeleteAll(); }
-
- Zone* zone() const { return zone_; }
-
- private:
- Zone* zone_;
-};
-
// The ZoneAllocationPolicy is used to specialize generic data
// structures to allocate themselves and their elements in the Zone.
class ZoneAllocationPolicy final {
@@ -164,6 +152,13 @@ class ZoneList final : public List<T, ZoneAllocationPolicy> {
ZoneList(int capacity, Zone* zone)
: List<T, ZoneAllocationPolicy>(capacity, ZoneAllocationPolicy(zone)) {}
+ // Construct a new ZoneList from a std::initializer_list
+ ZoneList(std::initializer_list<T> list, Zone* zone)
+ : List<T, ZoneAllocationPolicy>(static_cast<int>(list.size()),
+ ZoneAllocationPolicy(zone)) {
+ for (auto& i : list) Add(i, zone);
+ }
+
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
// Construct a new ZoneList by copying the elements of the given ZoneList.
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index e24615a443..9973134bf2 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -48,6 +48,7 @@ group("default_tests") {
deps = [
":cctest_run",
":fuzzer_run",
+ ":inspector-test_run",
":intl_run",
":message_run",
":mjsunit_run",
@@ -77,6 +78,7 @@ v8_isolate_run("default") {
v8_isolate_run("optimize_for_size") {
deps = [
":cctest_run",
+ ":inspector-test_run",
":intl_run",
":mjsunit_run",
":webkit_run",
@@ -134,6 +136,15 @@ v8_isolate_run("fuzzer") {
isolate = "fuzzer/fuzzer.isolate"
}
+v8_isolate_run("inspector-test") {
+ deps = []
+ if (v8_enable_inspector_override) {
+ deps += [ "inspector:inspector-test" ]
+ }
+
+ isolate = "inspector/inspector.isolate"
+}
+
v8_isolate_run("intl") {
deps = [
"..:d8_run",
diff --git a/deps/v8/test/bot_default.gyp b/deps/v8/test/bot_default.gyp
index 04679183b4..ab0b49e7ef 100644
--- a/deps/v8/test/bot_default.gyp
+++ b/deps/v8/test/bot_default.gyp
@@ -19,6 +19,13 @@
'unittests/unittests.gyp:unittests_run',
'webkit/webkit.gyp:webkit_run',
],
+ 'conditions': [
+ ['v8_enable_inspector==1', {
+ 'dependencies': [
+ 'inspector/inspector.gyp:inspector-test_run',
+ ],
+ }],
+ ],
'includes': [
'../gypfiles/features.gypi',
'../gypfiles/isolate.gypi',
diff --git a/deps/v8/test/bot_default.isolate b/deps/v8/test/bot_default.isolate
index d6e4aa3474..59420cb056 100644
--- a/deps/v8/test/bot_default.isolate
+++ b/deps/v8/test/bot_default.isolate
@@ -9,7 +9,9 @@
},
'includes': [
'cctest/cctest.isolate',
+ 'debugger/debugger.isolate',
'fuzzer/fuzzer.isolate',
+ 'inspector/inspector.isolate',
'intl/intl.isolate',
'message/message.isolate',
'mjsunit/mjsunit.isolate',
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index d2918d90ae..f926707915 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -40,7 +40,6 @@ v8_executable("cctest") {
"compiler/test-multiple-return.cc",
"compiler/test-node.cc",
"compiler/test-operator.cc",
- "compiler/test-osr.cc",
"compiler/test-representation-change.cc",
"compiler/test-run-bytecode-graph-builder.cc",
"compiler/test-run-calls-to-external-references.cc",
@@ -167,6 +166,7 @@ v8_executable("cctest") {
"test-thread-termination.cc",
"test-threads.cc",
"test-trace-event.cc",
+ "test-traced-value.cc",
"test-transitions.cc",
"test-typedarrays.cc",
"test-types.cc",
@@ -182,6 +182,7 @@ v8_executable("cctest") {
"trace-extension.cc",
"trace-extension.h",
"types-fuzz.h",
+ "wasm/test-managed.cc",
"wasm/test-run-wasm-64.cc",
"wasm/test-run-wasm-asmjs.cc",
"wasm/test-run-wasm-interpreter.cc",
@@ -189,7 +190,6 @@ v8_executable("cctest") {
"wasm/test-run-wasm-module.cc",
"wasm/test-run-wasm-relocation.cc",
"wasm/test-run-wasm.cc",
- "wasm/test-wasm-function-name-table.cc",
"wasm/test-wasm-stack.cc",
"wasm/test-wasm-trap-position.cc",
"wasm/wasm-run-utils.h",
@@ -276,6 +276,7 @@ v8_executable("cctest") {
"test-log-stack-tracer.cc",
"test-macro-assembler-x64.cc",
"test-run-wasm-relocation-x64.cc",
+ "wasm/test-run-wasm-simd-lowering.cc",
"wasm/test-run-wasm-simd.cc",
]
} else if (v8_current_cpu == "x87") {
@@ -324,6 +325,7 @@ v8_executable("cctest") {
deps = [
":resources",
+ "../..:v8_libbase",
"../..:v8_libplatform",
"../..:wasm_module_runner",
"../..:wasm_test_signatures",
@@ -331,10 +333,13 @@ v8_executable("cctest") {
"//build/win:default_exe_manifest",
]
+ defines = []
+
if (is_component_build) {
# cctest can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
deps += [ "../..:v8_maybe_snapshot" ]
+ defines += [ "BUILDING_V8_SHARED" ]
} else {
deps += [ "../..:v8" ]
}
@@ -358,14 +363,6 @@ v8_executable("cctest") {
# MSVS wants this for gay-{precision,shortest}.cc.
cflags += [ "/bigobj" ]
-
- # Suppress warnings about importing locally defined symbols.
- if (is_component_build) {
- ldflags += [
- "/ignore:4049",
- "/ignore:4217",
- ]
- }
}
}
@@ -416,25 +413,10 @@ v8_executable("generate-bytecode-expectations") {
]
deps = [
+ "../..:v8",
+ "../..:v8_libbase",
"../..:v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
]
-
- if (is_component_build) {
- # Same as cctest, we need to depend on the underlying static target.
- deps += [ "../..:v8_maybe_snapshot" ]
- } else {
- deps += [ "../..:v8" ]
- }
-
- if (is_win) {
- # Suppress warnings about importing locally defined symbols.
- if (is_component_build) {
- ldflags = [
- "/ignore:4049",
- "/ignore:4217",
- ]
- }
- }
}
diff --git a/deps/v8/test/cctest/asmjs/test-asm-typer.cc b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
index a44ecf9283..d345774dbf 100644
--- a/deps/v8/test/cctest/asmjs/test-asm-typer.cc
+++ b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
@@ -52,8 +52,6 @@ class AsmTyperHarnessBuilder {
factory_->NewStringFromUtf8(CStrVector(source)).ToHandleChecked()),
script_(factory_->NewScript(source_code_)) {
ParseInfo info(zone_, script_);
- info.set_global();
- info.set_lazy(false);
info.set_allow_lazy_parsing(false);
info.set_toplevel(true);
info.set_ast_value_factory(&ast_value_factory_);
@@ -66,17 +64,18 @@ class AsmTyperHarnessBuilder {
}
outer_scope_ = info.script_scope();
- module_ =
- info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
+ module_ = info.scope()
+ ->declarations()
+ ->AtForTest(0)
+ ->AsFunctionDeclaration()
+ ->fun();
typer_.reset(new AsmTyper(isolate_, zone_, *script_, module_));
if (validation_type_ == ValidateStatement ||
validation_type_ == ValidateExpression) {
fun_scope_.reset(new AsmTyper::FunctionScope(typer_.get()));
- auto* decls = module_->scope()->declarations();
- for (int ii = 0; ii < decls->length(); ++ii) {
- Declaration* decl = decls->at(ii);
+ for (Declaration* decl : *module_->scope()->declarations()) {
if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
fun_decl_ = fun_decl;
break;
@@ -507,14 +506,15 @@ TEST(ErrorsInGlobalVariableDefinition) {
const char* error_message;
} kTests[] = {
{"var v;", "Global variable missing initializer"},
- {"var v = uninitialized;", "Invalid global variable initializer"},
+ {"var v = uninitialized;", "Undeclared identifier in global"},
{"var v = 'use asm';", "type annotation - forbidden literal"},
{"var v = 4294967296;", " - forbidden literal"},
- {"var v = not_fround;", "Invalid global variable initializer"},
+ {"var v = not_fround;", "initialize a global must be a const"},
{"var v = not_fround(1);", "expected call fround(literal)"},
{"var v = __fround__(1.0);", "expected call fround(literal)"},
{"var v = fround(1.0, 1.0);", "expected call fround(literal)"},
{"var v = fround(not_fround);", "literal argument for call to fround"},
+ {"var v = i?0:1;", "Invalid global variable initializer"},
{"var v = stdlib.nan", "Invalid import"},
{"var v = stdlib.Math.nan", "Invalid import"},
{"var v = stdlib.Mathh.E", "Invalid import"},
@@ -790,6 +790,19 @@ TEST(ErrorsInFunction) {
" var c = 0;\n"
"}\n",
"Local variable missing initializer in asm.js module"},
+ {"function f(a) {\n"
+ " a = a|0;\n"
+ " var x = a;\n"
+ "}\n",
+ "variable declaration initializer must be const"},
+ {"function f() {\n"
+ " var x = 1+i;\n"
+ "}\n",
+ "should be a literal, const, or fround(literal"},
+ {"function f() {\n"
+ " var x = a;\n"
+ "}\n",
+ "Undeclared identifier in variable declaration initializer"},
{"function f() {\n"
" function ff() {}\n"
"}\n",
@@ -814,6 +827,19 @@ TEST(ErrorsInFunction) {
" return 2147483648;\n"
"}\n",
"Invalid literal in return statement"},
+ {"function f(a) {\n"
+ " a = a|0;\n"
+ " return a;\n"
+ "}\n",
+ "in return statement is not const"},
+ {"function f() {\n"
+ " return a;\n"
+ "}\n",
+ "Undeclared identifier in return statement"},
+ {"function f() {\n"
+ " return i?0:1;\n"
+ "}\n",
+ "Invalid return type expression"},
{"function f() {\n"
" return stdlib.Math.E;"
"}\n",
@@ -1097,7 +1123,8 @@ TEST(ValidateCallExpression) {
for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
const auto* test = kTests + ii;
CHECK(v8::base::OS::SNPrintF(full_test, kFullTestSize, "fround(%s)",
- test->expression) < kFullTestSize);
+ test->expression) <
+ static_cast<int>(kFullTestSize));
if (!ValidationOf(Expression(full_test))
->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
->WithGlobal(DynamicGlobal("a_float_function"), v2f)
@@ -1128,7 +1155,8 @@ TEST(ValidateCallExpression) {
for (size_t ii = 0; ii < arraysize(kFailureTests); ++ii) {
const auto* test = kFailureTests + ii;
CHECK(v8::base::OS::SNPrintF(full_test, kFullTestSize, "fround(%s)",
- test->expression) < kFullTestSize);
+ test->expression) <
+ static_cast<int>(kFullTestSize));
if (!ValidationOf(Expression(full_test))
->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
@@ -2000,4 +2028,31 @@ TEST(B640194) {
}
}
+TEST(B660813) {
+ const char* kTests[] = {
+ "function asm() {\n"
+ " 'use asm';\n"
+ " const i = 0xffffffff;\n"
+ " function f() {\n"
+ " return i;\n"
+ " }\n"
+ "}",
+ "function asm() {\n"
+ " 'use asm';\n"
+ " const i = -(-2147483648);\n"
+ " function f() {\n"
+ " return i;\n"
+ " }\n"
+ "}",
+ };
+ for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
+ if (!ValidationOf(Module(kTests[ii]))
+ ->FailsWithMessage(
+ "Constant in return must be signed, float, or double.")) {
+ std::cerr << "Test:\n" << kTests[ii];
+ CHECK(false);
+ }
+ }
+}
+
} // namespace
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 17127ed9ec..c987d8d375 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -197,7 +197,7 @@ InitializedHandleScope::InitializedHandleScope()
InitializedHandleScope::~InitializedHandleScope() {}
HandleAndZoneScope::HandleAndZoneScope()
- : main_zone_(new i::Zone(&allocator_)) {}
+ : main_zone_(new i::Zone(&allocator_, ZONE_NAME)) {}
HandleAndZoneScope::~HandleAndZoneScope() {}
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index c95a0b1749..6b5dba6472 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -60,7 +60,6 @@
'compiler/test-multiple-return.cc',
'compiler/test-node.cc',
'compiler/test-operator.cc',
- 'compiler/test-osr.cc',
'compiler/test-representation-change.cc',
'compiler/test-run-bytecode-graph-builder.cc',
'compiler/test-run-calls-to-external-references.cc',
@@ -187,6 +186,7 @@
'test-thread-termination.cc',
'test-threads.cc',
'test-trace-event.cc',
+ 'test-traced-value.cc',
'test-transitions.cc',
'test-typedarrays.cc',
'test-ast-types.cc',
@@ -203,6 +203,7 @@
'trace-extension.cc',
'trace-extension.h',
'types-fuzz.h',
+ 'wasm/test-managed.cc',
'wasm/test-run-wasm.cc',
'wasm/test-run-wasm-64.cc',
'wasm/test-run-wasm-asmjs.cc',
@@ -210,7 +211,6 @@
'wasm/test-run-wasm-js.cc',
'wasm/test-run-wasm-module.cc',
'wasm/test-run-wasm-relocation.cc',
- 'wasm/test-wasm-function-name-table.cc',
'wasm/test-wasm-stack.cc',
'wasm/test-wasm-trap-position.cc',
'wasm/wasm-run-utils.h',
@@ -234,7 +234,8 @@
'test-macro-assembler-x64.cc',
'test-log-stack-tracer.cc',
'test-run-wasm-relocation-x64.cc',
- 'wasm/test-run-wasm-simd.cc'
+ 'wasm/test-run-wasm-simd.cc',
+ 'wasm/test-run-wasm-simd-lowering.cc',
],
'cctest_sources_arm': [ ### gcmole(arch:arm) ###
'test-assembler-arm.cc',
@@ -320,6 +321,7 @@
'type': 'executable',
'dependencies': [
'resources',
+ '../../src/v8.gyp:v8_libbase',
'../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
@@ -428,6 +430,7 @@
# cctest can't be built against a shared library, so we need to
# depend on the underlying static target in that case.
'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
+ 'defines': [ 'BUILDING_V8_SHARED', ]
}, {
'dependencies': ['../../src/v8.gyp:v8'],
}],
@@ -472,16 +475,10 @@
'target_name': 'generate-bytecode-expectations',
'type': 'executable',
'dependencies': [
+ '../../src/v8.gyp:v8',
+ '../../src/v8.gyp:v8_libbase',
'../../src/v8.gyp:v8_libplatform',
],
- 'conditions': [
- ['component=="shared_library"', {
- # Same as cctest, we need to depend on the underlying static target.
- 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
- }, {
- 'dependencies': ['../../src/v8.gyp:v8'],
- }],
- ],
'include_dirs+': [
'../..',
],
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index d8fa871484..690a8c14cd 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -189,11 +189,17 @@ class ApiTestFuzzer: public v8::base::Thread {
// The ApiTestFuzzer is also a Thread, so it has a Run method.
virtual void Run();
- enum PartOfTest { FIRST_PART,
- SECOND_PART,
- THIRD_PART,
- FOURTH_PART,
- LAST_PART = FOURTH_PART };
+ enum PartOfTest {
+ FIRST_PART,
+ SECOND_PART,
+ THIRD_PART,
+ FOURTH_PART,
+ FIFTH_PART,
+ SIXTH_PART,
+ SEVENTH_PART,
+ EIGHTH_PART,
+ LAST_PART = EIGHTH_PART
+ };
static void SetUp(PartOfTest part);
static void RunAllTests();
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index b7bcb6b5e3..059d099bf5 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -29,7 +29,6 @@
[ALWAYS, {
# All tests prefixed with 'Bug' are expected to fail.
'test-api/Bug*': [FAIL],
- 'test-serialize/Bug*': [FAIL],
##############################################################################
@@ -69,7 +68,8 @@
'test-ast-types/*': [PASS, NO_VARIANTS],
# This tests API threading, no point in running several variants.
- 'test-api/Threading*': [PASS, NO_VARIANTS],
+ # They are also slow in debug mode.
+ 'test-api/Threading*': [PASS, NO_VARIANTS, ['mode == debug', SLOW]],
# BUG(2999). The cpu profiler tests are notoriously flaky.
'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
@@ -90,12 +90,11 @@
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
+ # Test that serialization with unknown external reference fails.
+ 'test-serialize/SnapshotCreatorUnknownExternalReferences': [FAIL],
+
############################################################################
# Slow tests.
- 'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
- 'test-api/Threading2': [PASS, ['mode == debug', SLOW]],
- 'test-api/Threading3': [PASS, ['mode == debug', SLOW]],
- 'test-api/Threading4': [PASS, ['mode == debug', SLOW]],
'test-debug/CallFunctionInDebugger': [PASS, ['mode == debug', SLOW]],
'test-strings/StringOOM*': [PASS, ['mode == debug', SKIP]],
'test-serialize/CustomSnapshotDataBlobImmortalImmovableRoots': [PASS, ['mode == debug', SKIP]],
@@ -132,8 +131,6 @@
# Pass but take too long with the simulator.
'test-api/ExternalArrays': [PASS, TIMEOUT],
- 'test-api/Threading1': [SKIP],
- 'test-api/Threading2': [SKIP],
}], # 'arch == arm64 and simulator_run == True'
['arch == arm64 and mode == debug and simulator_run == True', {
@@ -165,10 +162,7 @@
'test-strings/CountBreakIterator': [SKIP],
# Slow tests.
- 'test-api/Threading1': [PASS, SLOW],
- 'test-api/Threading2': [PASS, SLOW],
- 'test-api/Threading3': [PASS, SLOW],
- 'test-api/Threading4': [PASS, SLOW],
+ 'test-api/Threading*': [PASS, SLOW],
}], # 'msan == True'
##############################################################################
@@ -227,10 +221,7 @@
############################################################################
# Slow tests.
- 'test-api/Threading1': [PASS, SLOW],
- 'test-api/Threading2': [PASS, SLOW],
- 'test-api/Threading3': [PASS, SLOW],
- 'test-api/Threading4': [PASS, SLOW],
+ 'test-api/Threading*': [PASS, SLOW],
}], # 'arch == arm'
##############################################################################
@@ -254,7 +245,8 @@
# TODO(mips-team): Currently fails on mips board.
'test-parsing/TooManyArguments': [SKIP],
- 'test-api/Threading3': [SKIP],
+ 'test-api/Threading5': [SKIP],
+ 'test-api/Threading6': [SKIP],
}], # 'arch == mips'
##############################################################################
@@ -327,54 +319,43 @@
['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
# Pass but take too long with the simulator.
- 'test-api/Threading1': [PASS, SLOW],
- 'test-api/Threading2': [PASS, SLOW],
+ 'test-api/Threading*': [PASS, SLOW],
'test-api/ExternalArrays': [PASS, SLOW],
}], # 'arch == ppc64 and simulator_run == True'
##############################################################################
-['variant == turbofan', {
-
- # TurboFan cpu profiler result is different.
- 'test-cpu-profiler/CollectDeoptEvents': [FAIL],
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
-
-}], # variant == turbofan
-
-##############################################################################
['variant == turbofan_opt', {
- # BUG(5193): Flaky.
- 'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
+ # TODO(mythria,4680): Lack of code-ageing and/or lack of compilation cache
+ # in interpreter.
+ 'test-heap/CompilationCacheCachingBehavior': [FAIL],
+
+ # TODO(mstarzinger): Triggers Ignition+TurboFan on everything now and makes
+ # the stack traces within the profilers look different. Needs investigation.
+ 'test-api/SetFunctionEntryHook': [SKIP],
+ 'test-cpu-profiler/BoundFunctionCall': [FAIL],
+ 'test-cpu-profiler/CollectSampleAPI': [FAIL],
+ 'test-cpu-profiler/FunctionApplySample': [FAIL],
+ 'test-cpu-profiler/FunctionCallSample': [FAIL],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [FAIL],
+ 'test-cpu-profiler/JsNativeJsSample': [FAIL],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [FAIL],
+ 'test-cpu-profiler/JsNative1JsNative2JsSample': [FAIL],
+ 'test-cpu-profiler/NativeMethodUninitializedIC': [FAIL],
+ 'test-cpu-profiler/NativeAccessorUninitializedIC': [FAIL],
+ 'test-profile-generator/LineNumber': [FAIL],
+ 'test-sampler-api/StackFramesConsistent': [FAIL],
+
+ # BUG(v8:5457)
+ 'test-api/SetJitCodeEventHandler': [PASS, ['no_snap', SKIP]],
}], # variant == turbofan_opt
##############################################################################
['variant == ignition', {
- # TODO(rmcilroy,4680): Related to lack of code flushing. Check failed: !function->shared()->is_compiled() || function->IsOptimized().
- 'test-heap/TestCodeFlushingPreAged': [FAIL],
- 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
- 'test-heap/TestCodeFlushing': [FAIL],
- 'test-heap/TestCodeFlushingIncremental': [FAIL],
- 'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
-
- # TODO(mythria,4680): Lack of code-ageing in interpreter.
- 'test-heap/Regress169209': [FAIL],
-
# TODO(mythria,4680): Lack of code-ageing and/or lack of compilation cache
# in interpreter.
'test-heap/CompilationCacheCachingBehavior': [FAIL],
- # BUG(4680): Missing type feedback makes optimistic optimizations fail.
- 'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
-
- # BUG(4751). Flaky with ignition.
- 'test-cpu-profiler/JsNativeJsSample': [PASS, FAIL],
-
- # TODO(ignition): Fails due to missing type info when optimizing from bytecode
- # with crankshaft.
- 'test-cpu-profiler/TickLinesOptimized': [SKIP],
-
# BUG(5193): Flaky.
'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
}], # variant == ignition
@@ -384,32 +365,13 @@
'test-cpu-profiler/DeoptUntrackedFunction': [SKIP],
'test-cpu-profiler/TickLinesOptimized': [SKIP],
'test-heap/CompilationCacheCachingBehavior': [FAIL],
- 'test-heap/Regress169209': [FAIL],
- 'test-heap/TestCodeFlushing': [FAIL],
- 'test-heap/TestCodeFlushingIncremental': [FAIL],
- 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
- 'test-heap/TestCodeFlushingPreAged': [FAIL],
# BUG(5193): Flaky.
'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
}], # variant == ignition_staging
##############################################################################
-['variant == ignition_turbofan', {
- # TODO(rmcilroy,4766): Requires BytecodeGraphBuilder to track source position
- # on nodes (behind --turbo_source_positions flag).
- 'test-cpu-profiler/TickLinesOptimized': [FAIL],
-
- # TODO(rmcilroy,4680): Related to lack of code flushing. Check failed: !function->shared()->is_compiled() || function->IsOptimized().
- 'test-heap/TestCodeFlushingPreAged': [FAIL],
- 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
- 'test-heap/TestCodeFlushing': [FAIL],
- 'test-heap/TestCodeFlushingIncremental': [FAIL],
- 'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
-
- # TODO(mythria,4680): Lack of code-ageing in interpreter.
- 'test-heap/Regress169209': [FAIL],
-
+['variant == turbofan or variant == ignition_turbofan', {
# TODO(mythria,4680): Lack of code-ageing and/or lack of compilation cache
# in interpreter.
'test-heap/CompilationCacheCachingBehavior': [FAIL],
@@ -421,13 +383,12 @@
# BUG(4751). Flaky with Ignition.
'test-cpu-profiler/JsNativeJsSample': [SKIP],
- # TurboFan cpu profiler result is different.
- 'test-cpu-profiler/DeoptAtFirstLevelInlinedSource': [FAIL],
- 'test-cpu-profiler/DeoptAtSecondLevelInlinedSource': [FAIL],
+ # TODO(vogelheim,5548): Turbofan does support cached accessors.
+ 'test-api-accessors/CachedAccessorCrankshaft': [FAIL],
# BUG(5193): Flaky.
'test-cpu-profiler/FunctionApplySample': [PASS, ['system == windows', SKIP]],
-}], # variant == ignition_turbofan
+}], # variant == turbofan or variant == ignition_turbofan
##############################################################################
['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index eb2d77a171..b0c84ec94a 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -13,11 +13,12 @@ namespace compiler {
class ZoneHolder {
public:
- explicit ZoneHolder(Isolate* isolate) : zone_(isolate->allocator()) {}
- Zone* zone() { return &zone_; }
+ explicit ZoneHolder(Isolate* isolate)
+ : held_zone_(isolate->allocator(), ZONE_NAME) {}
+ Zone* held_zone() { return &held_zone_; }
private:
- Zone zone_;
+ Zone held_zone_;
};
// Inherit from ZoneHolder in order to create a zone that can be passed to
@@ -29,22 +30,23 @@ class CodeAssemblerTesterImpl : private ZoneHolder, public CodeAssemblerT {
CodeAssemblerTesterImpl(Isolate* isolate,
const CallInterfaceDescriptor& descriptor)
: ZoneHolder(isolate),
- CodeAssemblerT(isolate, ZoneHolder::zone(), descriptor,
+ CodeAssemblerT(isolate, ZoneHolder::held_zone(), descriptor,
Code::ComputeFlags(Code::STUB), "test"),
scope_(isolate) {}
// Test generating code for a JS function (e.g. builtins).
- CodeAssemblerTesterImpl(Isolate* isolate, int parameter_count)
+ CodeAssemblerTesterImpl(Isolate* isolate, int parameter_count,
+ Code::Kind kind = Code::BUILTIN)
: ZoneHolder(isolate),
- CodeAssemblerT(isolate, ZoneHolder::zone(), parameter_count,
- Code::ComputeFlags(Code::FUNCTION), "test"),
+ CodeAssemblerT(isolate, ZoneHolder::held_zone(), parameter_count,
+ Code::ComputeFlags(kind), "test"),
scope_(isolate) {}
// This constructor is intended to be used for creating code objects with
// specific flags.
CodeAssemblerTesterImpl(Isolate* isolate, Code::Flags flags)
: ZoneHolder(isolate),
- CodeAssemblerT(isolate, ZoneHolder::zone(), 0, flags, "test"),
+ CodeAssemblerT(isolate, ZoneHolder::held_zone(), 0, flags, "test"),
scope_(isolate) {}
Handle<Code> GenerateCodeCloseAndEscape() {
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 2da2dc14aa..24a49b852c 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -26,8 +26,7 @@ FunctionTester::FunctionTester(const char* source, uint32_t flags)
function((FLAG_allow_natives_syntax = true, NewFunction(source))),
flags_(flags) {
Compile(function);
- const uint32_t supported_flags = CompilationInfo::kNativeContextSpecializing |
- CompilationInfo::kInliningEnabled;
+ const uint32_t supported_flags = CompilationInfo::kInliningEnabled;
CHECK_EQ(0u, flags_ & ~supported_flags);
}
@@ -158,26 +157,19 @@ Handle<JSFunction> FunctionTester::ForMachineGraph(Graph* graph,
}
Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
- Zone zone(function->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, function);
+ Zone zone(function->GetIsolate()->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, handle(function->shared()));
CompilationInfo info(&parse_info, function);
- info.MarkAsDeoptimizationEnabled();
- if (!FLAG_turbo_from_bytecode) {
- CHECK(Parser::ParseStatic(info.parse_info()));
- }
info.SetOptimizing();
- if (flags_ & CompilationInfo::kNativeContextSpecializing) {
- info.MarkAsNativeContextSpecializing();
- }
+ info.MarkAsDeoptimizationEnabled();
if (flags_ & CompilationInfo::kInliningEnabled) {
info.MarkAsInliningEnabled();
}
- if (FLAG_turbo_from_bytecode) {
- CHECK(Compiler::EnsureBytecode(&info));
+ if (Compiler::EnsureBytecode(&info)) {
info.MarkAsOptimizeFromBytecode();
} else {
- CHECK(Compiler::Analyze(info.parse_info()));
+ CHECK(Compiler::ParseAndAnalyze(info.parse_info()));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
}
JSFunction::EnsureLiterals(function);
@@ -193,8 +185,8 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
// Compile the given machine graph instead of the source of the function
// and replace the JSFunction's code with the result.
Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
- Zone zone(function->GetIsolate()->allocator());
- ParseInfo parse_info(&zone, function);
+ Zone zone(function->GetIsolate()->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, handle(function->shared()));
CompilationInfo info(&parse_info, function);
CHECK(Parser::ParseStatic(info.parse_info()));
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index c257448b8a..a2436ad1d2 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -86,8 +86,9 @@ class GraphBuilderTester : public HandleAndZoneScope,
}
void Return(Node* value) {
- return_ =
- graph()->NewNode(common()->Return(), value, effect_, graph()->start());
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ return_ = graph()->NewNode(common()->Return(), zero, value, effect_,
+ graph()->start());
effect_ = NULL;
}
diff --git a/deps/v8/test/cctest/compiler/test-gap-resolver.cc b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
index 3b1cdb6d81..1cceb9cd59 100644
--- a/deps/v8/test/cctest/compiler/test-gap-resolver.cc
+++ b/deps/v8/test/cctest/compiler/test-gap-resolver.cc
@@ -13,15 +13,32 @@ namespace compiler {
const auto GetRegConfig = RegisterConfiguration::Turbofan;
-// Fragments the given operand into an equivalent set of operands to simplify
-// ParallelMove equivalence testing.
+// Fragments the given FP operand into an equivalent set of FP operands to
+// simplify ParallelMove equivalence testing.
void GetCanonicalOperands(const InstructionOperand& op,
std::vector<InstructionOperand>* fragments) {
CHECK(!kSimpleFPAliasing);
CHECK(op.IsFPLocationOperand());
- // TODO(bbudge) Split into float operands on platforms with non-simple FP
- // register aliasing.
- fragments->push_back(op);
+ const LocationOperand& loc = LocationOperand::cast(op);
+ MachineRepresentation rep = loc.representation();
+ int base = -1;
+ int aliases = GetRegConfig()->GetAliases(
+ rep, 0, MachineRepresentation::kFloat32, &base);
+ CHECK_LT(0, aliases);
+ CHECK_GE(4, aliases);
+ int index = -1;
+ int step = 1;
+ if (op.IsFPRegister()) {
+ index = loc.register_code() * aliases;
+ } else {
+ index = loc.index();
+ step = -1;
+ }
+ for (int i = 0; i < aliases; i++) {
+ fragments->push_back(AllocatedOperand(loc.location_kind(),
+ MachineRepresentation::kFloat32,
+ index + i * step));
+ }
}
// The state of our move interpreter is the mapping of operands to values. Note
@@ -36,7 +53,9 @@ class InterpreterState {
const InstructionOperand& dst = m->destination();
if (!kSimpleFPAliasing && src.IsFPLocationOperand() &&
dst.IsFPLocationOperand()) {
- // Canonicalize FP location-location moves.
+ // Canonicalize FP location-location moves by fragmenting them into
+ // an equivalent sequence of float32 moves, to simplify state
+ // equivalence testing.
std::vector<InstructionOperand> src_fragments;
GetCanonicalOperands(src, &src_fragments);
CHECK(!src_fragments.empty());
@@ -115,9 +134,11 @@ class InterpreterState {
int index;
if (!is_constant) {
const LocationOperand& loc_op = LocationOperand::cast(op);
- // Canonicalize FP location operand representations to kFloat64.
+ // Preserve FP representation when FP register aliasing is complex.
+ // Otherwise, canonicalize to kFloat64.
if (IsFloatingPoint(loc_op.representation())) {
- rep = MachineRepresentation::kFloat64;
+ rep = kSimpleFPAliasing ? MachineRepresentation::kFloat64
+ : loc_op.representation();
}
if (loc_op.IsAnyRegister()) {
index = loc_op.register_code();
@@ -321,9 +342,11 @@ class ParallelMoveCreator : public HandleAndZoneScope {
auto GetValidRegisterCode = [&conf](MachineRepresentation rep, int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
+ return conf->RegisterConfiguration::GetAllocatableFloatCode(index);
case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128:
return conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
+ case MachineRepresentation::kSimd128:
+ return conf->RegisterConfiguration::GetAllocatableSimd128Code(index);
default:
return conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
}
@@ -368,6 +391,118 @@ void RunTest(ParallelMove* pm, Zone* zone) {
CHECK_EQ(mi1.state(), mi2.state());
}
+TEST(Aliasing) {
+ // On platforms with simple aliasing, these parallel moves are ill-formed.
+ if (kSimpleFPAliasing) return;
+
+ ParallelMoveCreator pmc;
+ Zone* zone = pmc.main_zone();
+
+ auto s0 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 0);
+ auto s1 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 1);
+ auto s2 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 2);
+ auto s3 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 3);
+ auto s4 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 4);
+
+ auto d0 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat64, 0);
+ auto d1 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat64, 1);
+ auto d16 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat64, 16);
+
+ // Double slots must be odd to match frame allocation.
+ auto dSlot = AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kFloat64, 3);
+
+ // Cycles involving s- and d-registers.
+ {
+ std::vector<InstructionOperand> moves = {
+ s2, s0, // s2 <- s0
+ d0, d1 // d0 <- d1
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ {
+ std::vector<InstructionOperand> moves = {
+ d0, d1, // d0 <- d1
+ s2, s0 // s2 <- s0
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ {
+ std::vector<InstructionOperand> moves = {
+ s2, s1, // s2 <- s1
+ d0, d1 // d0 <- d1
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ {
+ std::vector<InstructionOperand> moves = {
+ d0, d1, // d0 <- d1
+ s2, s1 // s2 <- s1
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // Two cycles involving a single d-register.
+ {
+ std::vector<InstructionOperand> moves = {
+ d0, d1, // d0 <- d1
+ s2, s1, // s2 <- s1
+ s3, s0 // s3 <- s0
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // Cycle with a float move that must be deferred until after swaps.
+ {
+ std::vector<InstructionOperand> moves = {
+ d0, d1, // d0 <- d1
+ s2, s0, // s2 <- s0
+ s3, s4 // s3 <- s4 must be deferred
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // Cycles involving s-registers and a non-aliased d-register.
+ {
+ std::vector<InstructionOperand> moves = {
+ d16, d0, // d16 <- d0
+ s1, s2, // s1 <- s2
+ d1, d16 // d1 <- d16
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ {
+ std::vector<InstructionOperand> moves = {
+ s2, s1, // s1 <- s2
+ d0, d16, // d16 <- d0
+ d16, d1 // d1 <- d16
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ {
+ std::vector<InstructionOperand> moves = {
+ d0, d16, // d0 <- d16
+ d16, d1, // s2 <- s0
+ s3, s0 // d0 <- d1
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+ // Cycle involving aliasing registers and a slot.
+ {
+ std::vector<InstructionOperand> moves = {
+ dSlot, d0, // dSlot <- d0
+ d1, dSlot, // d1 <- dSlot
+ s0, s3 // s0 <- s3
+ };
+ RunTest(pmc.Create(moves), zone);
+ }
+}
+
TEST(FuzzResolver) {
ParallelMoveCreator pmc;
for (int size = 0; size < 80; ++size) {
diff --git a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
index 48be46ce5f..842a23bdbc 100644
--- a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
+++ b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
@@ -3,15 +3,15 @@
// found in the LICENSE file.
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
-#include "src/compiler/source-position.h"
#include "src/compiler/verifier.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 5265e476aa..15749b8950 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -269,7 +269,7 @@ TEST(InstructionAddGapMove) {
TEST(InstructionOperands) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
{
TestInstr* i = TestInstr::New(&zone, 101);
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index e9bf064750..023ef483ea 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/source-position.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index 604e696ab5..f504e549fd 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -38,7 +38,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
common(main_zone()),
deps(main_isolate(), main_zone()),
graph(main_zone()),
- typer(main_isolate(), &graph),
+ typer(main_isolate(), Typer::kNoFlags, &graph),
context_node(NULL),
flags(flags) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
@@ -169,11 +169,6 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
}
- void CheckInt32Constant(int32_t expected, Node* result) {
- CHECK_EQ(IrOpcode::kInt32Constant, result->opcode());
- CHECK_EQ(expected, OpParameter<int32_t>(result));
- }
-
void CheckNumberConstant(double expected, Node* result) {
CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
CHECK_EQ(expected, OpParameter<double>(result));
@@ -694,6 +689,7 @@ TEST(RemoveToNumberEffects) {
JSTypedLoweringTester R;
Node* effect_use = NULL;
+ Node* zero = R.graph.NewNode(R.common.NumberConstant(0));
for (int i = 0; i < 10; i++) {
Node* p0 = R.Parameter(Type::Number());
Node* ton = R.Unop(R.javascript.ToNumber(), p0);
@@ -724,10 +720,12 @@ TEST(RemoveToNumberEffects) {
R.context(), frame_state, ton, R.start());
break;
case 5:
- effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
+ effect_use =
+ R.graph.NewNode(R.common.Return(), zero, p0, ton, R.start());
break;
case 6:
- effect_use = R.graph.NewNode(R.common.Return(), ton, ton, R.start());
+ effect_use =
+ R.graph.NewNode(R.common.Return(), zero, ton, ton, R.start());
}
R.CheckEffectInput(R.start(), ton);
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index e58de67afc..a756254d82 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -107,7 +107,7 @@ class TestCode : public HandleAndZoneScope {
void VerifyForwarding(TestCode& code, int count, int* expected) {
v8::internal::AccountingAllocator allocator;
- Zone local_zone(&allocator);
+ Zone local_zone(&allocator, ZONE_NAME);
ZoneVector<RpoNumber> result(&local_zone);
JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_, true);
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 59ef5fdd25..fef3415984 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -43,7 +43,7 @@ static Handle<JSFunction> Compile(const char* source) {
TEST(TestLinkageCreate) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + b");
- ParseInfo parse_info(handles.main_zone(), function);
+ ParseInfo parse_info(handles.main_zone(), handle(function->shared()));
CompilationInfo info(&parse_info, function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
@@ -59,7 +59,7 @@ TEST(TestLinkageJSFunctionIncoming) {
Handle<JSFunction> function =
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(sources[i]))));
- ParseInfo parse_info(handles.main_zone(), function);
+ ParseInfo parse_info(handles.main_zone(), handle(function->shared()));
CompilationInfo info(&parse_info, function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
@@ -75,7 +75,7 @@ TEST(TestLinkageJSFunctionIncoming) {
TEST(TestLinkageJSCall) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
- ParseInfo parse_info(handles.main_zone(), function);
+ ParseInfo parse_info(handles.main_zone(), handle(function->shared()));
CompilationInfo info(&parse_info, function);
for (int i = 0; i < 32; i++) {
@@ -97,7 +97,7 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
Isolate* isolate = CcTest::InitIsolateOnce();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Callable callable = CodeFactory::ToNumber(isolate);
CompilationInfo info(ArrayVector("test"), isolate, &zone,
Code::ComputeFlags(Code::STUB));
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index fb61e20197..ffb0872269 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -116,7 +116,8 @@ class LoopFinderTester : HandleAndZoneScope {
}
Node* Return(Node* val, Node* effect, Node* control) {
- Node* ret = graph.NewNode(common.Return(), val, effect, control);
+ Node* zero = graph.NewNode(common.Int32Constant(0));
+ Node* ret = graph.NewNode(common.Return(), zero, val, effect, control);
end->ReplaceInput(0, ret);
return ret;
}
@@ -127,7 +128,7 @@ class LoopFinderTester : HandleAndZoneScope {
OFStream os(stdout);
os << AsRPO(graph);
}
- Zone zone(main_isolate()->allocator());
+ Zone zone(main_isolate()->allocator(), ZONE_NAME);
loop_tree = LoopFinder::BuildLoopTree(&graph, &zone);
}
return loop_tree;
@@ -696,7 +697,8 @@ TEST(LaEdgeMatrix1) {
Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
loop->ReplaceInput(1, if_true);
- Node* ret = t.graph.NewNode(t.common.Return(), p3, t.start, exit);
+ Node* zero = t.graph.NewNode(t.common.Int32Constant(0));
+ Node* ret = t.graph.NewNode(t.common.Return(), zero, p3, t.start, exit);
t.graph.SetEnd(ret);
Node* choices[] = {p1, phi, cond};
@@ -743,7 +745,9 @@ void RunEdgeMatrix2(int i) {
loop2->ReplaceInput(1, if_true2);
loop1->ReplaceInput(1, exit2);
- Node* ret = t.graph.NewNode(t.common.Return(), phi1, t.start, exit1);
+ Node* zero = t.graph.NewNode(t.common.Int32Constant(0));
+ Node* ret =
+ t.graph.NewNode(t.common.Return(), zero, phi1, t.start, exit1);
t.graph.SetEnd(ret);
Node* choices[] = {p1, phi1, cond1, phi2, cond2};
@@ -830,7 +834,8 @@ void RunEdgeMatrix3(int c1a, int c1b, int c1c, // line break
loop2->ReplaceInput(1, exit3);
loop1->ReplaceInput(1, exit2);
- Node* ret = t.graph.NewNode(t.common.Return(), phi1, t.start, exit1);
+ Node* zero = t.graph.NewNode(t.common.Int32Constant(0));
+ Node* ret = t.graph.NewNode(t.common.Return(), zero, phi1, t.start, exit1);
t.graph.SetEnd(ret);
// Mutate the graph according to the edge choices.
@@ -943,7 +948,8 @@ static void RunManyChainedLoops_i(int count) {
last = exit;
}
- Node* ret = t.graph.NewNode(t.common.Return(), t.p0, t.start, last);
+ Node* zero = t.graph.NewNode(t.common.Int32Constant(0));
+ Node* ret = t.graph.NewNode(t.common.Return(), zero, t.p0, t.start, last);
t.graph.SetEnd(ret);
// Verify loops.
@@ -962,6 +968,7 @@ static void RunManyNestedLoops_i(int count) {
Node* entry = t.start;
// Build loops.
+ Node* zero = t.graph.NewNode(t.common.Int32Constant(0));
for (int i = 0; i < count; i++) {
Node* loop = t.graph.NewNode(t.common.Loop(2), entry, t.start);
Node* phi = t.graph.NewNode(t.common.Phi(MachineRepresentation::kWord32, 2),
@@ -981,7 +988,7 @@ static void RunManyNestedLoops_i(int count) {
outer->ReplaceInput(1, exit);
} else {
// outer loop.
- Node* ret = t.graph.NewNode(t.common.Return(), t.p0, t.start, exit);
+ Node* ret = t.graph.NewNode(t.common.Return(), zero, t.p0, t.start, exit);
t.graph.SetEnd(ret);
}
outer = loop;
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index d97e038883..7ae14b54c8 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -32,7 +32,7 @@ struct TestHelper : public HandleAndZoneScope {
void CheckLoopAssignedCount(int expected, const char* var_name) {
// TODO(titzer): don't scope analyze every single time.
- ParseInfo parse_info(main_zone(), function);
+ ParseInfo parse_info(main_zone(), handle(function->shared()));
CompilationInfo info(&parse_info, function);
CHECK(Parser::ParseStatic(&parse_info));
diff --git a/deps/v8/test/cctest/compiler/test-multiple-return.cc b/deps/v8/test/cctest/compiler/test-multiple-return.cc
index 6cda32c792..39824b5155 100644
--- a/deps/v8/test/cctest/compiler/test-multiple-return.cc
+++ b/deps/v8/test/cctest/compiler/test-multiple-return.cc
@@ -65,7 +65,7 @@ CallDescriptor* GetCallDescriptor(Zone* zone, int return_count,
TEST(ReturnThreeValues) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
CallDescriptor* desc = GetCallDescriptor(&zone, 3, 2);
HandleAndZoneScope handles;
RawMachineAssembler m(handles.main_isolate(),
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index c5fc5b3c50..5137db4a52 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -142,7 +142,7 @@ void CheckInputs(Node* node, Node** inputs, int input_count) {
TEST(NodeUseIteratorReplaceUses) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
Node* n1 = graph.NewNode(&dummy_operator1, n0);
@@ -168,7 +168,7 @@ TEST(NodeUseIteratorReplaceUses) {
TEST(NodeUseIteratorReplaceUsesSelf) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
Node* n1 = graph.NewNode(&dummy_operator1, n0);
@@ -193,7 +193,7 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
TEST(ReplaceInput) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
Node* n1 = graph.NewNode(&dummy_operator0);
@@ -220,7 +220,7 @@ TEST(ReplaceInput) {
TEST(OwnedBy) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
{
@@ -271,7 +271,7 @@ TEST(OwnedBy) {
TEST(Uses) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -294,7 +294,7 @@ TEST(Uses) {
TEST(Inputs) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -322,7 +322,7 @@ TEST(Inputs) {
TEST(InsertInputs) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -397,7 +397,7 @@ TEST(InsertInputs) {
TEST(RemoveInput) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -428,7 +428,7 @@ TEST(RemoveInput) {
TEST(AppendInputsAndIterator) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -451,7 +451,7 @@ TEST(AppendInputsAndIterator) {
TEST(NullInputsSimple) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -479,7 +479,7 @@ TEST(NullInputsSimple) {
TEST(NullInputsAppended) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -503,7 +503,7 @@ TEST(NullInputsAppended) {
TEST(ReplaceUsesFromAppendedInputs) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -532,7 +532,7 @@ TEST(ReplaceUsesFromAppendedInputs) {
TEST(ReplaceInputMultipleUses) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* n0 = graph.NewNode(&dummy_operator0);
@@ -551,7 +551,7 @@ TEST(ReplaceInputMultipleUses) {
TEST(TrimInputCountInline) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
{
@@ -620,7 +620,7 @@ TEST(TrimInputCountInline) {
TEST(TrimInputCountOutOfLine1) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
{
@@ -715,7 +715,7 @@ TEST(TrimInputCountOutOfLine1) {
TEST(TrimInputCountOutOfLine2) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
{
@@ -785,7 +785,7 @@ TEST(TrimInputCountOutOfLine2) {
TEST(NullAllInputs) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
for (int i = 0; i < 2; i++) {
@@ -838,7 +838,7 @@ TEST(NullAllInputs) {
TEST(AppendAndTrim) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
Node* nodes[] = {
diff --git a/deps/v8/test/cctest/compiler/test-osr.cc b/deps/v8/test/cctest/compiler/test-osr.cc
deleted file mode 100644
index 9e3445ac0b..0000000000
--- a/deps/v8/test/cctest/compiler/test-osr.cc
+++ /dev/null
@@ -1,575 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen.h"
-#include "src/compiler/all-nodes.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/diamond.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/operator.h"
-#include "src/compiler/osr.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// TODO(titzer): move this method to a common testing place.
-
-static int CheckInputs(Node* node, Node* i0 = NULL, Node* i1 = NULL,
- Node* i2 = NULL, Node* i3 = NULL) {
- int count = 4;
- if (i3 == NULL) count = 3;
- if (i2 == NULL) count = 2;
- if (i1 == NULL) count = 1;
- if (i0 == NULL) count = 0;
- CHECK_EQ(count, node->InputCount());
- if (i0 != NULL) CHECK_EQ(i0, node->InputAt(0));
- if (i1 != NULL) CHECK_EQ(i1, node->InputAt(1));
- if (i2 != NULL) CHECK_EQ(i2, node->InputAt(2));
- if (i3 != NULL) CHECK_EQ(i3, node->InputAt(3));
- return count;
-}
-
-
-static Operator kIntLt(IrOpcode::kInt32LessThan, Operator::kPure,
- "Int32LessThan", 2, 0, 0, 1, 0, 0);
-static Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
- 0, 1, 0, 0);
-
-
-static const int kMaxOsrValues = 10;
-
-class OsrDeconstructorTester : public HandleAndZoneScope {
- public:
- explicit OsrDeconstructorTester(int num_values)
- : isolate(main_isolate()),
- common(main_zone()),
- graph(main_zone()),
- jsgraph(main_isolate(), &graph, &common, nullptr, nullptr, nullptr),
- start(graph.NewNode(common.Start(1))),
- p0(graph.NewNode(common.Parameter(0), start)),
- end(graph.NewNode(common.End(1), start)),
- osr_normal_entry(graph.NewNode(common.OsrNormalEntry(), start, start)),
- osr_loop_entry(graph.NewNode(common.OsrLoopEntry(), start, start)),
- self(graph.NewNode(common.Int32Constant(0xaabbccdd))) {
- CHECK(num_values <= kMaxOsrValues);
- graph.SetStart(start);
- for (int i = 0; i < num_values; i++) {
- osr_values[i] = graph.NewNode(common.OsrValue(i), osr_loop_entry);
- }
- }
-
- Isolate* isolate;
- CommonOperatorBuilder common;
- Graph graph;
- JSGraph jsgraph;
- Node* start;
- Node* p0;
- Node* end;
- Node* osr_normal_entry;
- Node* osr_loop_entry;
- Node* self;
- Node* osr_values[kMaxOsrValues];
-
- Node* NewOsrPhi(Node* loop, Node* incoming, int osr_value, Node* back1 = NULL,
- Node* back2 = NULL, Node* back3 = NULL) {
- int count = 5;
- if (back3 == NULL) count = 4;
- if (back2 == NULL) count = 3;
- if (back1 == NULL) count = 2;
- CHECK_EQ(loop->InputCount(), count);
- CHECK_EQ(osr_loop_entry, loop->InputAt(1));
-
- Node* inputs[6];
- inputs[0] = incoming;
- inputs[1] = osr_values[osr_value];
- if (count > 2) inputs[2] = back1;
- if (count > 3) inputs[3] = back2;
- if (count > 4) inputs[4] = back3;
- inputs[count] = loop;
- return graph.NewNode(common.Phi(MachineRepresentation::kTagged, count),
- count + 1, inputs);
- }
-
- Node* NewLoop(bool is_osr, int num_backedges, Node* entry = nullptr) {
- if (entry == nullptr) entry = osr_normal_entry;
- Node* loop = graph.NewNode(common.Loop(1), entry);
- if (is_osr) {
- loop->AppendInput(graph.zone(), osr_loop_entry);
- }
- for (int i = 0; i < num_backedges; i++) {
- loop->AppendInput(graph.zone(), loop);
- }
- NodeProperties::ChangeOp(loop, common.Loop(loop->InputCount()));
- return loop;
- }
-
- Node* NewOsrLoop(int num_backedges, Node* entry = NULL) {
- return NewLoop(true, num_backedges, entry);
- }
-
- void DeconstructOsr() {
- OsrHelper helper(0, 0);
- helper.Deconstruct(&jsgraph, &common, main_zone());
- AllNodes nodes(main_zone(), &graph);
- // Should be edited out.
- CHECK(!nodes.IsLive(osr_normal_entry));
- CHECK(!nodes.IsLive(osr_loop_entry));
- // No dangling nodes should be left over.
- for (Node* const node : nodes.reachable) {
- for (Node* const use : node->uses()) {
- CHECK(std::find(nodes.reachable.begin(), nodes.reachable.end(), use) !=
- nodes.reachable.end());
- }
- }
- }
-};
-
-
-TEST(Deconstruct_osr0) {
- OsrDeconstructorTester T(0);
-
- Node* loop = T.NewOsrLoop(1);
-
- T.graph.SetEnd(loop);
-
- T.DeconstructOsr();
-
- CheckInputs(loop, T.start, loop);
-}
-
-
-TEST(Deconstruct_osr1) {
- OsrDeconstructorTester T(1);
-
- Node* loop = T.NewOsrLoop(1);
- Node* osr_phi =
- T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
-
- Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, loop);
- T.graph.SetEnd(ret);
-
- T.DeconstructOsr();
-
- CheckInputs(loop, T.start, loop);
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
- CheckInputs(ret, osr_phi, T.start, loop);
-}
-
-
-TEST(Deconstruct_osr_remove_prologue) {
- OsrDeconstructorTester T(1);
- Diamond d(&T.graph, &T.common, T.p0);
- d.Chain(T.osr_normal_entry);
-
- Node* loop = T.NewOsrLoop(1, d.merge);
- Node* osr_phi =
- T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
-
- Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, loop);
- T.graph.SetEnd(ret);
-
- T.DeconstructOsr();
-
- CheckInputs(loop, T.start, loop);
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
- CheckInputs(ret, osr_phi, T.start, loop);
-
- // The control before the loop should have been removed.
- AllNodes nodes(T.main_zone(), &T.graph);
- CHECK(!nodes.IsLive(d.branch));
- CHECK(!nodes.IsLive(d.if_true));
- CHECK(!nodes.IsLive(d.if_false));
- CHECK(!nodes.IsLive(d.merge));
-}
-
-
-TEST(Deconstruct_osr_with_body1) {
- OsrDeconstructorTester T(1);
-
- Node* loop = T.NewOsrLoop(1);
-
- Node* branch = T.graph.NewNode(T.common.Branch(), T.p0, loop);
- Node* if_true = T.graph.NewNode(T.common.IfTrue(), branch);
- Node* if_false = T.graph.NewNode(T.common.IfFalse(), branch);
- loop->ReplaceInput(2, if_true);
-
- Node* osr_phi =
- T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
-
- Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, if_false);
- T.graph.SetEnd(ret);
-
- T.DeconstructOsr();
-
- CheckInputs(loop, T.start, if_true);
- CheckInputs(branch, T.p0, loop);
- CheckInputs(if_true, branch);
- CheckInputs(if_false, branch);
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
- CheckInputs(ret, osr_phi, T.start, if_false);
-}
-
-
-TEST(Deconstruct_osr_with_body2) {
- OsrDeconstructorTester T(1);
-
- Node* loop = T.NewOsrLoop(1);
-
- // Two chained branches in the the body of the loop.
- Node* branch1 = T.graph.NewNode(T.common.Branch(), T.p0, loop);
- Node* if_true1 = T.graph.NewNode(T.common.IfTrue(), branch1);
- Node* if_false1 = T.graph.NewNode(T.common.IfFalse(), branch1);
-
- Node* branch2 = T.graph.NewNode(T.common.Branch(), T.p0, if_true1);
- Node* if_true2 = T.graph.NewNode(T.common.IfTrue(), branch2);
- Node* if_false2 = T.graph.NewNode(T.common.IfFalse(), branch2);
- loop->ReplaceInput(2, if_true2);
-
- Node* osr_phi =
- T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant());
-
- Node* merge = T.graph.NewNode(T.common.Merge(2), if_false1, if_false2);
- Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, merge);
- T.graph.SetEnd(ret);
-
- T.DeconstructOsr();
-
- CheckInputs(loop, T.start, if_true2);
- CheckInputs(branch1, T.p0, loop);
- CheckInputs(branch2, T.p0, if_true1);
- CheckInputs(if_true1, branch1);
- CheckInputs(if_false1, branch1);
- CheckInputs(if_true2, branch2);
- CheckInputs(if_false2, branch2);
-
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(), loop);
- CheckInputs(ret, osr_phi, T.start, merge);
- CheckInputs(merge, if_false1, if_false2);
-}
-
-
-TEST(Deconstruct_osr_with_body3) {
- OsrDeconstructorTester T(1);
-
- Node* loop = T.NewOsrLoop(2);
-
- // Two branches that create two different backedges.
- Node* branch1 = T.graph.NewNode(T.common.Branch(), T.p0, loop);
- Node* if_true1 = T.graph.NewNode(T.common.IfTrue(), branch1);
- Node* if_false1 = T.graph.NewNode(T.common.IfFalse(), branch1);
-
- Node* branch2 = T.graph.NewNode(T.common.Branch(), T.p0, if_true1);
- Node* if_true2 = T.graph.NewNode(T.common.IfTrue(), branch2);
- Node* if_false2 = T.graph.NewNode(T.common.IfFalse(), branch2);
- loop->ReplaceInput(2, if_false1);
- loop->ReplaceInput(3, if_true2);
-
- Node* osr_phi =
- T.NewOsrPhi(loop, T.jsgraph.OneConstant(), 0, T.jsgraph.ZeroConstant(),
- T.jsgraph.ZeroConstant());
-
- Node* ret = T.graph.NewNode(T.common.Return(), osr_phi, T.start, if_false2);
- T.graph.SetEnd(ret);
-
- T.DeconstructOsr();
-
- CheckInputs(loop, T.start, if_false1, if_true2);
- CheckInputs(branch1, T.p0, loop);
- CheckInputs(branch2, T.p0, if_true1);
- CheckInputs(if_true1, branch1);
- CheckInputs(if_false1, branch1);
- CheckInputs(if_true2, branch2);
- CheckInputs(if_false2, branch2);
-
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.ZeroConstant(),
- T.jsgraph.ZeroConstant(), loop);
- CheckInputs(ret, osr_phi, T.start, if_false2);
-}
-
-
-struct While {
- OsrDeconstructorTester& t;
- Node* branch;
- Node* if_true;
- Node* exit;
- Node* loop;
-
- While(OsrDeconstructorTester& R, Node* cond, bool is_osr, int backedges = 1)
- : t(R) {
- loop = t.NewLoop(is_osr, backedges);
- branch = t.graph.NewNode(t.common.Branch(), cond, loop);
- if_true = t.graph.NewNode(t.common.IfTrue(), branch);
- exit = t.graph.NewNode(t.common.IfFalse(), branch);
- loop->ReplaceInput(loop->InputCount() - 1, if_true);
- }
-
- void Nest(While& that) {
- that.loop->ReplaceInput(that.loop->InputCount() - 1, exit);
- this->loop->ReplaceInput(0, that.if_true);
- }
-
- Node* Phi(Node* i1, Node* i2, Node* i3) {
- if (loop->InputCount() == 2) {
- return t.graph.NewNode(t.common.Phi(MachineRepresentation::kTagged, 2),
- i1, i2, loop);
- } else {
- return t.graph.NewNode(t.common.Phi(MachineRepresentation::kTagged, 3),
- i1, i2, i3, loop);
- }
- }
-};
-
-
-static Node* FindSuccessor(Node* node, IrOpcode::Value opcode) {
- for (Node* use : node->uses()) {
- if (use->opcode() == opcode) return use;
- }
- UNREACHABLE(); // should have been found.
- return nullptr;
-}
-
-
-TEST(Deconstruct_osr_nested1) {
- OsrDeconstructorTester T(1);
-
- While outer(T, T.p0, false);
- While inner(T, T.p0, true);
- inner.Nest(outer);
-
- Node* outer_phi = outer.Phi(T.p0, T.p0, nullptr);
- outer.branch->ReplaceInput(0, outer_phi);
-
- Node* osr_phi = inner.Phi(T.jsgraph.TrueConstant(), T.osr_values[0],
- T.jsgraph.FalseConstant());
- inner.branch->ReplaceInput(0, osr_phi);
- outer_phi->ReplaceInput(1, osr_phi);
-
- Node* ret =
- T.graph.NewNode(T.common.Return(), outer_phi, T.start, outer.exit);
- Node* end = T.graph.NewNode(T.common.End(1), ret);
- T.graph.SetEnd(end);
-
- T.DeconstructOsr();
-
- // Check structure of deconstructed graph.
- // Check inner OSR loop is directly connected to start.
- CheckInputs(inner.loop, T.start, inner.if_true);
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.FalseConstant(), inner.loop);
-
- // Check control transfer to copy of outer loop.
- Node* new_outer_loop = FindSuccessor(inner.exit, IrOpcode::kLoop);
- Node* new_outer_phi = FindSuccessor(new_outer_loop, IrOpcode::kPhi);
- CHECK_NE(new_outer_loop, outer.loop);
- CHECK_NE(new_outer_phi, outer_phi);
-
- CheckInputs(new_outer_loop, inner.exit, new_outer_loop->InputAt(1));
-
- // Check structure of outer loop.
- Node* new_outer_branch = FindSuccessor(new_outer_loop, IrOpcode::kBranch);
- CHECK_NE(new_outer_branch, outer.branch);
- CheckInputs(new_outer_branch, new_outer_phi, new_outer_loop);
- Node* new_outer_exit = FindSuccessor(new_outer_branch, IrOpcode::kIfFalse);
- Node* new_outer_if_true = FindSuccessor(new_outer_branch, IrOpcode::kIfTrue);
-
- // Check structure of return.
- end = T.graph.end();
- Node* new_ret = end->InputAt(0);
- CHECK_EQ(IrOpcode::kReturn, new_ret->opcode());
- CheckInputs(new_ret, new_outer_phi, T.start, new_outer_exit);
-
- // Check structure of inner loop.
- Node* new_inner_loop = FindSuccessor(new_outer_if_true, IrOpcode::kLoop);
- Node* new_inner_phi = FindSuccessor(new_inner_loop, IrOpcode::kPhi);
-
- CheckInputs(new_inner_phi, T.jsgraph.TrueConstant(),
- T.jsgraph.FalseConstant(), new_inner_loop);
- CheckInputs(new_outer_phi, osr_phi, new_inner_phi, new_outer_loop);
-}
-
-
-TEST(Deconstruct_osr_nested2) {
- OsrDeconstructorTester T(1);
-
- // Test multiple backedge outer loop.
- While outer(T, T.p0, false, 2);
- While inner(T, T.p0, true);
- inner.Nest(outer);
-
- Node* outer_phi = outer.Phi(T.p0, T.p0, T.p0);
- outer.branch->ReplaceInput(0, outer_phi);
-
- Node* osr_phi = inner.Phi(T.jsgraph.TrueConstant(), T.osr_values[0],
- T.jsgraph.FalseConstant());
- inner.branch->ReplaceInput(0, osr_phi);
- outer_phi->ReplaceInput(1, osr_phi);
- outer_phi->ReplaceInput(2, T.jsgraph.FalseConstant());
-
- Node* x_branch = T.graph.NewNode(T.common.Branch(), osr_phi, inner.exit);
- Node* x_true = T.graph.NewNode(T.common.IfTrue(), x_branch);
- Node* x_false = T.graph.NewNode(T.common.IfFalse(), x_branch);
-
- outer.loop->ReplaceInput(1, x_true);
- outer.loop->ReplaceInput(2, x_false);
-
- Node* ret =
- T.graph.NewNode(T.common.Return(), outer_phi, T.start, outer.exit);
- Node* end = T.graph.NewNode(T.common.End(1), ret);
- T.graph.SetEnd(end);
-
- T.DeconstructOsr();
-
- // Check structure of deconstructed graph.
- // Check inner OSR loop is directly connected to start.
- CheckInputs(inner.loop, T.start, inner.if_true);
- CheckInputs(osr_phi, T.osr_values[0], T.jsgraph.FalseConstant(), inner.loop);
-
- // Check control transfer to copy of outer loop.
- Node* new_merge = FindSuccessor(x_true, IrOpcode::kMerge);
- CHECK_EQ(new_merge, FindSuccessor(x_false, IrOpcode::kMerge));
- CheckInputs(new_merge, x_true, x_false);
-
- Node* new_outer_loop = FindSuccessor(new_merge, IrOpcode::kLoop);
- Node* new_outer_phi = FindSuccessor(new_outer_loop, IrOpcode::kPhi);
- CHECK_NE(new_outer_loop, outer.loop);
- CHECK_NE(new_outer_phi, outer_phi);
-
- Node* new_entry_phi = FindSuccessor(new_merge, IrOpcode::kPhi);
- CheckInputs(new_entry_phi, osr_phi, T.jsgraph.FalseConstant(), new_merge);
-
- CHECK_EQ(new_merge, new_outer_loop->InputAt(0));
-
- // Check structure of outer loop.
- Node* new_outer_branch = FindSuccessor(new_outer_loop, IrOpcode::kBranch);
- CHECK_NE(new_outer_branch, outer.branch);
- CheckInputs(new_outer_branch, new_outer_phi, new_outer_loop);
- Node* new_outer_exit = FindSuccessor(new_outer_branch, IrOpcode::kIfFalse);
- Node* new_outer_if_true = FindSuccessor(new_outer_branch, IrOpcode::kIfTrue);
-
- // Check structure of return.
- end = T.graph.end();
- Node* new_ret = end->InputAt(0);
- CHECK_EQ(IrOpcode::kReturn, new_ret->opcode());
- CheckInputs(new_ret, new_outer_phi, T.start, new_outer_exit);
-
- // Check structure of inner loop.
- Node* new_inner_loop = FindSuccessor(new_outer_if_true, IrOpcode::kLoop);
- Node* new_inner_phi = FindSuccessor(new_inner_loop, IrOpcode::kPhi);
-
- CheckInputs(new_inner_phi, T.jsgraph.TrueConstant(),
- T.jsgraph.FalseConstant(), new_inner_loop);
- CheckInputs(new_outer_phi, new_entry_phi, new_inner_phi,
- T.jsgraph.FalseConstant(), new_outer_loop);
-}
-
-
-Node* MakeCounter(JSGraph* jsgraph, Node* start, Node* loop) {
- int count = loop->InputCount();
- NodeVector tmp_inputs(jsgraph->graph()->zone());
- for (int i = 0; i < count; i++) {
- tmp_inputs.push_back(start);
- }
- tmp_inputs.push_back(loop);
-
- Node* phi = jsgraph->graph()->NewNode(
- jsgraph->common()->Phi(MachineRepresentation::kWord32, count), count + 1,
- &tmp_inputs[0]);
- Node* inc = jsgraph->graph()->NewNode(&kIntAdd, phi, jsgraph->OneConstant());
-
- for (int i = 1; i < count; i++) {
- phi->ReplaceInput(i, inc);
- }
- return phi;
-}
-
-
-TEST(Deconstruct_osr_nested3) {
- OsrDeconstructorTester T(1);
-
- // outermost loop.
- While loop0(T, T.p0, false, 1);
- Node* loop0_cntr = MakeCounter(&T.jsgraph, T.p0, loop0.loop);
- loop0.branch->ReplaceInput(0, loop0_cntr);
-
- // middle loop.
- Node* loop1 = T.graph.NewNode(T.common.Loop(1), loop0.if_true);
- Node* loop1_phi =
- T.graph.NewNode(T.common.Phi(MachineRepresentation::kTagged, 2),
- loop0_cntr, loop0_cntr, loop1);
-
- // innermost (OSR) loop.
- While loop2(T, T.p0, true, 1);
- loop2.loop->ReplaceInput(0, loop1);
-
- Node* loop2_cntr = MakeCounter(&T.jsgraph, loop1_phi, loop2.loop);
- loop2_cntr->ReplaceInput(1, T.osr_values[0]);
- Node* osr_phi = loop2_cntr;
- Node* loop2_inc = loop2_cntr->InputAt(2);
- loop2.branch->ReplaceInput(0, loop2_cntr);
-
- loop1_phi->ReplaceInput(1, loop2_cntr);
- loop0_cntr->ReplaceInput(1, loop2_cntr);
-
- // Branch to either the outer or middle loop.
- Node* branch = T.graph.NewNode(T.common.Branch(), loop2_cntr, loop2.exit);
- Node* if_true = T.graph.NewNode(T.common.IfTrue(), branch);
- Node* if_false = T.graph.NewNode(T.common.IfFalse(), branch);
-
- loop0.loop->ReplaceInput(1, if_true);
- loop1->AppendInput(T.graph.zone(), if_false);
- NodeProperties::ChangeOp(loop1, T.common.Loop(2));
-
- Node* ret =
- T.graph.NewNode(T.common.Return(), loop0_cntr, T.start, loop0.exit);
- Node* end = T.graph.NewNode(T.common.End(1), ret);
- T.graph.SetEnd(end);
-
- T.DeconstructOsr();
-
- // Check structure of deconstructed graph.
- // Check loop2 (OSR loop) is directly connected to start.
- CheckInputs(loop2.loop, T.start, loop2.if_true);
- CheckInputs(osr_phi, T.osr_values[0], loop2_inc, loop2.loop);
- CheckInputs(loop2.branch, osr_phi, loop2.loop);
- CheckInputs(loop2.if_true, loop2.branch);
- CheckInputs(loop2.exit, loop2.branch);
- CheckInputs(branch, osr_phi, loop2.exit);
- CheckInputs(if_true, branch);
- CheckInputs(if_false, branch);
-
- // Check structure of new_loop1.
- Node* new_loop1_loop = FindSuccessor(if_false, IrOpcode::kLoop);
- // TODO(titzer): check the internal copy of loop2.
- USE(new_loop1_loop);
-
- // Check structure of new_loop0.
- Node* new_loop0_loop_entry = FindSuccessor(if_true, IrOpcode::kMerge);
- Node* new_loop0_loop = FindSuccessor(new_loop0_loop_entry, IrOpcode::kLoop);
- // TODO(titzer): check the internal copies of loop1 and loop2.
-
- Node* new_loop0_branch = FindSuccessor(new_loop0_loop, IrOpcode::kBranch);
- Node* new_loop0_if_true = FindSuccessor(new_loop0_branch, IrOpcode::kIfTrue);
- Node* new_loop0_exit = FindSuccessor(new_loop0_branch, IrOpcode::kIfFalse);
-
- USE(new_loop0_if_true);
-
- Node* new_ret = T.graph.end()->InputAt(0);
- CHECK_EQ(IrOpcode::kReturn, new_ret->opcode());
-
- Node* new_loop0_phi = new_ret->InputAt(0);
- CHECK_EQ(IrOpcode::kPhi, new_loop0_phi->opcode());
- CHECK_EQ(new_loop0_loop, NodeProperties::GetControlInput(new_loop0_phi));
- CHECK_EQ(new_loop0_phi, FindSuccessor(new_loop0_loop, IrOpcode::kPhi));
-
- // Check that the return returns the phi from the OSR loop and control
- // depends on the copy of the outer loop0.
- CheckInputs(new_ret, new_loop0_phi, T.graph.start(), new_loop0_exit);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 242793340e..ab2a1f6dfe 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -84,8 +84,8 @@ class RepresentationChangerTester : public HandleAndZoneScope,
}
Node* Return(Node* input) {
- Node* n = graph()->NewNode(common()->Return(), input, graph()->start(),
- graph()->start());
+ Node* n = graph()->NewNode(common()->Return(), jsgraph()->Int32Constant(0),
+ input, graph()->start(), graph()->start());
return n;
}
@@ -137,138 +137,65 @@ TEST(BoolToBit_constant) {
r.CheckInt32Constant(false_bit, 0);
}
-
-TEST(BitToBool_constant) {
- RepresentationChangerTester r;
-
- for (int i = -5; i < 5; i++) {
- Node* node = r.jsgraph()->Int32Constant(i);
- Node* use = r.Return(node);
- Node* val = r.changer()->GetRepresentationFor(
- node, MachineRepresentation::kBit, Type::Boolean(), use,
- UseInfo(MachineRepresentation::kTagged, Truncation::None()));
- r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
- : r.isolate()->heap()->true_value());
- }
-}
-
-
TEST(ToTagged_constant) {
RepresentationChangerTester r;
- {
- FOR_FLOAT64_INPUTS(i) {
- Node* n = r.jsgraph()->Float64Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(), use,
- UseInfo(MachineRepresentation::kTagged, Truncation::None()));
- r.CheckNumberConstant(c, *i);
- }
- }
-
- {
- FOR_FLOAT64_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(), use,
- UseInfo(MachineRepresentation::kTagged, Truncation::None()));
- r.CheckNumberConstant(c, *i);
- }
- }
-
- {
- FOR_FLOAT32_INPUTS(i) {
- Node* n = r.jsgraph()->Float32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::None(), use,
- UseInfo(MachineRepresentation::kTagged, Truncation::None()));
- r.CheckNumberConstant(c, *i);
- }
+ for (double i : ValueHelper::float64_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kFloat64, Type::None(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, i);
}
- {
- FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(), use,
- UseInfo(MachineRepresentation::kTagged, Truncation::None()));
- r.CheckNumberConstant(c, *i);
- }
+ for (int i : ValueHelper::int32_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, i);
}
- {
- FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kTagged, Truncation::None()));
- r.CheckNumberConstant(c, *i);
- }
+ for (uint32_t i : ValueHelper::uint32_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kTagged, Truncation::None()));
+ r.CheckNumberConstant(c, i);
}
}
-
TEST(ToFloat64_constant) {
RepresentationChangerTester r;
- {
- FOR_FLOAT64_INPUTS(i) {
- Node* n = r.jsgraph()->Float64Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(), use,
- UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
- CHECK_EQ(n, c);
- }
- }
-
- {
- FOR_FLOAT64_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::None(), use,
- UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
- r.CheckFloat64Constant(c, *i);
- }
+ for (double i : ValueHelper::float64_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, i);
}
- {
- FOR_FLOAT32_INPUTS(i) {
- Node* n = r.jsgraph()->Float32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::None(), use,
- UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
- r.CheckFloat64Constant(c, *i);
- }
+ for (int i : ValueHelper::int32_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, i);
}
- {
- FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(), use,
- UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
- r.CheckFloat64Constant(c, *i);
- }
- }
-
- {
- FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
- r.CheckFloat64Constant(c, *i);
- }
+ for (uint32_t i : ValueHelper::uint32_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kFloat64, Truncation::None()));
+ r.CheckFloat64Constant(c, i);
}
}
@@ -284,102 +211,38 @@ static bool IsFloat32Uint32(uint32_t val) { return val <= (1 << 23); }
TEST(ToFloat32_constant) {
RepresentationChangerTester r;
- {
- FOR_FLOAT32_INPUTS(i) {
- Node* n = r.jsgraph()->Float32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::None(), use,
- UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
- CHECK_EQ(n, c);
- }
- }
-
- {
- FOR_FLOAT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::None(), use,
- UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
- r.CheckFloat32Constant(c, *i);
- }
- }
-
- {
- FOR_FLOAT32_INPUTS(i) {
- Node* n = r.jsgraph()->Float64Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::None(), use,
- UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
- r.CheckFloat32Constant(c, *i);
- }
- }
-
- {
- FOR_INT32_INPUTS(i) {
- if (!IsFloat32Int32(*i)) continue;
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(), use,
- UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
- r.CheckFloat32Constant(c, static_cast<float>(*i));
- }
- }
-
- {
- FOR_UINT32_INPUTS(i) {
- if (!IsFloat32Uint32(*i)) continue;
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
- r.CheckFloat32Constant(c, static_cast<float>(*i));
- }
+ for (double i : ValueHelper::float32_vector()) {
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::None(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
+ r.CheckFloat32Constant(c, i);
+ }
+
+ for (int i : ValueHelper::int32_vector()) {
+ if (!IsFloat32Int32(i)) continue;
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Signed32(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
+ r.CheckFloat32Constant(c, static_cast<float>(i));
+ }
+
+ for (uint32_t i : ValueHelper::uint32_vector()) {
+ if (!IsFloat32Uint32(i)) continue;
+ Node* n = r.jsgraph()->Constant(i);
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kFloat32, Truncation::None()));
+ r.CheckFloat32Constant(c, static_cast<float>(i));
}
}
-
TEST(ToInt32_constant) {
RepresentationChangerTester r;
-
- {
- FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Signed32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckInt32Constant(c, *i);
- }
- }
-
- {
- FOR_INT32_INPUTS(i) {
- if (!IsFloat32Int32(*i)) continue;
- Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::Signed32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckInt32Constant(c, *i);
- }
- }
-
- {
- FOR_INT32_INPUTS(i) {
- Node* n = r.jsgraph()->Float64Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::Signed32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckInt32Constant(c, *i);
- }
- }
-
{
FOR_INT32_INPUTS(i) {
Node* n = r.jsgraph()->Constant(*i);
@@ -392,70 +255,44 @@ TEST(ToInt32_constant) {
}
}
-
TEST(ToUint32_constant) {
RepresentationChangerTester r;
-
- {
- FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Int32Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kWord32, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckUint32Constant(c, *i);
- }
- }
-
- {
- FOR_UINT32_INPUTS(i) {
- if (!IsFloat32Uint32(*i)) continue;
- Node* n = r.jsgraph()->Float32Constant(static_cast<float>(*i));
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat32, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckUint32Constant(c, *i);
- }
- }
-
- {
- FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Float64Constant(*i);
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kFloat64, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckUint32Constant(c, *i);
- }
- }
-
- {
- FOR_UINT32_INPUTS(i) {
- Node* n = r.jsgraph()->Constant(static_cast<double>(*i));
- Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(
- n, MachineRepresentation::kTagged, Type::Unsigned32(), use,
- UseInfo(MachineRepresentation::kWord32, Truncation::None()));
- r.CheckUint32Constant(c, *i);
- }
+ FOR_UINT32_INPUTS(i) {
+ Node* n = r.jsgraph()->Constant(static_cast<double>(*i));
+ Node* use = r.Return(n);
+ Node* c = r.changer()->GetRepresentationFor(
+ n, MachineRepresentation::kTagged, Type::Unsigned32(), use,
+ UseInfo(MachineRepresentation::kWord32, Truncation::None()));
+ r.CheckUint32Constant(c, *i);
}
}
static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
- Type* from_type, MachineRepresentation to) {
+ Type* from_type, UseInfo use_info) {
RepresentationChangerTester r;
Node* n = r.Parameter();
Node* use = r.Return(n);
- Node* c = r.changer()->GetRepresentationFor(n, from, from_type, use,
- UseInfo(to, Truncation::None()));
+ Node* c =
+ r.changer()->GetRepresentationFor(n, from, from_type, use, use_info);
CHECK_NE(c, n);
CHECK_EQ(expected, c->opcode());
CHECK_EQ(n, c->InputAt(0));
+
+ if (expected == IrOpcode::kCheckedFloat64ToInt32) {
+ CheckForMinusZeroMode mode =
+ from_type->Maybe(Type::MinusZero())
+ ? use_info.minus_zero_check()
+ : CheckForMinusZeroMode::kDontCheckForMinusZero;
+ CHECK_EQ(mode, CheckMinusZeroModeOf(c->op()));
+ }
}
+static void CheckChange(IrOpcode::Value expected, MachineRepresentation from,
+ Type* from_type, MachineRepresentation to) {
+ CheckChange(expected, from, from_type, UseInfo(to, Truncation::None()));
+}
static void CheckTwoChanges(IrOpcode::Value expected2,
IrOpcode::Value expected1,
@@ -604,6 +441,32 @@ TEST(SignednessInWord32) {
MachineRepresentation::kWord32);
}
+static void TestMinusZeroCheck(IrOpcode::Value expected, Type* from_type) {
+ RepresentationChangerTester r;
+
+ CheckChange(expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSignedSmallAsWord32(
+ CheckForMinusZeroMode::kCheckForMinusZero));
+
+ CheckChange(expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSignedSmallAsWord32(
+ CheckForMinusZeroMode::kDontCheckForMinusZero));
+
+ CheckChange(expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSigned32AsWord32(
+ CheckForMinusZeroMode::kCheckForMinusZero));
+
+ CheckChange(expected, MachineRepresentation::kFloat64, from_type,
+ UseInfo::CheckedSigned32AsWord32(
+ CheckForMinusZeroMode::kDontCheckForMinusZero));
+}
+
+TEST(MinusZeroCheck) {
+ TestMinusZeroCheck(IrOpcode::kCheckedFloat64ToInt32, Type::NumberOrOddball());
+ // PlainNumber cannot be minus zero so the minus zero check should be
+ // eliminated.
+ TestMinusZeroCheck(IrOpcode::kCheckedFloat64ToInt32, Type::PlainNumber());
+}
TEST(Nops) {
RepresentationChangerTester r;
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 9c2b05dd4b..74a51b915a 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -124,7 +124,7 @@ class BytecodeGraphTester {
// TODO(mstarzinger): We should be able to prime CompilationInfo without
// having to instantiate a ParseInfo first. Fix this!
- ParseInfo parse_info(zone_, function);
+ ParseInfo parse_info(zone_, handle(function->shared()));
CompilationInfo compilation_info(&parse_info, function);
compilation_info.SetOptimizing();
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index aab8b4e86b..22e791a5f8 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -48,11 +48,9 @@ void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
.FromJust());
}
-const uint32_t kRestrictedInliningFlags =
- CompilationInfo::kNativeContextSpecializing;
+const uint32_t kRestrictedInliningFlags = 0;
-const uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
- CompilationInfo::kNativeContextSpecializing;
+const uint32_t kInlineFlags = CompilationInfo::kInliningEnabled;
} // namespace
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 84d7f714ae..64bd67cc0a 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -205,56 +205,6 @@ TEST(CallEval) {
T.CheckCall(T.Val(42), T.Val("x"), T.undefined());
}
-
-TEST(ContextLoadedFromActivation) {
- const char* script =
- "var x = 42;"
- "(function() {"
- " return function () { return x };"
- "})()";
-
- // Disable context specialization.
- FunctionTester T(script);
- v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
- v8::Context::Scope scope(context);
- v8::Local<v8::Value> value = CompileRun(script);
- i::Handle<i::Object> ofun = v8::Utils::OpenHandle(*value);
- i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
- jsfun->set_code(T.function->code());
- jsfun->set_shared(T.function->shared());
- jsfun->set_literals(T.function->literals());
- CHECK(context->Global()
- ->Set(context, v8_str("foo"), v8::Utils::CallableToLocal(jsfun))
- .FromJust());
- CompileRun("var x = 24;");
- ExpectInt32("foo();", 24);
-}
-
-
-TEST(BuiltinLoadedFromActivation) {
- const char* script =
- "var x = 42;"
- "(function() {"
- " return function () { return this; };"
- "})()";
-
- // Disable context specialization.
- FunctionTester T(script);
- v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
- v8::Context::Scope scope(context);
- v8::Local<v8::Value> value = CompileRun(script);
- i::Handle<i::Object> ofun = v8::Utils::OpenHandle(*value);
- i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
- jsfun->set_code(T.function->code());
- jsfun->set_shared(T.function->shared());
- jsfun->set_literals(T.function->literals());
- CHECK(context->Global()
- ->Set(context, v8_str("foo"), v8::Utils::CallableToLocal(jsfun))
- .FromJust());
- CompileRun("var x = 24;");
- ExpectObject("foo()", context->Global());
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-load-store.cc b/deps/v8/test/cctest/compiler/test-run-load-store.cc
index 2461129384..559e6fd02a 100644
--- a/deps/v8/test/cctest/compiler/test-run-load-store.cc
+++ b/deps/v8/test/cctest/compiler/test-run-load-store.cc
@@ -278,7 +278,7 @@ void RunUnalignedLoadStoreUnalignedAccess(MachineType rep) {
CType out_buffer[2];
byte* raw;
- for (int x = 0; x < sizeof(CType); x++) {
+ for (int x = 0; x < static_cast<int>(sizeof(CType)); x++) {
int y = sizeof(CType) - x;
raw = reinterpret_cast<byte*>(&in);
@@ -523,7 +523,7 @@ void RunLoadStoreSignExtend64(TestAlignment t) {
void RunLoadStoreZeroExtend64(TestAlignment t) {
if (kPointerSize < 8) return;
uint64_t buffer[5];
- RawMachineAssemblerTester<int64_t> m;
+ RawMachineAssemblerTester<uint64_t> m;
Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
if (t == TestAlignment::kAligned) {
Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
@@ -988,12 +988,13 @@ void TestRunOobCheckedLoad_pseudo(uint64_t x, bool length_is_immediate) {
for (uint32_t i = 0; i < kNumElems; i++) {
uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
uint32_t expected = buffer[i];
- CHECK_EQ(expected, m.Call(offset + pseudo_base, kLength));
+ CHECK_EQ(expected,
+ static_cast<uint32_t>(m.Call(offset + pseudo_base, kLength)));
}
// slightly out-of-bounds accesses.
- for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
- uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
+ for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint32_t offset = i * sizeof(int32_t);
CheckOobValue(m.Call(offset + pseudo_base, kLength));
}
@@ -1089,7 +1090,7 @@ void TestRunOobCheckedLoadT_pseudo(uint64_t x, bool length_is_immediate) {
}
// slightly out-of-bounds accesses.
- for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
CheckOobValue(result);
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 50b46d7d0e..2794aec51e 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -4161,6 +4161,26 @@ TEST(RunInt32PairAdd) {
}
}
+TEST(RunInt32PairAddUseOnlyHighWord) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
+
+ m.Return(m.Projection(1, m.Int32PairAdd(m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), m.Parameter(3))));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ CHECK_EQ(
+ static_cast<uint32_t>((*i + *j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32),
+ static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j >> 32))));
+ }
+ }
+}
+
void TestInt32PairAddWithSharedInput(int a, int b, int c, int d) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
@@ -4224,6 +4244,26 @@ TEST(RunInt32PairSub) {
}
}
+TEST(RunInt32PairSubUseOnlyHighWord) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
+
+ m.Return(m.Projection(1, m.Int32PairSub(m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), m.Parameter(3))));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ CHECK_EQ(
+ static_cast<uint32_t>((*i - *j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32),
+ static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j >> 32))));
+ }
+ }
+}
+
void TestInt32PairSubWithSharedInput(int a, int b, int c, int d) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
@@ -4287,6 +4327,26 @@ TEST(RunInt32PairMul) {
}
}
+TEST(RunInt32PairMulUseOnlyHighWord) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
+
+ m.Return(m.Projection(1, m.Int32PairMul(m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), m.Parameter(3))));
+
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ CHECK_EQ(
+ static_cast<uint32_t>((*i * *j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32),
+ static_cast<uint32_t>(*j & 0xffffffff),
+ static_cast<uint32_t>(*j >> 32))));
+ }
+ }
+}
+
void TestInt32PairMulWithSharedInput(int a, int b, int c, int d) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
@@ -4330,13 +4390,13 @@ TEST(RunWord32PairShl) {
uint32_t high;
uint32_t low;
- Node* PairAdd =
+ Node* PairShl =
m.Word32PairShl(m.Parameter(0), m.Parameter(1), m.Parameter(2));
m.StoreToPointer(&low, MachineRepresentation::kWord32,
- m.Projection(0, PairAdd));
+ m.Projection(0, PairShl));
m.StoreToPointer(&high, MachineRepresentation::kWord32,
- m.Projection(1, PairAdd));
+ m.Projection(1, PairShl));
m.Return(m.Int32Constant(74));
FOR_UINT64_INPUTS(i) {
@@ -4348,6 +4408,23 @@ TEST(RunWord32PairShl) {
}
}
+TEST(RunWord32PairShlUseOnlyHighWord) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ m.Return(m.Projection(
+ 1, m.Word32PairShl(m.Parameter(0), m.Parameter(1), m.Parameter(2))));
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ CHECK_EQ(
+ static_cast<uint32_t>((*i << j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j)));
+ }
+ }
+}
+
void TestWord32PairShlWithSharedInput(int a, int b) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
MachineType::Uint32());
@@ -4405,6 +4482,23 @@ TEST(RunWord32PairShr) {
}
}
+TEST(RunWord32PairShrUseOnlyHighWord) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ m.Return(m.Projection(
+ 1, m.Word32PairShr(m.Parameter(0), m.Parameter(1), m.Parameter(2))));
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ CHECK_EQ(
+ static_cast<uint32_t>((*i >> j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j)));
+ }
+ }
+}
+
TEST(RunWord32PairSar) {
BufferedRawMachineAssemblerTester<int32_t> m(
MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
@@ -4425,11 +4519,27 @@ TEST(RunWord32PairSar) {
for (uint32_t j = 0; j < 64; j++) {
m.Call(static_cast<uint32_t>(*i & 0xffffffff),
static_cast<uint32_t>(*i >> 32), j);
- CHECK_EQ(*i >> j, ToInt64(low, high));
+ CHECK_EQ(*i >> j, static_cast<int64_t>(ToInt64(low, high)));
}
}
}
+TEST(RunWord32PairSarUseOnlyHighWord) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ m.Return(m.Projection(
+ 1, m.Word32PairSar(m.Parameter(0), m.Parameter(1), m.Parameter(2))));
+
+ FOR_INT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ CHECK_EQ(
+ static_cast<uint32_t>((*i >> j) >> 32),
+ static_cast<uint32_t>(m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j)));
+ }
+ }
+}
#endif
TEST(RunDeadChangeFloat64ToInt32) {
@@ -6249,7 +6359,7 @@ TEST(RunTryTruncateFloat64ToUint64WithCheck) {
FOR_FLOAT64_INPUTS(i) {
if (*i < 18446744073709551616.0 && *i > -1) {
// Conversions within this range should succeed.
- CHECK_EQ(static_cast<uint64_t>(*i), m.Call(*i));
+ CHECK_EQ(static_cast<uint64_t>(*i), static_cast<uint64_t>(m.Call(*i)));
CHECK_NE(0, success);
} else {
m.Call(*i);
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 0f76b897c3..89629ec090 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -87,8 +87,16 @@ class RegisterPairs : public Pairs {
class Float32RegisterPairs : public Pairs {
public:
Float32RegisterPairs()
- : Pairs(100, GetRegConfig()->num_allocatable_aliased_double_registers(),
- GetRegConfig()->allocatable_double_codes()) {}
+ : Pairs(
+ 100,
+#if V8_TARGET_ARCH_ARM
+ // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
+ GetRegConfig()->num_allocatable_double_registers() / 2 - 2,
+#else
+ GetRegConfig()->num_allocatable_double_registers(),
+#endif
+ GetRegConfig()->allocatable_double_codes()) {
+ }
};
@@ -127,6 +135,10 @@ struct Allocator {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
int code = fp_regs[fp_offset++];
+#if V8_TARGET_ARCH_ARM
+ // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
+ if (type.representation() == MachineRepresentation::kFloat32) code *= 2;
+#endif
return LinkageLocation::ForRegister(code, type);
} else {
int offset = -1 - stack_offset;
@@ -258,7 +270,7 @@ Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
- Zone zone(inner->GetIsolate()->allocator());
+ Zone zone(inner->GetIsolate()->allocator(), ZONE_NAME);
int param_count = static_cast<int>(desc->ParameterCount());
GraphAndBuilders caller(&zone);
{
@@ -281,7 +293,9 @@ Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
// Build the call and return nodes.
Node* call =
b.graph()->NewNode(b.common()->Call(desc), param_count + 3, args);
- Node* ret = b.graph()->NewNode(b.common()->Return(), call, call, start);
+ Node* zero = b.graph()->NewNode(b.common()->Int32Constant(0));
+ Node* ret =
+ b.graph()->NewNode(b.common()->Return(), zero, call, call, start);
b.graph()->SetEnd(ret);
}
@@ -424,7 +438,7 @@ class Computer {
Handle<Code> inner = Handle<Code>::null();
{
// Build the graph for the computation.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
build(desc, raw);
@@ -439,7 +453,7 @@ class Computer {
Handle<Code> wrapper = Handle<Code>::null();
{
// Wrap the above code with a callable function that passes constants.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -471,7 +485,7 @@ class Computer {
Handle<Code> wrapper = Handle<Code>::null();
{
// Wrap the above code with a callable function that loads from {input}.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -509,7 +523,7 @@ class Computer {
static void TestInt32Sub(CallDescriptor* desc) {
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
GraphAndBuilders inner(&zone);
{
// Build the add function.
@@ -519,7 +533,9 @@ static void TestInt32Sub(CallDescriptor* desc) {
Node* p0 = b.graph()->NewNode(b.common()->Parameter(0), start);
Node* p1 = b.graph()->NewNode(b.common()->Parameter(1), start);
Node* add = b.graph()->NewNode(b.machine()->Int32Sub(), p0, p1);
- Node* ret = b.graph()->NewNode(b.common()->Return(), add, start, start);
+ Node* zero = b.graph()->NewNode(b.common()->Int32Constant(0));
+ Node* ret =
+ b.graph()->NewNode(b.common()->Return(), zero, add, start, start);
b.graph()->SetEnd(ret);
}
@@ -549,7 +565,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Handle<Code> inner = Handle<Code>::null();
{
// Writes all parameters into the output buffer.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
Node* base = raw.PointerConstant(output);
@@ -566,7 +582,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
Handle<Code> wrapper = Handle<Code>::null();
{
// Loads parameters from the input buffer and calls the above code.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -606,7 +622,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
static void Test_RunInt32SubWithRet(int retreg) {
Int32Signature sig(2);
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
RegisterPairs pairs;
while (pairs.More()) {
int parray[2];
@@ -657,7 +673,7 @@ TEST(Run_Int32Sub_all_allocatable_single) {
RegisterPairs pairs;
while (pairs.More()) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
int parray[1];
int rarray[1];
pairs.Next(&rarray[0], &parray[0], true);
@@ -675,7 +691,7 @@ TEST(Run_CopyTwentyInt32_all_allocatable_pairs) {
RegisterPairs pairs;
while (pairs.More()) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
int parray[2];
int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
pairs.Next(&parray[0], &parray[1], false);
@@ -726,7 +742,7 @@ static void Test_Int32_WeightedSum_of_size(int count) {
for (int p0 = 0; p0 < Register::kNumRegisters; p0++) {
if (GetRegConfig()->IsAllocatableGeneralCode(p0)) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
int parray[] = {p0};
int rarray[] = {GetRegConfig()->GetAllocatableGeneralCode(0)};
@@ -789,7 +805,7 @@ void Test_Int32_Select() {
RegisterConfig config(params, rets);
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
for (int i = which + 1; i <= 64; i++) {
Int32Signature sig(i);
@@ -828,7 +844,7 @@ TEST(Int64Select_registers) {
RegisterPairs pairs;
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
while (pairs.More()) {
int parray[2];
pairs.Next(&parray[0], &parray[1], false);
@@ -853,7 +869,7 @@ TEST(Float32Select_registers) {
Float32RegisterPairs pairs;
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
while (pairs.More()) {
int parray[2];
pairs.Next(&parray[0], &parray[1], false);
@@ -876,7 +892,7 @@ TEST(Float64Select_registers) {
Float64RegisterPairs pairs;
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
while (pairs.More()) {
int parray[2];
pairs.Next(&parray[0], &parray[1], false);
@@ -898,7 +914,7 @@ TEST(Float32Select_stack_params_return_reg) {
RegisterConfig config(params, rets);
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
for (int count = 1; count < 6; count++) {
ArgsBuffer<float32>::Sig sig(count);
CallDescriptor* desc = config.Create(&zone, &sig);
@@ -919,7 +935,7 @@ TEST(Float64Select_stack_params_return_reg) {
RegisterConfig config(params, rets);
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
for (int count = 1; count < 6; count++) {
ArgsBuffer<float64>::Sig sig(count);
CallDescriptor* desc = config.Create(&zone, &sig);
@@ -942,7 +958,7 @@ static void Build_Select_With_Call(CallDescriptor* desc,
{
Isolate* isolate = CcTest::InitIsolateOnce();
// Build the actual select.
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
raw.Return(raw.Parameter(which));
@@ -971,7 +987,7 @@ TEST(Float64StackParamsToStackParams) {
Allocator rets(nullptr, 0, rarray, 1);
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
ArgsBuffer<float64>::Sig sig(2);
RegisterConfig config(params, rets);
CallDescriptor* desc = config.Create(&zone, &sig);
@@ -1026,7 +1042,7 @@ void MixedParamTest(int start) {
for (int which = 0; which < num_params; which++) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
HandleScope scope(isolate);
MachineSignature::Builder builder(&zone, 1, num_params);
builder.AddReturn(params[which]);
@@ -1037,7 +1053,7 @@ void MixedParamTest(int start) {
Handle<Code> select;
{
// build the select.
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
RawMachineAssembler raw(isolate, &graph, desc);
raw.Return(raw.Parameter(which));
@@ -1054,7 +1070,7 @@ void MixedParamTest(int start) {
CSignature0<int32_t> csig;
{
// Wrap the select code with a callable function that passes constants.
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
@@ -1135,7 +1151,7 @@ void TestStackSlot(MachineType slot_type, T expected) {
Allocator ralloc(rarray_gp, 1, rarray_fp, 1);
RegisterConfig config(palloc, ralloc);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
MachineSignature::Builder builder(&zone, 1, 12);
builder.AddReturn(MachineType::Int32());
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index b34e5d4a51..8b6519eac5 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -47,10 +47,11 @@ TEST(RunStringLengthStub) {
Node* vectorParam = graph.NewNode(common.Parameter(4), start);
Node* theCode = graph.NewNode(common.HeapConstant(code));
Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
+ Node* zero = graph.NewNode(common.Int32Constant(0));
Node* call =
graph.NewNode(common.Call(descriptor), theCode, receiverParam, nameParam,
slotParam, vectorParam, dummyContext, start, start);
- Node* ret = graph.NewNode(common.Return(), call, call, start);
+ Node* ret = graph.NewNode(common.Return(), zero, call, call, start);
Node* end = graph.NewNode(common.End(1), ret);
graph.SetStart(start);
graph.SetEnd(end);
diff --git a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
index 0b23669cf7..c3f42388d4 100644
--- a/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-wasm-machops.cc
@@ -51,7 +51,7 @@ static void RunLoadStoreRelocation(MachineType rep) {
raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
new_raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
}
- int32_t OK = 0x29000;
+ uint32_t OK = 0x29000;
RawMachineAssemblerTester<uint32_t> m;
Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(raw),
RelocInfo::WASM_MEMORY_REFERENCE);
@@ -166,5 +166,5 @@ TEST(Uint32LessThanRelocation) {
UpdateMemoryReferences(code, reinterpret_cast<Address>(1234),
reinterpret_cast<Address>(1234), 0x200, 0x400);
// Check that after limit is increased, index is within bounds.
- CHECK_EQ(0xaced, m.Call());
+ CHECK_EQ(0xacedu, m.Call());
}
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index a01de69291..99d39ca7ab 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -32,6 +32,8 @@
V(Regress587004) \
V(Regress538257) \
V(Regress589413) \
+ V(Regress658718) \
+ V(Regress670675) \
V(WriteBarriersInCopyJSObject)
#define HEAP_TEST(Name) \
@@ -39,6 +41,11 @@
#Name, true, true); \
void v8::internal::HeapTester::Test##Name()
+#define UNINITIALIZED_HEAP_TEST(Name) \
+ CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \
+ #Name, true, false); \
+ void v8::internal::HeapTester::Test##Name()
+
#define THREADED_HEAP_TEST(Name) \
RegisterThreadedTest register_##Name(v8::internal::HeapTester::Test##Name, \
#Name); \
diff --git a/deps/v8/test/cctest/heap/heap-utils.cc b/deps/v8/test/cctest/heap/heap-utils.cc
index 4f7d088a94..c44f82f1d9 100644
--- a/deps/v8/test/cctest/heap/heap-utils.cc
+++ b/deps/v8/test/cctest/heap/heap-utils.cc
@@ -38,8 +38,7 @@ std::vector<Handle<FixedArray>> FillOldSpacePageWithFixedArrays(Heap* heap,
const int kArrayLen = heap::FixedArrayLenFromSize(kArraySize);
CHECK_EQ(Page::kAllocatableMemory % kArraySize, 0);
Handle<FixedArray> array;
- for (size_t allocated = 0;
- allocated != (Page::kAllocatableMemory - remainder);
+ for (int allocated = 0; allocated != (Page::kAllocatableMemory - remainder);
allocated += array->Size()) {
if (allocated == (Page::kAllocatableMemory - kArraySize)) {
array = isolate->factory()->NewFixedArray(
@@ -170,6 +169,10 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
}
void SimulateFullSpace(v8::internal::PagedSpace* space) {
+ i::MarkCompactCollector* collector = space->heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
space->EmptyAllocationInfo();
space->ResetFreeList();
space->ClearStats();
@@ -189,6 +192,21 @@ void GcAndSweep(Heap* heap, AllocationSpace space) {
}
}
+void ForceEvacuationCandidate(Page* page) {
+ CHECK(FLAG_manual_evacuation_candidates_selection);
+ page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ PagedSpace* space = static_cast<PagedSpace*>(page->owner());
+ Address top = space->top();
+ Address limit = space->limit();
+ if (top < limit && Page::FromAllocationAreaAddress(top) == page) {
+ // Create filler object to keep page iterable if it was iterable.
+ int remaining = static_cast<int>(limit - top);
+ space->heap()->CreateFillerObjectAt(top, remaining,
+ ClearRecordedSlots::kNo);
+ space->SetTopAndLimit(nullptr, nullptr);
+ }
+}
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/heap-utils.h b/deps/v8/test/cctest/heap/heap-utils.h
index 2f704cb422..a494f54210 100644
--- a/deps/v8/test/cctest/heap/heap-utils.h
+++ b/deps/v8/test/cctest/heap/heap-utils.h
@@ -50,6 +50,8 @@ void AbandonCurrentlyFreeMemory(PagedSpace* space);
void GcAndSweep(Heap* heap, AllocationSpace space);
+void ForceEvacuationCandidate(Page* page);
+
} // namespace heap
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index c01827eee1..d7031e876d 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -60,10 +60,12 @@ AllocationResult v8::internal::HeapTester::AllocateAfterFailures() {
heap->AllocateFixedArray(10000, TENURED).ToObjectChecked();
// Large object space.
- static const int kLargeObjectSpaceFillerLength = 3 * (Page::kPageSize / 10);
- static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
- kLargeObjectSpaceFillerLength);
- CHECK(kLargeObjectSpaceFillerSize > heap->old_space()->AreaSize());
+ static const size_t kLargeObjectSpaceFillerLength =
+ 3 * (Page::kPageSize / 10);
+ static const size_t kLargeObjectSpaceFillerSize =
+ FixedArray::SizeFor(kLargeObjectSpaceFillerLength);
+ CHECK_GT(kLargeObjectSpaceFillerSize,
+ static_cast<size_t>(heap->old_space()->AreaSize()));
while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
heap->AllocateFixedArray(
kLargeObjectSpaceFillerLength, TENURED).ToObjectChecked();
@@ -116,11 +118,8 @@ void TestGetter(
v8::internal::HeapTester::TestAllocateAfterFailures()));
}
-
-void TestSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
+void TestSetter(v8::Local<v8::Name> name, v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<v8::Boolean>& info) {
UNREACHABLE();
}
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 173d1fa85f..0f3663041f 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -127,7 +127,7 @@ TEST(ArrayBuffer_Compaction) {
heap::GcAndSweep(heap, NEW_SPACE);
Page* page_before_gc = Page::FromAddress(buf1->address());
- page_before_gc->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(page_before_gc);
CHECK(IsTracked(*buf1));
CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index c69d391f90..19474c3e95 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -749,46 +749,6 @@ TEST(DeleteWeakGlobalHandle) {
CHECK(WeakPointerCleared);
}
-TEST(DoNotPromoteWhiteObjectsOnScavenge) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
-
- HandleScope scope(isolate);
- Handle<Object> white = factory->NewStringFromStaticChars("white");
-
- CHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*white))));
-
- CcTest::CollectGarbage(NEW_SPACE);
-
- CHECK(heap->InNewSpace(*white));
-}
-
-TEST(PromoteGreyOrBlackObjectsOnScavenge) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
-
- HandleScope scope(isolate);
- Handle<Object> marked = factory->NewStringFromStaticChars("marked");
-
- IncrementalMarking* marking = heap->incremental_marking();
- marking->Stop();
- heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
- i::GarbageCollectionReason::kTesting);
- while (
- Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(*marked)))) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
- IncrementalMarking::DO_NOT_FORCE_COMPLETION, StepOrigin::kV8);
- }
-
- CcTest::CollectGarbage(NEW_SPACE);
-
- CHECK(!heap->InNewSpace(*marked));
-}
-
TEST(BytecodeArray) {
static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a};
static const int kRawBytesSize = sizeof(kRawBytes);
@@ -831,7 +791,7 @@ TEST(BytecodeArray) {
// Perform a full garbage collection and force the constant pool to be on an
// evacuation candidate.
Page* evac_page = Page::FromAddress(constant_pool->address());
- evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(evac_page);
CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
// BytecodeArray should survive.
@@ -1078,7 +1038,7 @@ TEST(JSArray) {
// Set array length to 0.
JSArray::SetLength(array, 0);
- CHECK_EQ(Smi::FromInt(0), array->length());
+ CHECK_EQ(Smi::kZero, array->length());
// Must be in fast mode.
CHECK(array->HasFastSmiOrObjectElements());
@@ -1319,8 +1279,10 @@ UNINITIALIZED_TEST(TestCodeFlushing) {
}
// foo should no longer be in the compilation cache
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
+ CHECK(!function->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
// Call foo to get it recompiled.
CompileRun("foo()");
CHECK(function->shared()->is_compiled());
@@ -1367,7 +1329,8 @@ TEST(TestCodeFlushingPreAged) {
// The code was only run once, so it should be pre-aged and collected on the
// next GC.
CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
// Execute the function again twice, and ensure it is reset to the young age.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1387,8 +1350,10 @@ TEST(TestCodeFlushingPreAged) {
}
// foo should no longer be in the compilation cache
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
+ CHECK(!function->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
// Call foo to get it recompiled.
CompileRun("foo()");
CHECK(function->shared()->is_compiled());
@@ -1436,8 +1401,10 @@ TEST(TestCodeFlushingIncremental) {
heap::SimulateIncrementalMarking(CcTest::heap());
CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
}
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
+ CHECK(!function->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
// This compile will compile the function again.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1530,8 +1497,10 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// Simulate one final GC to make sure the candidate queue is sane.
CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
+ CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
+ CHECK(!function->is_compiled() || function->IsOptimized() ||
+ function->IsInterpreted());
}
@@ -1584,7 +1553,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
// is running so that incremental marking aborts and code flushing is
// disabled.
int position = function->shared()->start_position();
- Handle<Object> breakpoint_object(Smi::FromInt(0), isolate);
+ Handle<Object> breakpoint_object(Smi::kZero, isolate);
EnableDebugger(CcTest::isolate());
isolate->debug()->SetBreakPoint(function, breakpoint_object, &position);
isolate->debug()->ClearBreakPoint(breakpoint_object);
@@ -1646,6 +1615,7 @@ TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
TEST(CompilationCacheCachingBehavior) {
// If we do not flush code, or have the compilation cache turned off, this
// test is invalid.
+ i::FLAG_allow_natives_syntax = true;
if (!FLAG_flush_code || !FLAG_compilation_cache) {
return;
}
@@ -1662,7 +1632,7 @@ TEST(CompilationCacheCachingBehavior) {
" var y = 42;"
" var z = x + y;"
"};"
- "foo()";
+ "foo();";
Handle<String> source = factory->InternalizeUtf8String(raw_source);
Handle<Context> native_context = isolate->native_context();
@@ -2369,7 +2339,7 @@ TEST(GrowAndShrinkNewSpace) {
}
// Explicitly growing should double the space capacity.
- intptr_t old_capacity, new_capacity;
+ size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
new_space->Grow();
new_capacity = new_space->TotalCapacity();
@@ -2417,7 +2387,7 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
v8::HandleScope scope(CcTest::isolate());
NewSpace* new_space = heap->new_space();
- intptr_t old_capacity, new_capacity;
+ size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
new_space->Grow();
new_capacity = new_space->TotalCapacity();
@@ -2697,8 +2667,8 @@ TEST(InstanceOfStubWriteBarrier) {
namespace {
int GetProfilerTicks(SharedFunctionInfo* shared) {
- return FLAG_ignition ? shared->profiler_ticks()
- : shared->code()->profiler_ticks();
+ return FLAG_ignition || FLAG_turbo ? shared->profiler_ticks()
+ : shared->code()->profiler_ticks();
}
} // namespace
@@ -4118,6 +4088,7 @@ TEST(Regress165495) {
TEST(Regress169209) {
+ i::FLAG_always_opt = false;
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
@@ -4135,11 +4106,15 @@ TEST(Regress169209) {
{
HandleScope inner_scope(isolate);
LocalContext env;
- CompileRun("function f() { return 'foobar'; }"
- "function g(x) { if (x) f(); }"
- "f();"
- "g(false);"
- "g(false);");
+ CompileRun(
+ "function f() { return 'foobar'; }"
+ "function g(x) { if (x) f(); }"
+ "f();"
+ "%BaselineFunctionOnNextCall(f);"
+ "f();"
+ "g(false);"
+ "%BaselineFunctionOnNextCall(g);"
+ "g(false);");
Handle<JSFunction> f = Handle<JSFunction>::cast(
v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
@@ -4159,8 +4134,11 @@ TEST(Regress169209) {
{
HandleScope inner_scope(isolate);
LocalContext env;
- CompileRun("function flushMe() { return 0; }"
- "flushMe(1);");
+ CompileRun(
+ "function flushMe() { return 0; }"
+ "flushMe(1);"
+ "%BaselineFunctionOnNextCall(flushMe);"
+ "flushMe(1);");
Handle<JSFunction> f = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CcTest::global()
@@ -4398,7 +4376,7 @@ TEST(Regress514122) {
// Heap is ready, force {lit_page} to become an evacuation candidate and
// simulate incremental marking to enqueue optimized code map.
FLAG_manual_evacuation_candidates_selection = true;
- evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(evac_page);
heap::SimulateIncrementalMarking(heap);
// No matter whether reachable or not, {boomer} is doomed.
@@ -4597,7 +4575,7 @@ TEST(LargeObjectSlotRecording) {
heap::SimulateFullSpace(heap->old_space());
Handle<FixedArray> lit = isolate->factory()->NewFixedArray(4, TENURED);
Page* evac_page = Page::FromAddress(lit->address());
- evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(evac_page);
FixedArray* old_location = *lit;
// Allocate a large object.
@@ -5603,8 +5581,7 @@ HEAP_TEST(Regress538257) {
heap->CanExpandOldGeneration(old_space->AreaSize());
i++) {
objects[i] = i_isolate->factory()->NewFixedArray(kFixedArrayLen, TENURED);
- Page::FromAddress(objects[i]->address())
- ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(Page::FromAddress(objects[i]->address()));
}
heap::SimulateFullSpace(old_space);
heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
@@ -5701,7 +5678,8 @@ UNINITIALIZED_TEST(PromotionQueue) {
CHECK(new_space->IsAtMaximumCapacity());
- CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
+ CHECK_EQ(static_cast<size_t>(i::FLAG_min_semi_space_size * MB),
+ new_space->TotalCapacity());
// Call the scavenger two times to get an empty new space
heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
@@ -5717,7 +5695,8 @@ UNINITIALIZED_TEST(PromotionQueue) {
}
heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
- CHECK(i::FLAG_min_semi_space_size * MB == new_space->TotalCapacity());
+ CHECK_EQ(static_cast<size_t>(i::FLAG_min_semi_space_size * MB),
+ new_space->TotalCapacity());
// Fill-up the first semi-space page.
heap::FillUpOnePage(new_space);
@@ -5755,13 +5734,13 @@ TEST(Regress388880) {
Representation::Tagged(), OMIT_TRANSITION)
.ToHandleChecked();
- int desired_offset = Page::kPageSize - map1->instance_size();
+ size_t desired_offset = Page::kPageSize - map1->instance_size();
// Allocate padding objects in old pointer space so, that object allocated
// afterwards would end at the end of the page.
heap::SimulateFullSpace(heap->old_space());
- int padding_size = desired_offset - Page::kObjectStartOffset;
- heap::CreatePadding(heap, padding_size, TENURED);
+ size_t padding_size = desired_offset - Page::kObjectStartOffset;
+ heap::CreatePadding(heap, static_cast<int>(padding_size), TENURED);
Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED);
o->set_properties(*factory->empty_fixed_array());
@@ -6449,30 +6428,6 @@ TEST(Regress519319) {
}
-HEAP_TEST(TestMemoryReducerSampleJsCalls) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Heap* heap = CcTest::heap();
- Isolate* isolate = CcTest::i_isolate();
- MemoryReducer* memory_reducer = heap->memory_reducer_;
- memory_reducer->SampleAndGetJsCallsPerMs(0);
- isolate->IncrementJsCallsFromApiCounter();
- isolate->IncrementJsCallsFromApiCounter();
- isolate->IncrementJsCallsFromApiCounter();
- double calls_per_ms = memory_reducer->SampleAndGetJsCallsPerMs(1);
- CheckDoubleEquals(3, calls_per_ms);
-
- calls_per_ms = memory_reducer->SampleAndGetJsCallsPerMs(2);
- CheckDoubleEquals(0, calls_per_ms);
-
- isolate->IncrementJsCallsFromApiCounter();
- isolate->IncrementJsCallsFromApiCounter();
- isolate->IncrementJsCallsFromApiCounter();
- isolate->IncrementJsCallsFromApiCounter();
- calls_per_ms = memory_reducer->SampleAndGetJsCallsPerMs(4);
- CheckDoubleEquals(2, calls_per_ms);
-}
-
HEAP_TEST(Regress587004) {
FLAG_concurrent_sweeping = false;
#ifdef VERIFY_HEAP
@@ -6566,7 +6521,7 @@ HEAP_TEST(Regress589413) {
AlwaysAllocateScope always_allocate(isolate);
Handle<HeapObject> ec_obj = factory->NewFixedArray(5000, TENURED);
Page* ec_page = Page::FromAddress(ec_obj->address());
- ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(ec_page);
// Make all arrays point to evacuation candidate so that
// slots are recorded for them.
for (size_t j = 0; j < arrays.size(); j++) {
@@ -6773,8 +6728,7 @@ TEST(Regress631969) {
heap::SimulateFullSpace(heap->old_space());
Handle<String> s1 = factory->NewStringFromStaticChars("123456789", TENURED);
Handle<String> s2 = factory->NewStringFromStaticChars("01234", TENURED);
- Page::FromAddress(s1->address())
- ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(Page::FromAddress(s1->address()));
heap::SimulateIncrementalMarking(heap, false);
@@ -6963,49 +6917,6 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
heap::GcAndSweep(heap, OLD_SPACE);
}
-TEST(SlotFilteringAfterBlackAreas) {
- FLAG_black_allocation = true;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Heap* heap = CcTest::heap();
- Isolate* isolate = heap->isolate();
- MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
-
- i::MarkCompactCollector* collector = heap->mark_compact_collector();
- i::IncrementalMarking* marking = heap->incremental_marking();
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
- }
- CHECK(marking->IsMarking() || marking->IsStopped());
- if (marking->IsStopped()) {
- heap->StartIncrementalMarking(i::Heap::kNoGCFlags,
- i::GarbageCollectionReason::kTesting);
- }
- CHECK(marking->IsMarking());
- marking->StartBlackAllocationForTesting();
-
- // Ensure that we allocate a new page, set up a bump pointer area, and
- // perform the allocation in a black area.
- heap::SimulateFullSpace(heap->old_space());
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(10, TENURED);
- Page* page = Page::FromAddress(array->address());
-
- // After allocation we empty the allocation info to limit the black area
- // only on the allocated array.
- heap->old_space()->EmptyAllocationInfo();
-
- // Slots in the black area are part of the black object.
- CHECK(mark_compact_collector->IsSlotInBlackObject(page, array->address()));
- CHECK(mark_compact_collector->IsSlotInBlackObject(
- page, array->address() + array->Size() - kPointerSize));
-
- // Slots after the black area are not part of the black object and have to
- // be filtered out.
- CHECK(!mark_compact_collector->IsSlotInBlackObject(
- page, array->address() + array->Size()));
-}
-
TEST(Regress618958) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -7077,7 +6988,8 @@ TEST(RememberedSetRemoveRange) {
return KEEP_SLOT;
});
- RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, start + kPointerSize);
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, start + kPointerSize,
+ SlotSet::FREE_EMPTY_BUCKETS);
slots[start] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
CHECK(slots[addr]);
@@ -7085,7 +6997,8 @@ TEST(RememberedSetRemoveRange) {
});
RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start + kPointerSize,
- start + Page::kPageSize);
+ start + Page::kPageSize,
+ SlotSet::FREE_EMPTY_BUCKETS);
slots[start + kPointerSize] = false;
slots[start + Page::kPageSize - kPointerSize] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
@@ -7093,8 +7006,9 @@ TEST(RememberedSetRemoveRange) {
return KEEP_SLOT;
});
- RememberedSet<OLD_TO_NEW>::RemoveRange(
- chunk, start, start + Page::kPageSize + kPointerSize);
+ RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start,
+ start + Page::kPageSize + kPointerSize,
+ SlotSet::FREE_EMPTY_BUCKETS);
slots[start + Page::kPageSize] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
CHECK(slots[addr]);
@@ -7102,7 +7016,8 @@ TEST(RememberedSetRemoveRange) {
});
RememberedSet<OLD_TO_NEW>::RemoveRange(
- chunk, chunk->area_end() - kPointerSize, chunk->area_end());
+ chunk, chunk->area_end() - kPointerSize, chunk->area_end(),
+ SlotSet::FREE_EMPTY_BUCKETS);
slots[chunk->area_end() - kPointerSize] = false;
RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
CHECK(slots[addr]);
@@ -7110,5 +7025,37 @@ TEST(RememberedSetRemoveRange) {
});
}
+HEAP_TEST(Regress670675) {
+ if (!FLAG_incremental_marking) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ i::MarkCompactCollector* collector = heap->mark_compact_collector();
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+
+ if (collector->sweeping_in_progress()) {
+ collector->EnsureSweepingCompleted();
+ }
+ i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+ if (marking->IsStopped()) {
+ marking->Start(i::GarbageCollectionReason::kTesting);
+ }
+ size_t array_length = Page::kPageSize / kPointerSize + 100;
+ size_t n = heap->OldGenerationSpaceAvailable() / array_length;
+ for (size_t i = 0; i < n + 40; i++) {
+ {
+ HandleScope inner_scope(isolate);
+ isolate->factory()->NewFixedArray(static_cast<int>(array_length));
+ }
+ if (marking->IsStopped()) break;
+ double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
+ marking->AdvanceIncrementalMarking(
+ deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
+ }
+ DCHECK(marking->IsStopped());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index d0f7f82741..0788e26108 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -51,13 +51,9 @@ using v8::Just;
TEST(MarkingDeque) {
CcTest::InitializeVM();
- int mem_size = 20 * kPointerSize;
- byte* mem = NewArray<byte>(20*kPointerSize);
- Address low = reinterpret_cast<Address>(mem);
- Address high = low + mem_size;
- MarkingDeque s;
- s.Initialize(low, high);
-
+ MarkingDeque s(CcTest::i_isolate()->heap());
+ s.SetUp();
+ s.StartUsing();
Address original_address = reinterpret_cast<Address>(&s);
Address current_address = original_address;
while (!s.IsFull()) {
@@ -72,7 +68,9 @@ TEST(MarkingDeque) {
}
CHECK_EQ(original_address, current_address);
- DeleteArray(mem);
+ s.StopUsing();
+ CcTest::i_isolate()->cancelable_task_manager()->CancelAndWait();
+ s.TearDown();
}
TEST(Promotion) {
@@ -415,7 +413,7 @@ static intptr_t MemoryInUse() {
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return -1;
- const int kBufSize = 10000;
+ const int kBufSize = 20000;
char buffer[kBufSize];
ssize_t length = read(fd, buffer, kBufSize);
intptr_t line_start = 0;
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index b3ac4960a5..4673f2edcf 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -14,20 +14,22 @@
// src/type-feedback-vector-inl.h
#include "src/type-feedback-vector-inl.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
namespace {
-v8::Isolate* NewIsolateForPagePromotion() {
+v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
+ int max_semi_space_size = 8) {
i::FLAG_page_promotion = true;
i::FLAG_page_promotion_threshold = 0; // %
- i::FLAG_min_semi_space_size = 8;
+ i::FLAG_min_semi_space_size = min_semi_space_size;
// We cannot optimize for size as we require a new space with more than one
// page.
i::FLAG_optimize_for_size = false;
// Set max_semi_space_size because it could've been initialized by an
// implication of optimize_for_size.
- i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
+ i::FLAG_max_semi_space_size = max_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -134,5 +136,38 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
}
}
+UNINITIALIZED_HEAP_TEST(Regress658718) {
+ v8::Isolate* isolate = NewIsolateForPagePromotion(4, 8);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
+ Heap* heap = i_isolate->heap();
+ heap->delay_sweeper_tasks_for_testing_ = true;
+ heap->new_space()->Grow();
+ {
+ v8::HandleScope inner_handle_scope(isolate);
+ std::vector<Handle<FixedArray>> handles;
+ heap::SimulateFullSpace(heap->new_space(), &handles);
+ CHECK_GT(handles.size(), 0u);
+ // Last object in handles should definitely be on the last page which does
+ // not contain the age mark.
+ Handle<FixedArray> last_object = handles.back();
+ Page* to_be_promoted_page = Page::FromAddress(last_object->address());
+ CHECK(to_be_promoted_page->Contains(last_object->address()));
+ CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
+ heap->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting);
+ CHECK(heap->new_space()->ToSpaceContainsSlow(last_object->address()));
+ CHECK(to_be_promoted_page->Contains(last_object->address()));
+ }
+ heap->CollectGarbage(NEW_SPACE, i::GarbageCollectionReason::kTesting);
+ heap->new_space()->Shrink();
+ heap->memory_allocator()->unmapper()->WaitUntilCompleted();
+ heap->mark_compact_collector()->sweeper().StartSweeperTasks();
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 262d0c5d58..fc692e331c 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -471,7 +471,7 @@ TEST(LargeObjectSpace) {
CHECK(lo->Contains(ho));
while (true) {
- intptr_t available = lo->Available();
+ size_t available = lo->Available();
{ AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
if (allocation.IsRetry()) break;
}
@@ -503,9 +503,15 @@ TEST(SizeOfInitialHeap) {
// Initial size of LO_SPACE
size_t initial_lo_space = isolate->heap()->lo_space()->Size();
- // The limit for each space for an empty isolate containing just the
- // snapshot.
+// The limit for each space for an empty isolate containing just the
+// snapshot.
+// In PPC the page size is 64K, causing more internal fragmentation
+// hence requiring a larger limit.
+#if V8_OS_LINUX && V8_HOST_ARCH_PPC
+ const size_t kMaxInitialSizePerSpace = 3 * MB;
+#else
const size_t kMaxInitialSizePerSpace = 2 * MB;
+#endif
// Freshly initialized VM gets by with the snapshot size (which is below
// kMaxInitialSizePerSpace per space).
@@ -530,7 +536,8 @@ TEST(SizeOfInitialHeap) {
}
// No large objects required to perform the above steps.
- CHECK_EQ(initial_lo_space, isolate->heap()->lo_space()->Size());
+ CHECK_EQ(initial_lo_space,
+ static_cast<size_t>(isolate->heap()->lo_space()->Size()));
}
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
@@ -741,7 +748,7 @@ TEST(ShrinkPageToHighWaterMarkNoFiller) {
CcTest::heap()->old_space()->EmptyAllocationInfo();
const size_t shrinked = page->ShrinkToHighWaterMark();
- CHECK_EQ(0, shrinked);
+ CHECK_EQ(0u, shrinked);
}
TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
@@ -767,7 +774,7 @@ TEST(ShrinkPageToHighWaterMarkOneWordFiller) {
CHECK_EQ(filler->map(), CcTest::heap()->one_pointer_filler_map());
const size_t shrinked = page->ShrinkToHighWaterMark();
- CHECK_EQ(0, shrinked);
+ CHECK_EQ(0u, shrinked);
}
TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
@@ -793,7 +800,7 @@ TEST(ShrinkPageToHighWaterMarkTwoWordFiller) {
CHECK_EQ(filler->map(), CcTest::heap()->two_pointer_filler_map());
const size_t shrinked = page->ShrinkToHighWaterMark();
- CHECK_EQ(0, shrinked);
+ CHECK_EQ(0u, shrinked);
}
} // namespace internal
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 81be1c0028..f7a5bb9467 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -83,7 +83,9 @@ i::Handle<i::BytecodeArray>
BytecodeExpectationsPrinter::GetBytecodeArrayForModule(
v8::Local<v8::Module> module) const {
i::Handle<i::Module> i_module = v8::Utils::OpenHandle(*module);
- return i::handle(i_module->shared()->bytecode_array(), i_isolate());
+ CHECK(!i_module->instantiated());
+ return i::handle(SharedFunctionInfo::cast(i_module->code())->bytecode_array(),
+ i_isolate());
}
i::Handle<i::BytecodeArray>
@@ -217,7 +219,7 @@ void BytecodeExpectationsPrinter::PrintSourcePosition(
if (!source_iterator.done() &&
source_iterator.code_offset() == bytecode_offset) {
stream << "/* " << std::setw(kPositionWidth)
- << source_iterator.source_position();
+ << source_iterator.source_position().ScriptOffset();
if (source_iterator.is_statement()) {
stream << " S> */ ";
} else {
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index 4d78aa6bfc..d36860174c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -242,7 +242,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 41
+bytecode array length: 43
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(17),
@@ -254,6 +254,7 @@ bytecodes: [
B(Star), R(1),
B(Ldar), R(0),
B(ToNumber), R(2),
+ B(Ldar), R(2),
B(Inc), U8(3),
B(Star), R(0),
B(Ldar), R(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 19d83661f0..1cbd05fcea 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -677,58 +677,42 @@ snippet: "
}
}
"
-frame size: 6
+frame size: 4
parameter count: 1
-bytecode array length: 104
+bytecode array length: 53
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(1),
/* 52 S> */ B(Ldar), R(1),
- B(JumpIfToBooleanFalse), U8(96),
+ B(JumpIfToBooleanFalse), U8(45),
/* 45 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(1), U8(2),
B(Star), R(0),
/* 73 S> */ B(LdaSmi), U8(1),
- /* 73 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 73 E> */ B(StaCurrentContextSlot), U8(4),
B(Mov), R(0), R(2),
- /* 106 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ /* 106 S> */ B(LdaCurrentContextSlot), U8(4),
B(JumpIfToBooleanFalse), U8(8),
/* 113 S> */ B(PopContext), R(3),
B(PopContext), R(3),
- B(Jump), U8(44),
- /* 126 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Jump), U8(10),
+ /* 126 S> */ B(LdaCurrentContextSlot), U8(4),
B(Inc), U8(2),
- B(Star), R(4),
- /* 127 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 127 E> */ B(StaCurrentContextSlot), U8(4),
B(PopContext), R(3),
- B(JumpLoop), U8(-95), U8(0),
+ B(JumpLoop), U8(-44), U8(0),
B(LdaUndefined),
/* 137 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["z"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 276c8daacb..9058fb8ad1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -101,18 +101,18 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 34
+bytecode array length: 30
bytecodes: [
/* 30 E> */ B(StackCheck),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(2),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(1), U8(2),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), U8(10),
- /* 53 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 53 E> */ B(StaCurrentContextSlot), U8(4),
B(Mov), R(0), R(1),
B(Ldar), R(0),
/* 88 S> */ B(Jump), U8(2),
@@ -140,62 +140,42 @@ snippet: "
}
x = 4;
"
-frame size: 6
+frame size: 4
parameter count: 1
-bytecode array length: 116
+bytecode array length: 53
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(2),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(3),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(1), U8(2),
B(Star), R(0),
/* 76 S> */ B(LdaSmi), U8(2),
- /* 76 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 76 E> */ B(StaCurrentContextSlot), U8(4),
B(Mov), R(0), R(1),
- /* 118 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ /* 118 S> */ B(LdaCurrentContextSlot), U8(4),
B(JumpIfToBooleanFalse), U8(6),
/* 125 S> */ B(PopContext), R(3),
- B(Jump), U8(29),
+ B(Jump), U8(8),
/* 142 S> */ B(LdaSmi), U8(3),
- B(Star), R(4),
- /* 144 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 144 E> */ B(StaCurrentContextSlot), U8(4),
B(PopContext), R(3),
/* 155 S> */ B(LdaSmi), U8(4),
- B(Star), R(4),
- /* 157 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(3),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
- B(Ldar), R(4),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 157 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 162 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index 49e6f71265..40be0533c1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -14,11 +14,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 12
+bytecode array length: 14
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdrUndefined), R(1),
- B(LdrGlobal), U8(4), R(0),
+ /* 32 S> */ B(LdaUndefined),
+ B(Star), R(1),
+ B(LdaGlobal), U8(4),
+ B(Star), R(0),
/* 39 E> */ B(Call), R(0), R(1), U8(1), U8(2),
/* 44 S> */ B(Return),
]
@@ -35,11 +37,13 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 24
+bytecode array length: 26
bytecodes: [
/* 34 E> */ B(StackCheck),
- /* 39 S> */ B(LdrUndefined), R(1),
- B(LdrGlobal), U8(4), R(0),
+ /* 39 S> */ B(LdaUndefined),
+ B(Star), R(1),
+ B(LdaGlobal), U8(4),
+ B(Star), R(0),
B(LdaSmi), U8(1),
B(Star), R(2),
B(LdaSmi), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index b238d95954..e05419fbff 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -11,22 +11,23 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 89
+bytecode array length: 81
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateClosure), U8(0), U8(2),
/* 36 E> */ B(StaLookupSlotSloppy), U8(1),
- /* 52 S> */ B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 52 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 52 E> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(3),
B(Star), R(3),
B(LdaZero),
@@ -40,11 +41,12 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 52 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 62 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(3), U8(1), R(1),
- /* 69 E> */ B(Call), R(1), R(2), U8(1), U8(4),
+ /* 52 E> */ B(Call), R(1), R(2), U8(2), U8(2),
+ /* 62 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 69 E> */ B(LdaLookupGlobalSlot), U8(1), U8(8), U8(1),
+ B(Star), R(1),
+ /* 69 E> */ B(Call), R(1), R(2), U8(1), U8(6),
/* 74 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index 56f4f3ae59..eb5b0630f4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -14,11 +14,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 11
bytecodes: [
/* 45 E> */ B(StackCheck),
- /* 50 S> */ B(LdrGlobal), U8(4), R(0),
- B(Ldar), R(0),
+ /* 50 S> */ B(LdaGlobal), U8(4),
+ B(Star), R(0),
/* 57 E> */ B(New), R(0), R(0), U8(0), U8(2),
/* 68 S> */ B(Return),
]
@@ -35,10 +35,11 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 16
+bytecode array length: 17
bytecodes: [
/* 58 E> */ B(StackCheck),
- /* 63 S> */ B(LdrGlobal), U8(4), R(0),
+ /* 63 S> */ B(LdaGlobal), U8(4),
+ B(Star), R(0),
B(LdaSmi), U8(3),
B(Star), R(1),
B(Ldar), R(0),
@@ -63,10 +64,11 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 24
+bytecode array length: 25
bytecodes: [
/* 100 E> */ B(StackCheck),
- /* 105 S> */ B(LdrGlobal), U8(4), R(0),
+ /* 105 S> */ B(LdaGlobal), U8(4),
+ B(Star), R(0),
B(LdaSmi), U8(3),
B(Star), R(1),
B(LdaSmi), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index 3d4f5f7cc7..149e668291 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -72,13 +72,14 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 15 S> */ B(LdrUndefined), R(0),
+ /* 15 S> */ B(LdaUndefined),
+ B(Star), R(0),
B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(1),
- B(CallJSRuntime), U8(141), R(0), U8(2),
+ B(CallJSRuntime), U8(154), R(0), U8(2),
/* 44 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 8a381f803f..174641bc74 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -22,12 +22,13 @@ snippet: "
"
frame size: 6
parameter count: 1
-bytecode array length: 36
+bytecode array length: 37
bytecodes: [
B(Mov), R(closure), R(0),
/* 99 E> */ B(StackCheck),
/* 104 S> */ B(LdaConstant), U8(0),
- /* 111 E> */ B(LdrKeyedProperty), R(closure), U8(4), R(4),
+ /* 111 E> */ B(LdaKeyedProperty), R(closure), U8(4),
+ B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
B(Mov), R(this), R(3),
@@ -62,12 +63,13 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 45
+bytecode array length: 47
bytecodes: [
B(Mov), R(closure), R(0),
/* 125 E> */ B(StackCheck),
/* 130 S> */ B(LdaConstant), U8(0),
- /* 130 E> */ B(LdrKeyedProperty), R(closure), U8(2), R(2),
+ /* 130 E> */ B(LdaKeyedProperty), R(closure), U8(2),
+ B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaSmi), U8(2),
@@ -75,7 +77,8 @@ bytecodes: [
B(Mov), R(this), R(1),
/* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
/* 143 S> */ B(LdaConstant), U8(0),
- /* 150 E> */ B(LdrKeyedProperty), R(closure), U8(4), R(2),
+ /* 150 E> */ B(LdaKeyedProperty), R(closure), U8(4),
+ B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
B(Mov), R(this), R(1),
@@ -115,7 +118,7 @@ bytecodes: [
B(LdaSmi), U8(1),
B(Star), R(3),
B(Ldar), R(0),
- /* 118 E> */ B(New), R(2), R(3), U8(1), U8(0),
+ /* 118 E> */ B(New), R(2), R(3), U8(1), U8(2),
B(Star), R(2),
B(Ldar), R(this),
B(JumpIfNotHole), U8(4),
@@ -131,7 +134,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
B(Star), R(2),
B(LdaSmi), U8(2),
- /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(5),
+ /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(4),
B(Ldar), R(this),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(0),
@@ -170,7 +173,7 @@ bytecodes: [
/* 117 S> */ B(CallRuntime), U16(Runtime::k_GetSuperConstructor), R(1), U8(1),
B(Star), R(2),
B(Ldar), R(0),
- /* 117 E> */ B(New), R(2), R(0), U8(0), U8(0),
+ /* 117 E> */ B(New), R(2), R(0), U8(0), U8(2),
B(Star), R(2),
B(Ldar), R(this),
B(JumpIfNotHole), U8(4),
@@ -186,7 +189,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
B(Star), R(2),
B(LdaSmi), U8(2),
- /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(5),
+ /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(1), U8(4),
B(Ldar), R(this),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index f1a15639a8..765ce65efd 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -14,7 +14,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 74
+bytecode array length: 75
bytecodes: [
B(LdaTheHole),
B(Star), R(2),
@@ -31,7 +31,8 @@ bytecodes: [
B(Star), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdrNamedProperty), R(3), U8(1), U8(2), R(4),
+ B(LdaNamedProperty), R(3), U8(1), U8(2),
+ B(Star), R(4),
B(LdaConstant), U8(2),
B(ToName), R(6),
B(CreateClosure), U8(3), U8(2),
@@ -67,7 +68,7 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 74
+bytecode array length: 75
bytecodes: [
B(LdaTheHole),
B(Star), R(2),
@@ -84,7 +85,8 @@ bytecodes: [
B(Star), R(6),
B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
B(Star), R(3),
- B(LdrNamedProperty), R(3), U8(1), U8(2), R(4),
+ B(LdaNamedProperty), R(3), U8(1), U8(2),
+ B(Star), R(4),
B(LdaConstant), U8(2),
B(ToName), R(6),
B(CreateClosure), U8(3), U8(2),
@@ -122,7 +124,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 128
+bytecode array length: 121
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(3),
@@ -130,9 +132,9 @@ bytecodes: [
B(Star), R(2),
/* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaConstant), U8(0),
- /* 43 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 43 E> */ B(StaCurrentContextSlot), U8(4),
/* 57 S> */ B(LdaConstant), U8(1),
- /* 57 E> */ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 57 E> */ B(StaCurrentContextSlot), U8(5),
B(LdaTheHole),
B(Star), R(0),
/* 62 S> */ B(LdaTheHole),
@@ -145,9 +147,10 @@ bytecodes: [
B(Star), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(LdrNamedProperty), R(4), U8(3), U8(2), R(5),
- /* 75 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(ToName), R(7),
+ B(LdaNamedProperty), R(4), U8(3), U8(2),
+ B(Star), R(5),
+ B(LdaCurrentContextSlot), U8(4),
+ /* 75 E> */ B(ToName), R(7),
B(CreateClosure), U8(4), U8(2),
B(Star), R(8),
B(LdaSmi), U8(2),
@@ -156,8 +159,8 @@ bytecodes: [
B(Star), R(10),
B(Mov), R(5), R(6),
B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(6), U8(5),
- /* 106 E> */ B(LdaContextSlot), R(context), U8(5), U8(0),
- B(ToName), R(7),
+ B(LdaCurrentContextSlot), U8(5),
+ /* 106 E> */ B(ToName), R(7),
B(LdaConstant), U8(3),
B(TestEqualStrict), R(7), U8(0),
B(Mov), R(4), R(6),
@@ -194,7 +197,7 @@ snippet: "
"
frame size: 8
parameter count: 1
-bytecode array length: 74
+bytecode array length: 61
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(3),
@@ -202,7 +205,7 @@ bytecodes: [
B(Star), R(2),
/* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaZero),
- /* 46 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 46 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(Star), R(0),
/* 49 S> */ B(LdaTheHole),
@@ -215,23 +218,19 @@ bytecodes: [
B(Star), R(7),
B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
B(Star), R(4),
- B(LdrNamedProperty), R(4), U8(1), U8(2), R(5),
+ B(LdaNamedProperty), R(4), U8(1), U8(2),
+ B(Star), R(5),
B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
B(Star), R(0),
B(Star), R(1),
B(Star), R(2),
- /* 87 S> */ B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
- B(Star), R(4),
- /* 94 E> */ B(New), R(4), R(0), U8(0), U8(4),
+ /* 87 S> */ B(Nop),
+ /* 94 E> */ B(New), R(2), R(0), U8(0), U8(4),
/* 103 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["C"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index 053bce6e0f..c5ecec9abf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -56,12 +56,13 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 25
+bytecode array length: 26
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdrNamedProperty), R(0), U8(1), U8(2), R(2),
+ /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
+ B(Star), R(2),
B(LdaSmi), U8(2),
B(Mul), R(2), U8(4),
/* 61 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(5),
@@ -81,14 +82,15 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 28
+bytecode array length: 29
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
/* 52 S> */ B(LdaSmi), U8(1),
B(Star), R(2),
- B(LdrKeyedProperty), R(0), U8(2), R(3),
+ B(LdaKeyedProperty), R(0), U8(2),
+ B(Star), R(3),
B(LdaSmi), U8(2),
B(BitwiseXor), R(3), U8(4),
/* 57 E> */ B(StaKeyedPropertySloppy), R(0), R(2), U8(5),
@@ -107,17 +109,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 29
+bytecode array length: 24
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 45 S> */ B(CreateClosure), U8(0), U8(2),
- /* 75 S> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(1),
+ /* 75 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(1),
B(BitwiseOrSmi), U8(24), R(1), U8(2),
- /* 77 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 77 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 84 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
index 107844cf6a..f19879c9e2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -30,23 +30,19 @@ handlers: [
snippet: "
const x = 10; return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 10
bytecodes: [
B(LdaTheHole),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
- /* 48 S> */ B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ /* 48 S> */ B(Nop),
/* 58 S> */ B(Return),
]
constant pool: [
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -84,9 +80,9 @@ handlers: [
snippet: "
const x = 10; x = 20;
"
-frame size: 3
+frame size: 1
parameter count: 1
-bytecode array length: 32
+bytecode array length: 17
bytecodes: [
B(LdaTheHole),
B(Star), R(0),
@@ -94,18 +90,11 @@ bytecodes: [
/* 44 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
/* 48 S> */ B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- /* 50 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ /* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(LdaUndefined),
/* 56 S> */ B(Return),
]
constant pool: [
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index f1b696bdff..8ee13434af 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -11,17 +11,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 19
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
- /* 44 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 44 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
@@ -35,29 +35,24 @@ handlers: [
snippet: "
const x = 10; function f1() {return x;} return x;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 37
+bytecode array length: 20
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
- /* 44 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
- /* 74 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ /* 44 E> */ B(StaCurrentContextSlot), U8(4),
+ /* 74 S> */ B(LdaCurrentContextSlot), U8(4),
/* 84 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -68,24 +63,24 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 45
+bytecode array length: 39
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaSmi), U8(20),
B(Star), R(2),
- /* 47 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ B(LdaCurrentContextSlot), U8(4),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ /* 47 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
- /* 47 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 47 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
@@ -100,33 +95,26 @@ handlers: [
snippet: "
const x = 10; x = 20; function f1() {return x;}
"
-frame size: 4
+frame size: 2
parameter count: 1
-bytecode array length: 47
+bytecode array length: 26
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(10),
- /* 44 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 44 E> */ B(StaCurrentContextSlot), U8(4),
/* 48 S> */ B(LdaSmi), U8(20),
- B(Star), R(2),
- /* 50 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
+ /* 50 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(LdaUndefined),
/* 82 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
index 4e65f63fa2..9a05916e14 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -13,12 +13,12 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 15
+bytecode array length: 13
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(CreateClosure), U8(0), U8(2),
/* 52 S> */ B(Return),
@@ -36,16 +36,16 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 21
+bytecode array length: 17
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 10 E> */ B(StackCheck),
/* 27 S> */ B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 53 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ /* 53 S> */ B(LdaCurrentContextSlot), U8(4),
/* 66 S> */ B(Return),
]
constant pool: [
@@ -61,14 +61,14 @@ snippet: "
"
frame size: 1
parameter count: 5
-bytecode array length: 21
+bytecode array length: 17
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
B(Ldar), R(arg2),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 10 E> */ B(StackCheck),
/* 29 S> */ B(CreateClosure), U8(0), U8(2),
/* 61 S> */ B(Return),
@@ -86,13 +86,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 13
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 10 E> */ B(StackCheck),
/* 26 S> */ B(Ldar), R(this),
- /* 26 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 26 E> */ B(StaCurrentContextSlot), U8(4),
/* 32 S> */ B(CreateClosure), U8(0), U8(2),
/* 65 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index 2eb52731bb..f4b7943740 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -31,13 +31,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 15
+bytecode array length: 13
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 45 S> */ B(CreateClosure), U8(0), U8(2),
/* 75 S> */ B(Return),
]
@@ -53,15 +53,15 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 21
+bytecode array length: 17
bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 53 S> */ B(LdaSmi), U8(2),
- /* 53 E> */ B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 53 E> */ B(StaCurrentContextSlot), U8(5),
/* 56 S> */ B(CreateClosure), U8(0), U8(2),
/* 92 S> */ B(Return),
]
@@ -77,16 +77,17 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 21
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
- /* 41 S> */ B(LdrUndefined), R(2),
+ /* 41 S> */ B(LdaUndefined),
+ B(Star), R(2),
B(CreateClosure), U8(0), U8(2),
B(Star), R(1),
/* 64 E> */ B(Call), R(1), R(2), U8(1), U8(2),
- /* 68 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ /* 68 S> */ B(LdaCurrentContextSlot), U8(4),
/* 78 S> */ B(Return),
]
constant pool: [
@@ -103,22 +104,22 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 39
+bytecode array length: 31
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 30 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), U8(1),
- /* 56 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 56 E> */ B(StaCurrentContextSlot), U8(4),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 69 S> */ B(LdaSmi), U8(2),
- /* 69 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 69 E> */ B(StaCurrentContextSlot), U8(4),
/* 72 S> */ B(CreateClosure), U8(1), U8(2),
B(PopContext), R(0),
/* 104 S> */ B(Return),
@@ -389,523 +390,525 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 1305
+bytecode array length: 791
bytecodes: [
B(CreateFunctionContext), U8(254),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateUnmappedArguments),
- B(Wide), B(StaContextSlot), R16(context), U16(257), U16(0),
+ B(Wide), B(StaCurrentContextSlot), U16(257),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 30 E> */ B(StackCheck),
/* 57 S> */ B(LdaZero),
- /* 57 E> */ B(StaContextSlot), R(context), U8(6), U8(0),
+ /* 57 E> */ B(StaCurrentContextSlot), U8(6),
/* 69 S> */ B(LdaZero),
- /* 69 E> */ B(StaContextSlot), R(context), U8(7), U8(0),
+ /* 69 E> */ B(StaCurrentContextSlot), U8(7),
/* 81 S> */ B(LdaZero),
- /* 81 E> */ B(StaContextSlot), R(context), U8(8), U8(0),
+ /* 81 E> */ B(StaCurrentContextSlot), U8(8),
/* 93 S> */ B(LdaZero),
- /* 93 E> */ B(StaContextSlot), R(context), U8(9), U8(0),
+ /* 93 E> */ B(StaCurrentContextSlot), U8(9),
/* 105 S> */ B(LdaZero),
- /* 105 E> */ B(StaContextSlot), R(context), U8(10), U8(0),
+ /* 105 E> */ B(StaCurrentContextSlot), U8(10),
/* 117 S> */ B(LdaZero),
- /* 117 E> */ B(StaContextSlot), R(context), U8(11), U8(0),
+ /* 117 E> */ B(StaCurrentContextSlot), U8(11),
/* 129 S> */ B(LdaZero),
- /* 129 E> */ B(StaContextSlot), R(context), U8(12), U8(0),
+ /* 129 E> */ B(StaCurrentContextSlot), U8(12),
/* 141 S> */ B(LdaZero),
- /* 141 E> */ B(StaContextSlot), R(context), U8(13), U8(0),
+ /* 141 E> */ B(StaCurrentContextSlot), U8(13),
/* 153 S> */ B(LdaZero),
- /* 153 E> */ B(StaContextSlot), R(context), U8(14), U8(0),
+ /* 153 E> */ B(StaCurrentContextSlot), U8(14),
/* 165 S> */ B(LdaZero),
- /* 165 E> */ B(StaContextSlot), R(context), U8(15), U8(0),
+ /* 165 E> */ B(StaCurrentContextSlot), U8(15),
/* 178 S> */ B(LdaZero),
- /* 178 E> */ B(StaContextSlot), R(context), U8(16), U8(0),
+ /* 178 E> */ B(StaCurrentContextSlot), U8(16),
/* 191 S> */ B(LdaZero),
- /* 191 E> */ B(StaContextSlot), R(context), U8(17), U8(0),
+ /* 191 E> */ B(StaCurrentContextSlot), U8(17),
/* 204 S> */ B(LdaZero),
- /* 204 E> */ B(StaContextSlot), R(context), U8(18), U8(0),
+ /* 204 E> */ B(StaCurrentContextSlot), U8(18),
/* 217 S> */ B(LdaZero),
- /* 217 E> */ B(StaContextSlot), R(context), U8(19), U8(0),
+ /* 217 E> */ B(StaCurrentContextSlot), U8(19),
/* 230 S> */ B(LdaZero),
- /* 230 E> */ B(StaContextSlot), R(context), U8(20), U8(0),
+ /* 230 E> */ B(StaCurrentContextSlot), U8(20),
/* 243 S> */ B(LdaZero),
- /* 243 E> */ B(StaContextSlot), R(context), U8(21), U8(0),
+ /* 243 E> */ B(StaCurrentContextSlot), U8(21),
/* 256 S> */ B(LdaZero),
- /* 256 E> */ B(StaContextSlot), R(context), U8(22), U8(0),
+ /* 256 E> */ B(StaCurrentContextSlot), U8(22),
/* 269 S> */ B(LdaZero),
- /* 269 E> */ B(StaContextSlot), R(context), U8(23), U8(0),
+ /* 269 E> */ B(StaCurrentContextSlot), U8(23),
/* 282 S> */ B(LdaZero),
- /* 282 E> */ B(StaContextSlot), R(context), U8(24), U8(0),
+ /* 282 E> */ B(StaCurrentContextSlot), U8(24),
/* 295 S> */ B(LdaZero),
- /* 295 E> */ B(StaContextSlot), R(context), U8(25), U8(0),
+ /* 295 E> */ B(StaCurrentContextSlot), U8(25),
/* 308 S> */ B(LdaZero),
- /* 308 E> */ B(StaContextSlot), R(context), U8(26), U8(0),
+ /* 308 E> */ B(StaCurrentContextSlot), U8(26),
/* 321 S> */ B(LdaZero),
- /* 321 E> */ B(StaContextSlot), R(context), U8(27), U8(0),
+ /* 321 E> */ B(StaCurrentContextSlot), U8(27),
/* 334 S> */ B(LdaZero),
- /* 334 E> */ B(StaContextSlot), R(context), U8(28), U8(0),
+ /* 334 E> */ B(StaCurrentContextSlot), U8(28),
/* 347 S> */ B(LdaZero),
- /* 347 E> */ B(StaContextSlot), R(context), U8(29), U8(0),
+ /* 347 E> */ B(StaCurrentContextSlot), U8(29),
/* 360 S> */ B(LdaZero),
- /* 360 E> */ B(StaContextSlot), R(context), U8(30), U8(0),
+ /* 360 E> */ B(StaCurrentContextSlot), U8(30),
/* 373 S> */ B(LdaZero),
- /* 373 E> */ B(StaContextSlot), R(context), U8(31), U8(0),
+ /* 373 E> */ B(StaCurrentContextSlot), U8(31),
/* 386 S> */ B(LdaZero),
- /* 386 E> */ B(StaContextSlot), R(context), U8(32), U8(0),
+ /* 386 E> */ B(StaCurrentContextSlot), U8(32),
/* 399 S> */ B(LdaZero),
- /* 399 E> */ B(StaContextSlot), R(context), U8(33), U8(0),
+ /* 399 E> */ B(StaCurrentContextSlot), U8(33),
/* 412 S> */ B(LdaZero),
- /* 412 E> */ B(StaContextSlot), R(context), U8(34), U8(0),
+ /* 412 E> */ B(StaCurrentContextSlot), U8(34),
/* 425 S> */ B(LdaZero),
- /* 425 E> */ B(StaContextSlot), R(context), U8(35), U8(0),
+ /* 425 E> */ B(StaCurrentContextSlot), U8(35),
/* 438 S> */ B(LdaZero),
- /* 438 E> */ B(StaContextSlot), R(context), U8(36), U8(0),
+ /* 438 E> */ B(StaCurrentContextSlot), U8(36),
/* 451 S> */ B(LdaZero),
- /* 451 E> */ B(StaContextSlot), R(context), U8(37), U8(0),
+ /* 451 E> */ B(StaCurrentContextSlot), U8(37),
/* 464 S> */ B(LdaZero),
- /* 464 E> */ B(StaContextSlot), R(context), U8(38), U8(0),
+ /* 464 E> */ B(StaCurrentContextSlot), U8(38),
/* 477 S> */ B(LdaZero),
- /* 477 E> */ B(StaContextSlot), R(context), U8(39), U8(0),
+ /* 477 E> */ B(StaCurrentContextSlot), U8(39),
/* 490 S> */ B(LdaZero),
- /* 490 E> */ B(StaContextSlot), R(context), U8(40), U8(0),
+ /* 490 E> */ B(StaCurrentContextSlot), U8(40),
/* 503 S> */ B(LdaZero),
- /* 503 E> */ B(StaContextSlot), R(context), U8(41), U8(0),
+ /* 503 E> */ B(StaCurrentContextSlot), U8(41),
/* 516 S> */ B(LdaZero),
- /* 516 E> */ B(StaContextSlot), R(context), U8(42), U8(0),
+ /* 516 E> */ B(StaCurrentContextSlot), U8(42),
/* 529 S> */ B(LdaZero),
- /* 529 E> */ B(StaContextSlot), R(context), U8(43), U8(0),
+ /* 529 E> */ B(StaCurrentContextSlot), U8(43),
/* 542 S> */ B(LdaZero),
- /* 542 E> */ B(StaContextSlot), R(context), U8(44), U8(0),
+ /* 542 E> */ B(StaCurrentContextSlot), U8(44),
/* 555 S> */ B(LdaZero),
- /* 555 E> */ B(StaContextSlot), R(context), U8(45), U8(0),
+ /* 555 E> */ B(StaCurrentContextSlot), U8(45),
/* 568 S> */ B(LdaZero),
- /* 568 E> */ B(StaContextSlot), R(context), U8(46), U8(0),
+ /* 568 E> */ B(StaCurrentContextSlot), U8(46),
/* 581 S> */ B(LdaZero),
- /* 581 E> */ B(StaContextSlot), R(context), U8(47), U8(0),
+ /* 581 E> */ B(StaCurrentContextSlot), U8(47),
/* 594 S> */ B(LdaZero),
- /* 594 E> */ B(StaContextSlot), R(context), U8(48), U8(0),
+ /* 594 E> */ B(StaCurrentContextSlot), U8(48),
/* 607 S> */ B(LdaZero),
- /* 607 E> */ B(StaContextSlot), R(context), U8(49), U8(0),
+ /* 607 E> */ B(StaCurrentContextSlot), U8(49),
/* 620 S> */ B(LdaZero),
- /* 620 E> */ B(StaContextSlot), R(context), U8(50), U8(0),
+ /* 620 E> */ B(StaCurrentContextSlot), U8(50),
/* 633 S> */ B(LdaZero),
- /* 633 E> */ B(StaContextSlot), R(context), U8(51), U8(0),
+ /* 633 E> */ B(StaCurrentContextSlot), U8(51),
/* 646 S> */ B(LdaZero),
- /* 646 E> */ B(StaContextSlot), R(context), U8(52), U8(0),
+ /* 646 E> */ B(StaCurrentContextSlot), U8(52),
/* 659 S> */ B(LdaZero),
- /* 659 E> */ B(StaContextSlot), R(context), U8(53), U8(0),
+ /* 659 E> */ B(StaCurrentContextSlot), U8(53),
/* 672 S> */ B(LdaZero),
- /* 672 E> */ B(StaContextSlot), R(context), U8(54), U8(0),
+ /* 672 E> */ B(StaCurrentContextSlot), U8(54),
/* 685 S> */ B(LdaZero),
- /* 685 E> */ B(StaContextSlot), R(context), U8(55), U8(0),
+ /* 685 E> */ B(StaCurrentContextSlot), U8(55),
/* 698 S> */ B(LdaZero),
- /* 698 E> */ B(StaContextSlot), R(context), U8(56), U8(0),
+ /* 698 E> */ B(StaCurrentContextSlot), U8(56),
/* 711 S> */ B(LdaZero),
- /* 711 E> */ B(StaContextSlot), R(context), U8(57), U8(0),
+ /* 711 E> */ B(StaCurrentContextSlot), U8(57),
/* 724 S> */ B(LdaZero),
- /* 724 E> */ B(StaContextSlot), R(context), U8(58), U8(0),
+ /* 724 E> */ B(StaCurrentContextSlot), U8(58),
/* 737 S> */ B(LdaZero),
- /* 737 E> */ B(StaContextSlot), R(context), U8(59), U8(0),
+ /* 737 E> */ B(StaCurrentContextSlot), U8(59),
/* 750 S> */ B(LdaZero),
- /* 750 E> */ B(StaContextSlot), R(context), U8(60), U8(0),
+ /* 750 E> */ B(StaCurrentContextSlot), U8(60),
/* 763 S> */ B(LdaZero),
- /* 763 E> */ B(StaContextSlot), R(context), U8(61), U8(0),
+ /* 763 E> */ B(StaCurrentContextSlot), U8(61),
/* 776 S> */ B(LdaZero),
- /* 776 E> */ B(StaContextSlot), R(context), U8(62), U8(0),
+ /* 776 E> */ B(StaCurrentContextSlot), U8(62),
/* 789 S> */ B(LdaZero),
- /* 789 E> */ B(StaContextSlot), R(context), U8(63), U8(0),
+ /* 789 E> */ B(StaCurrentContextSlot), U8(63),
/* 802 S> */ B(LdaZero),
- /* 802 E> */ B(StaContextSlot), R(context), U8(64), U8(0),
+ /* 802 E> */ B(StaCurrentContextSlot), U8(64),
/* 815 S> */ B(LdaZero),
- /* 815 E> */ B(StaContextSlot), R(context), U8(65), U8(0),
+ /* 815 E> */ B(StaCurrentContextSlot), U8(65),
/* 828 S> */ B(LdaZero),
- /* 828 E> */ B(StaContextSlot), R(context), U8(66), U8(0),
+ /* 828 E> */ B(StaCurrentContextSlot), U8(66),
/* 841 S> */ B(LdaZero),
- /* 841 E> */ B(StaContextSlot), R(context), U8(67), U8(0),
+ /* 841 E> */ B(StaCurrentContextSlot), U8(67),
/* 854 S> */ B(LdaZero),
- /* 854 E> */ B(StaContextSlot), R(context), U8(68), U8(0),
+ /* 854 E> */ B(StaCurrentContextSlot), U8(68),
/* 867 S> */ B(LdaZero),
- /* 867 E> */ B(StaContextSlot), R(context), U8(69), U8(0),
+ /* 867 E> */ B(StaCurrentContextSlot), U8(69),
/* 880 S> */ B(LdaZero),
- /* 880 E> */ B(StaContextSlot), R(context), U8(70), U8(0),
+ /* 880 E> */ B(StaCurrentContextSlot), U8(70),
/* 893 S> */ B(LdaZero),
- /* 893 E> */ B(StaContextSlot), R(context), U8(71), U8(0),
+ /* 893 E> */ B(StaCurrentContextSlot), U8(71),
/* 906 S> */ B(LdaZero),
- /* 906 E> */ B(StaContextSlot), R(context), U8(72), U8(0),
+ /* 906 E> */ B(StaCurrentContextSlot), U8(72),
/* 919 S> */ B(LdaZero),
- /* 919 E> */ B(StaContextSlot), R(context), U8(73), U8(0),
+ /* 919 E> */ B(StaCurrentContextSlot), U8(73),
/* 932 S> */ B(LdaZero),
- /* 932 E> */ B(StaContextSlot), R(context), U8(74), U8(0),
+ /* 932 E> */ B(StaCurrentContextSlot), U8(74),
/* 945 S> */ B(LdaZero),
- /* 945 E> */ B(StaContextSlot), R(context), U8(75), U8(0),
+ /* 945 E> */ B(StaCurrentContextSlot), U8(75),
/* 958 S> */ B(LdaZero),
- /* 958 E> */ B(StaContextSlot), R(context), U8(76), U8(0),
+ /* 958 E> */ B(StaCurrentContextSlot), U8(76),
/* 971 S> */ B(LdaZero),
- /* 971 E> */ B(StaContextSlot), R(context), U8(77), U8(0),
+ /* 971 E> */ B(StaCurrentContextSlot), U8(77),
/* 984 S> */ B(LdaZero),
- /* 984 E> */ B(StaContextSlot), R(context), U8(78), U8(0),
+ /* 984 E> */ B(StaCurrentContextSlot), U8(78),
/* 997 S> */ B(LdaZero),
- /* 997 E> */ B(StaContextSlot), R(context), U8(79), U8(0),
+ /* 997 E> */ B(StaCurrentContextSlot), U8(79),
/* 1010 S> */ B(LdaZero),
- /* 1010 E> */ B(StaContextSlot), R(context), U8(80), U8(0),
+ /* 1010 E> */ B(StaCurrentContextSlot), U8(80),
/* 1023 S> */ B(LdaZero),
- /* 1023 E> */ B(StaContextSlot), R(context), U8(81), U8(0),
+ /* 1023 E> */ B(StaCurrentContextSlot), U8(81),
/* 1036 S> */ B(LdaZero),
- /* 1036 E> */ B(StaContextSlot), R(context), U8(82), U8(0),
+ /* 1036 E> */ B(StaCurrentContextSlot), U8(82),
/* 1049 S> */ B(LdaZero),
- /* 1049 E> */ B(StaContextSlot), R(context), U8(83), U8(0),
+ /* 1049 E> */ B(StaCurrentContextSlot), U8(83),
/* 1062 S> */ B(LdaZero),
- /* 1062 E> */ B(StaContextSlot), R(context), U8(84), U8(0),
+ /* 1062 E> */ B(StaCurrentContextSlot), U8(84),
/* 1075 S> */ B(LdaZero),
- /* 1075 E> */ B(StaContextSlot), R(context), U8(85), U8(0),
+ /* 1075 E> */ B(StaCurrentContextSlot), U8(85),
/* 1088 S> */ B(LdaZero),
- /* 1088 E> */ B(StaContextSlot), R(context), U8(86), U8(0),
+ /* 1088 E> */ B(StaCurrentContextSlot), U8(86),
/* 1101 S> */ B(LdaZero),
- /* 1101 E> */ B(StaContextSlot), R(context), U8(87), U8(0),
+ /* 1101 E> */ B(StaCurrentContextSlot), U8(87),
/* 1114 S> */ B(LdaZero),
- /* 1114 E> */ B(StaContextSlot), R(context), U8(88), U8(0),
+ /* 1114 E> */ B(StaCurrentContextSlot), U8(88),
/* 1127 S> */ B(LdaZero),
- /* 1127 E> */ B(StaContextSlot), R(context), U8(89), U8(0),
+ /* 1127 E> */ B(StaCurrentContextSlot), U8(89),
/* 1140 S> */ B(LdaZero),
- /* 1140 E> */ B(StaContextSlot), R(context), U8(90), U8(0),
+ /* 1140 E> */ B(StaCurrentContextSlot), U8(90),
/* 1153 S> */ B(LdaZero),
- /* 1153 E> */ B(StaContextSlot), R(context), U8(91), U8(0),
+ /* 1153 E> */ B(StaCurrentContextSlot), U8(91),
/* 1166 S> */ B(LdaZero),
- /* 1166 E> */ B(StaContextSlot), R(context), U8(92), U8(0),
+ /* 1166 E> */ B(StaCurrentContextSlot), U8(92),
/* 1179 S> */ B(LdaZero),
- /* 1179 E> */ B(StaContextSlot), R(context), U8(93), U8(0),
+ /* 1179 E> */ B(StaCurrentContextSlot), U8(93),
/* 1192 S> */ B(LdaZero),
- /* 1192 E> */ B(StaContextSlot), R(context), U8(94), U8(0),
+ /* 1192 E> */ B(StaCurrentContextSlot), U8(94),
/* 1205 S> */ B(LdaZero),
- /* 1205 E> */ B(StaContextSlot), R(context), U8(95), U8(0),
+ /* 1205 E> */ B(StaCurrentContextSlot), U8(95),
/* 1218 S> */ B(LdaZero),
- /* 1218 E> */ B(StaContextSlot), R(context), U8(96), U8(0),
+ /* 1218 E> */ B(StaCurrentContextSlot), U8(96),
/* 1231 S> */ B(LdaZero),
- /* 1231 E> */ B(StaContextSlot), R(context), U8(97), U8(0),
+ /* 1231 E> */ B(StaCurrentContextSlot), U8(97),
/* 1244 S> */ B(LdaZero),
- /* 1244 E> */ B(StaContextSlot), R(context), U8(98), U8(0),
+ /* 1244 E> */ B(StaCurrentContextSlot), U8(98),
/* 1257 S> */ B(LdaZero),
- /* 1257 E> */ B(StaContextSlot), R(context), U8(99), U8(0),
+ /* 1257 E> */ B(StaCurrentContextSlot), U8(99),
/* 1270 S> */ B(LdaZero),
- /* 1270 E> */ B(StaContextSlot), R(context), U8(100), U8(0),
+ /* 1270 E> */ B(StaCurrentContextSlot), U8(100),
/* 1283 S> */ B(LdaZero),
- /* 1283 E> */ B(StaContextSlot), R(context), U8(101), U8(0),
+ /* 1283 E> */ B(StaCurrentContextSlot), U8(101),
/* 1296 S> */ B(LdaZero),
- /* 1296 E> */ B(StaContextSlot), R(context), U8(102), U8(0),
+ /* 1296 E> */ B(StaCurrentContextSlot), U8(102),
/* 1309 S> */ B(LdaZero),
- /* 1309 E> */ B(StaContextSlot), R(context), U8(103), U8(0),
+ /* 1309 E> */ B(StaCurrentContextSlot), U8(103),
/* 1322 S> */ B(LdaZero),
- /* 1322 E> */ B(StaContextSlot), R(context), U8(104), U8(0),
+ /* 1322 E> */ B(StaCurrentContextSlot), U8(104),
/* 1335 S> */ B(LdaZero),
- /* 1335 E> */ B(StaContextSlot), R(context), U8(105), U8(0),
+ /* 1335 E> */ B(StaCurrentContextSlot), U8(105),
/* 1349 S> */ B(LdaZero),
- /* 1349 E> */ B(StaContextSlot), R(context), U8(106), U8(0),
+ /* 1349 E> */ B(StaCurrentContextSlot), U8(106),
/* 1363 S> */ B(LdaZero),
- /* 1363 E> */ B(StaContextSlot), R(context), U8(107), U8(0),
+ /* 1363 E> */ B(StaCurrentContextSlot), U8(107),
/* 1377 S> */ B(LdaZero),
- /* 1377 E> */ B(StaContextSlot), R(context), U8(108), U8(0),
+ /* 1377 E> */ B(StaCurrentContextSlot), U8(108),
/* 1391 S> */ B(LdaZero),
- /* 1391 E> */ B(StaContextSlot), R(context), U8(109), U8(0),
+ /* 1391 E> */ B(StaCurrentContextSlot), U8(109),
/* 1405 S> */ B(LdaZero),
- /* 1405 E> */ B(StaContextSlot), R(context), U8(110), U8(0),
+ /* 1405 E> */ B(StaCurrentContextSlot), U8(110),
/* 1419 S> */ B(LdaZero),
- /* 1419 E> */ B(StaContextSlot), R(context), U8(111), U8(0),
+ /* 1419 E> */ B(StaCurrentContextSlot), U8(111),
/* 1433 S> */ B(LdaZero),
- /* 1433 E> */ B(StaContextSlot), R(context), U8(112), U8(0),
+ /* 1433 E> */ B(StaCurrentContextSlot), U8(112),
/* 1447 S> */ B(LdaZero),
- /* 1447 E> */ B(StaContextSlot), R(context), U8(113), U8(0),
+ /* 1447 E> */ B(StaCurrentContextSlot), U8(113),
/* 1461 S> */ B(LdaZero),
- /* 1461 E> */ B(StaContextSlot), R(context), U8(114), U8(0),
+ /* 1461 E> */ B(StaCurrentContextSlot), U8(114),
/* 1475 S> */ B(LdaZero),
- /* 1475 E> */ B(StaContextSlot), R(context), U8(115), U8(0),
+ /* 1475 E> */ B(StaCurrentContextSlot), U8(115),
/* 1489 S> */ B(LdaZero),
- /* 1489 E> */ B(StaContextSlot), R(context), U8(116), U8(0),
+ /* 1489 E> */ B(StaCurrentContextSlot), U8(116),
/* 1503 S> */ B(LdaZero),
- /* 1503 E> */ B(StaContextSlot), R(context), U8(117), U8(0),
+ /* 1503 E> */ B(StaCurrentContextSlot), U8(117),
/* 1517 S> */ B(LdaZero),
- /* 1517 E> */ B(StaContextSlot), R(context), U8(118), U8(0),
+ /* 1517 E> */ B(StaCurrentContextSlot), U8(118),
/* 1531 S> */ B(LdaZero),
- /* 1531 E> */ B(StaContextSlot), R(context), U8(119), U8(0),
+ /* 1531 E> */ B(StaCurrentContextSlot), U8(119),
/* 1545 S> */ B(LdaZero),
- /* 1545 E> */ B(StaContextSlot), R(context), U8(120), U8(0),
+ /* 1545 E> */ B(StaCurrentContextSlot), U8(120),
/* 1559 S> */ B(LdaZero),
- /* 1559 E> */ B(StaContextSlot), R(context), U8(121), U8(0),
+ /* 1559 E> */ B(StaCurrentContextSlot), U8(121),
/* 1573 S> */ B(LdaZero),
- /* 1573 E> */ B(StaContextSlot), R(context), U8(122), U8(0),
+ /* 1573 E> */ B(StaCurrentContextSlot), U8(122),
/* 1587 S> */ B(LdaZero),
- /* 1587 E> */ B(StaContextSlot), R(context), U8(123), U8(0),
+ /* 1587 E> */ B(StaCurrentContextSlot), U8(123),
/* 1601 S> */ B(LdaZero),
- /* 1601 E> */ B(StaContextSlot), R(context), U8(124), U8(0),
+ /* 1601 E> */ B(StaCurrentContextSlot), U8(124),
/* 1615 S> */ B(LdaZero),
- /* 1615 E> */ B(StaContextSlot), R(context), U8(125), U8(0),
+ /* 1615 E> */ B(StaCurrentContextSlot), U8(125),
/* 1629 S> */ B(LdaZero),
- /* 1629 E> */ B(StaContextSlot), R(context), U8(126), U8(0),
+ /* 1629 E> */ B(StaCurrentContextSlot), U8(126),
/* 1643 S> */ B(LdaZero),
- /* 1643 E> */ B(StaContextSlot), R(context), U8(127), U8(0),
+ /* 1643 E> */ B(StaCurrentContextSlot), U8(127),
/* 1657 S> */ B(LdaZero),
- /* 1657 E> */ B(StaContextSlot), R(context), U8(128), U8(0),
+ /* 1657 E> */ B(StaCurrentContextSlot), U8(128),
/* 1671 S> */ B(LdaZero),
- /* 1671 E> */ B(StaContextSlot), R(context), U8(129), U8(0),
+ /* 1671 E> */ B(StaCurrentContextSlot), U8(129),
/* 1685 S> */ B(LdaZero),
- /* 1685 E> */ B(StaContextSlot), R(context), U8(130), U8(0),
+ /* 1685 E> */ B(StaCurrentContextSlot), U8(130),
/* 1699 S> */ B(LdaZero),
- /* 1699 E> */ B(StaContextSlot), R(context), U8(131), U8(0),
+ /* 1699 E> */ B(StaCurrentContextSlot), U8(131),
/* 1713 S> */ B(LdaZero),
- /* 1713 E> */ B(StaContextSlot), R(context), U8(132), U8(0),
+ /* 1713 E> */ B(StaCurrentContextSlot), U8(132),
/* 1727 S> */ B(LdaZero),
- /* 1727 E> */ B(StaContextSlot), R(context), U8(133), U8(0),
+ /* 1727 E> */ B(StaCurrentContextSlot), U8(133),
/* 1741 S> */ B(LdaZero),
- /* 1741 E> */ B(StaContextSlot), R(context), U8(134), U8(0),
+ /* 1741 E> */ B(StaCurrentContextSlot), U8(134),
/* 1755 S> */ B(LdaZero),
- /* 1755 E> */ B(StaContextSlot), R(context), U8(135), U8(0),
+ /* 1755 E> */ B(StaCurrentContextSlot), U8(135),
/* 1769 S> */ B(LdaZero),
- /* 1769 E> */ B(StaContextSlot), R(context), U8(136), U8(0),
+ /* 1769 E> */ B(StaCurrentContextSlot), U8(136),
/* 1783 S> */ B(LdaZero),
- /* 1783 E> */ B(StaContextSlot), R(context), U8(137), U8(0),
+ /* 1783 E> */ B(StaCurrentContextSlot), U8(137),
/* 1797 S> */ B(LdaZero),
- /* 1797 E> */ B(StaContextSlot), R(context), U8(138), U8(0),
+ /* 1797 E> */ B(StaCurrentContextSlot), U8(138),
/* 1811 S> */ B(LdaZero),
- /* 1811 E> */ B(StaContextSlot), R(context), U8(139), U8(0),
+ /* 1811 E> */ B(StaCurrentContextSlot), U8(139),
/* 1825 S> */ B(LdaZero),
- /* 1825 E> */ B(StaContextSlot), R(context), U8(140), U8(0),
+ /* 1825 E> */ B(StaCurrentContextSlot), U8(140),
/* 1839 S> */ B(LdaZero),
- /* 1839 E> */ B(StaContextSlot), R(context), U8(141), U8(0),
+ /* 1839 E> */ B(StaCurrentContextSlot), U8(141),
/* 1853 S> */ B(LdaZero),
- /* 1853 E> */ B(StaContextSlot), R(context), U8(142), U8(0),
+ /* 1853 E> */ B(StaCurrentContextSlot), U8(142),
/* 1867 S> */ B(LdaZero),
- /* 1867 E> */ B(StaContextSlot), R(context), U8(143), U8(0),
+ /* 1867 E> */ B(StaCurrentContextSlot), U8(143),
/* 1881 S> */ B(LdaZero),
- /* 1881 E> */ B(StaContextSlot), R(context), U8(144), U8(0),
+ /* 1881 E> */ B(StaCurrentContextSlot), U8(144),
/* 1895 S> */ B(LdaZero),
- /* 1895 E> */ B(StaContextSlot), R(context), U8(145), U8(0),
+ /* 1895 E> */ B(StaCurrentContextSlot), U8(145),
/* 1909 S> */ B(LdaZero),
- /* 1909 E> */ B(StaContextSlot), R(context), U8(146), U8(0),
+ /* 1909 E> */ B(StaCurrentContextSlot), U8(146),
/* 1923 S> */ B(LdaZero),
- /* 1923 E> */ B(StaContextSlot), R(context), U8(147), U8(0),
+ /* 1923 E> */ B(StaCurrentContextSlot), U8(147),
/* 1937 S> */ B(LdaZero),
- /* 1937 E> */ B(StaContextSlot), R(context), U8(148), U8(0),
+ /* 1937 E> */ B(StaCurrentContextSlot), U8(148),
/* 1951 S> */ B(LdaZero),
- /* 1951 E> */ B(StaContextSlot), R(context), U8(149), U8(0),
+ /* 1951 E> */ B(StaCurrentContextSlot), U8(149),
/* 1965 S> */ B(LdaZero),
- /* 1965 E> */ B(StaContextSlot), R(context), U8(150), U8(0),
+ /* 1965 E> */ B(StaCurrentContextSlot), U8(150),
/* 1979 S> */ B(LdaZero),
- /* 1979 E> */ B(StaContextSlot), R(context), U8(151), U8(0),
+ /* 1979 E> */ B(StaCurrentContextSlot), U8(151),
/* 1993 S> */ B(LdaZero),
- /* 1993 E> */ B(StaContextSlot), R(context), U8(152), U8(0),
+ /* 1993 E> */ B(StaCurrentContextSlot), U8(152),
/* 2007 S> */ B(LdaZero),
- /* 2007 E> */ B(StaContextSlot), R(context), U8(153), U8(0),
+ /* 2007 E> */ B(StaCurrentContextSlot), U8(153),
/* 2021 S> */ B(LdaZero),
- /* 2021 E> */ B(StaContextSlot), R(context), U8(154), U8(0),
+ /* 2021 E> */ B(StaCurrentContextSlot), U8(154),
/* 2035 S> */ B(LdaZero),
- /* 2035 E> */ B(StaContextSlot), R(context), U8(155), U8(0),
+ /* 2035 E> */ B(StaCurrentContextSlot), U8(155),
/* 2049 S> */ B(LdaZero),
- /* 2049 E> */ B(StaContextSlot), R(context), U8(156), U8(0),
+ /* 2049 E> */ B(StaCurrentContextSlot), U8(156),
/* 2063 S> */ B(LdaZero),
- /* 2063 E> */ B(StaContextSlot), R(context), U8(157), U8(0),
+ /* 2063 E> */ B(StaCurrentContextSlot), U8(157),
/* 2077 S> */ B(LdaZero),
- /* 2077 E> */ B(StaContextSlot), R(context), U8(158), U8(0),
+ /* 2077 E> */ B(StaCurrentContextSlot), U8(158),
/* 2091 S> */ B(LdaZero),
- /* 2091 E> */ B(StaContextSlot), R(context), U8(159), U8(0),
+ /* 2091 E> */ B(StaCurrentContextSlot), U8(159),
/* 2105 S> */ B(LdaZero),
- /* 2105 E> */ B(StaContextSlot), R(context), U8(160), U8(0),
+ /* 2105 E> */ B(StaCurrentContextSlot), U8(160),
/* 2119 S> */ B(LdaZero),
- /* 2119 E> */ B(StaContextSlot), R(context), U8(161), U8(0),
+ /* 2119 E> */ B(StaCurrentContextSlot), U8(161),
/* 2133 S> */ B(LdaZero),
- /* 2133 E> */ B(StaContextSlot), R(context), U8(162), U8(0),
+ /* 2133 E> */ B(StaCurrentContextSlot), U8(162),
/* 2147 S> */ B(LdaZero),
- /* 2147 E> */ B(StaContextSlot), R(context), U8(163), U8(0),
+ /* 2147 E> */ B(StaCurrentContextSlot), U8(163),
/* 2161 S> */ B(LdaZero),
- /* 2161 E> */ B(StaContextSlot), R(context), U8(164), U8(0),
+ /* 2161 E> */ B(StaCurrentContextSlot), U8(164),
/* 2175 S> */ B(LdaZero),
- /* 2175 E> */ B(StaContextSlot), R(context), U8(165), U8(0),
+ /* 2175 E> */ B(StaCurrentContextSlot), U8(165),
/* 2189 S> */ B(LdaZero),
- /* 2189 E> */ B(StaContextSlot), R(context), U8(166), U8(0),
+ /* 2189 E> */ B(StaCurrentContextSlot), U8(166),
/* 2203 S> */ B(LdaZero),
- /* 2203 E> */ B(StaContextSlot), R(context), U8(167), U8(0),
+ /* 2203 E> */ B(StaCurrentContextSlot), U8(167),
/* 2217 S> */ B(LdaZero),
- /* 2217 E> */ B(StaContextSlot), R(context), U8(168), U8(0),
+ /* 2217 E> */ B(StaCurrentContextSlot), U8(168),
/* 2231 S> */ B(LdaZero),
- /* 2231 E> */ B(StaContextSlot), R(context), U8(169), U8(0),
+ /* 2231 E> */ B(StaCurrentContextSlot), U8(169),
/* 2245 S> */ B(LdaZero),
- /* 2245 E> */ B(StaContextSlot), R(context), U8(170), U8(0),
+ /* 2245 E> */ B(StaCurrentContextSlot), U8(170),
/* 2259 S> */ B(LdaZero),
- /* 2259 E> */ B(StaContextSlot), R(context), U8(171), U8(0),
+ /* 2259 E> */ B(StaCurrentContextSlot), U8(171),
/* 2273 S> */ B(LdaZero),
- /* 2273 E> */ B(StaContextSlot), R(context), U8(172), U8(0),
+ /* 2273 E> */ B(StaCurrentContextSlot), U8(172),
/* 2287 S> */ B(LdaZero),
- /* 2287 E> */ B(StaContextSlot), R(context), U8(173), U8(0),
+ /* 2287 E> */ B(StaCurrentContextSlot), U8(173),
/* 2301 S> */ B(LdaZero),
- /* 2301 E> */ B(StaContextSlot), R(context), U8(174), U8(0),
+ /* 2301 E> */ B(StaCurrentContextSlot), U8(174),
/* 2315 S> */ B(LdaZero),
- /* 2315 E> */ B(StaContextSlot), R(context), U8(175), U8(0),
+ /* 2315 E> */ B(StaCurrentContextSlot), U8(175),
/* 2329 S> */ B(LdaZero),
- /* 2329 E> */ B(StaContextSlot), R(context), U8(176), U8(0),
+ /* 2329 E> */ B(StaCurrentContextSlot), U8(176),
/* 2343 S> */ B(LdaZero),
- /* 2343 E> */ B(StaContextSlot), R(context), U8(177), U8(0),
+ /* 2343 E> */ B(StaCurrentContextSlot), U8(177),
/* 2357 S> */ B(LdaZero),
- /* 2357 E> */ B(StaContextSlot), R(context), U8(178), U8(0),
+ /* 2357 E> */ B(StaCurrentContextSlot), U8(178),
/* 2371 S> */ B(LdaZero),
- /* 2371 E> */ B(StaContextSlot), R(context), U8(179), U8(0),
+ /* 2371 E> */ B(StaCurrentContextSlot), U8(179),
/* 2385 S> */ B(LdaZero),
- /* 2385 E> */ B(StaContextSlot), R(context), U8(180), U8(0),
+ /* 2385 E> */ B(StaCurrentContextSlot), U8(180),
/* 2399 S> */ B(LdaZero),
- /* 2399 E> */ B(StaContextSlot), R(context), U8(181), U8(0),
+ /* 2399 E> */ B(StaCurrentContextSlot), U8(181),
/* 2413 S> */ B(LdaZero),
- /* 2413 E> */ B(StaContextSlot), R(context), U8(182), U8(0),
+ /* 2413 E> */ B(StaCurrentContextSlot), U8(182),
/* 2427 S> */ B(LdaZero),
- /* 2427 E> */ B(StaContextSlot), R(context), U8(183), U8(0),
+ /* 2427 E> */ B(StaCurrentContextSlot), U8(183),
/* 2441 S> */ B(LdaZero),
- /* 2441 E> */ B(StaContextSlot), R(context), U8(184), U8(0),
+ /* 2441 E> */ B(StaCurrentContextSlot), U8(184),
/* 2455 S> */ B(LdaZero),
- /* 2455 E> */ B(StaContextSlot), R(context), U8(185), U8(0),
+ /* 2455 E> */ B(StaCurrentContextSlot), U8(185),
/* 2469 S> */ B(LdaZero),
- /* 2469 E> */ B(StaContextSlot), R(context), U8(186), U8(0),
+ /* 2469 E> */ B(StaCurrentContextSlot), U8(186),
/* 2483 S> */ B(LdaZero),
- /* 2483 E> */ B(StaContextSlot), R(context), U8(187), U8(0),
+ /* 2483 E> */ B(StaCurrentContextSlot), U8(187),
/* 2497 S> */ B(LdaZero),
- /* 2497 E> */ B(StaContextSlot), R(context), U8(188), U8(0),
+ /* 2497 E> */ B(StaCurrentContextSlot), U8(188),
/* 2511 S> */ B(LdaZero),
- /* 2511 E> */ B(StaContextSlot), R(context), U8(189), U8(0),
+ /* 2511 E> */ B(StaCurrentContextSlot), U8(189),
/* 2525 S> */ B(LdaZero),
- /* 2525 E> */ B(StaContextSlot), R(context), U8(190), U8(0),
+ /* 2525 E> */ B(StaCurrentContextSlot), U8(190),
/* 2539 S> */ B(LdaZero),
- /* 2539 E> */ B(StaContextSlot), R(context), U8(191), U8(0),
+ /* 2539 E> */ B(StaCurrentContextSlot), U8(191),
/* 2553 S> */ B(LdaZero),
- /* 2553 E> */ B(StaContextSlot), R(context), U8(192), U8(0),
+ /* 2553 E> */ B(StaCurrentContextSlot), U8(192),
/* 2567 S> */ B(LdaZero),
- /* 2567 E> */ B(StaContextSlot), R(context), U8(193), U8(0),
+ /* 2567 E> */ B(StaCurrentContextSlot), U8(193),
/* 2581 S> */ B(LdaZero),
- /* 2581 E> */ B(StaContextSlot), R(context), U8(194), U8(0),
+ /* 2581 E> */ B(StaCurrentContextSlot), U8(194),
/* 2595 S> */ B(LdaZero),
- /* 2595 E> */ B(StaContextSlot), R(context), U8(195), U8(0),
+ /* 2595 E> */ B(StaCurrentContextSlot), U8(195),
/* 2609 S> */ B(LdaZero),
- /* 2609 E> */ B(StaContextSlot), R(context), U8(196), U8(0),
+ /* 2609 E> */ B(StaCurrentContextSlot), U8(196),
/* 2623 S> */ B(LdaZero),
- /* 2623 E> */ B(StaContextSlot), R(context), U8(197), U8(0),
+ /* 2623 E> */ B(StaCurrentContextSlot), U8(197),
/* 2637 S> */ B(LdaZero),
- /* 2637 E> */ B(StaContextSlot), R(context), U8(198), U8(0),
+ /* 2637 E> */ B(StaCurrentContextSlot), U8(198),
/* 2651 S> */ B(LdaZero),
- /* 2651 E> */ B(StaContextSlot), R(context), U8(199), U8(0),
+ /* 2651 E> */ B(StaCurrentContextSlot), U8(199),
/* 2665 S> */ B(LdaZero),
- /* 2665 E> */ B(StaContextSlot), R(context), U8(200), U8(0),
+ /* 2665 E> */ B(StaCurrentContextSlot), U8(200),
/* 2679 S> */ B(LdaZero),
- /* 2679 E> */ B(StaContextSlot), R(context), U8(201), U8(0),
+ /* 2679 E> */ B(StaCurrentContextSlot), U8(201),
/* 2693 S> */ B(LdaZero),
- /* 2693 E> */ B(StaContextSlot), R(context), U8(202), U8(0),
+ /* 2693 E> */ B(StaCurrentContextSlot), U8(202),
/* 2707 S> */ B(LdaZero),
- /* 2707 E> */ B(StaContextSlot), R(context), U8(203), U8(0),
+ /* 2707 E> */ B(StaCurrentContextSlot), U8(203),
/* 2721 S> */ B(LdaZero),
- /* 2721 E> */ B(StaContextSlot), R(context), U8(204), U8(0),
+ /* 2721 E> */ B(StaCurrentContextSlot), U8(204),
/* 2735 S> */ B(LdaZero),
- /* 2735 E> */ B(StaContextSlot), R(context), U8(205), U8(0),
+ /* 2735 E> */ B(StaCurrentContextSlot), U8(205),
/* 2749 S> */ B(LdaZero),
- /* 2749 E> */ B(StaContextSlot), R(context), U8(206), U8(0),
+ /* 2749 E> */ B(StaCurrentContextSlot), U8(206),
/* 2763 S> */ B(LdaZero),
- /* 2763 E> */ B(StaContextSlot), R(context), U8(207), U8(0),
+ /* 2763 E> */ B(StaCurrentContextSlot), U8(207),
/* 2777 S> */ B(LdaZero),
- /* 2777 E> */ B(StaContextSlot), R(context), U8(208), U8(0),
+ /* 2777 E> */ B(StaCurrentContextSlot), U8(208),
/* 2791 S> */ B(LdaZero),
- /* 2791 E> */ B(StaContextSlot), R(context), U8(209), U8(0),
+ /* 2791 E> */ B(StaCurrentContextSlot), U8(209),
/* 2805 S> */ B(LdaZero),
- /* 2805 E> */ B(StaContextSlot), R(context), U8(210), U8(0),
+ /* 2805 E> */ B(StaCurrentContextSlot), U8(210),
/* 2819 S> */ B(LdaZero),
- /* 2819 E> */ B(StaContextSlot), R(context), U8(211), U8(0),
+ /* 2819 E> */ B(StaCurrentContextSlot), U8(211),
/* 2833 S> */ B(LdaZero),
- /* 2833 E> */ B(StaContextSlot), R(context), U8(212), U8(0),
+ /* 2833 E> */ B(StaCurrentContextSlot), U8(212),
/* 2847 S> */ B(LdaZero),
- /* 2847 E> */ B(StaContextSlot), R(context), U8(213), U8(0),
+ /* 2847 E> */ B(StaCurrentContextSlot), U8(213),
/* 2861 S> */ B(LdaZero),
- /* 2861 E> */ B(StaContextSlot), R(context), U8(214), U8(0),
+ /* 2861 E> */ B(StaCurrentContextSlot), U8(214),
/* 2875 S> */ B(LdaZero),
- /* 2875 E> */ B(StaContextSlot), R(context), U8(215), U8(0),
+ /* 2875 E> */ B(StaCurrentContextSlot), U8(215),
/* 2889 S> */ B(LdaZero),
- /* 2889 E> */ B(StaContextSlot), R(context), U8(216), U8(0),
+ /* 2889 E> */ B(StaCurrentContextSlot), U8(216),
/* 2903 S> */ B(LdaZero),
- /* 2903 E> */ B(StaContextSlot), R(context), U8(217), U8(0),
+ /* 2903 E> */ B(StaCurrentContextSlot), U8(217),
/* 2917 S> */ B(LdaZero),
- /* 2917 E> */ B(StaContextSlot), R(context), U8(218), U8(0),
+ /* 2917 E> */ B(StaCurrentContextSlot), U8(218),
/* 2931 S> */ B(LdaZero),
- /* 2931 E> */ B(StaContextSlot), R(context), U8(219), U8(0),
+ /* 2931 E> */ B(StaCurrentContextSlot), U8(219),
/* 2945 S> */ B(LdaZero),
- /* 2945 E> */ B(StaContextSlot), R(context), U8(220), U8(0),
+ /* 2945 E> */ B(StaCurrentContextSlot), U8(220),
/* 2959 S> */ B(LdaZero),
- /* 2959 E> */ B(StaContextSlot), R(context), U8(221), U8(0),
+ /* 2959 E> */ B(StaCurrentContextSlot), U8(221),
/* 2973 S> */ B(LdaZero),
- /* 2973 E> */ B(StaContextSlot), R(context), U8(222), U8(0),
+ /* 2973 E> */ B(StaCurrentContextSlot), U8(222),
/* 2987 S> */ B(LdaZero),
- /* 2987 E> */ B(StaContextSlot), R(context), U8(223), U8(0),
+ /* 2987 E> */ B(StaCurrentContextSlot), U8(223),
/* 3001 S> */ B(LdaZero),
- /* 3001 E> */ B(StaContextSlot), R(context), U8(224), U8(0),
+ /* 3001 E> */ B(StaCurrentContextSlot), U8(224),
/* 3015 S> */ B(LdaZero),
- /* 3015 E> */ B(StaContextSlot), R(context), U8(225), U8(0),
+ /* 3015 E> */ B(StaCurrentContextSlot), U8(225),
/* 3029 S> */ B(LdaZero),
- /* 3029 E> */ B(StaContextSlot), R(context), U8(226), U8(0),
+ /* 3029 E> */ B(StaCurrentContextSlot), U8(226),
/* 3043 S> */ B(LdaZero),
- /* 3043 E> */ B(StaContextSlot), R(context), U8(227), U8(0),
+ /* 3043 E> */ B(StaCurrentContextSlot), U8(227),
/* 3057 S> */ B(LdaZero),
- /* 3057 E> */ B(StaContextSlot), R(context), U8(228), U8(0),
+ /* 3057 E> */ B(StaCurrentContextSlot), U8(228),
/* 3071 S> */ B(LdaZero),
- /* 3071 E> */ B(StaContextSlot), R(context), U8(229), U8(0),
+ /* 3071 E> */ B(StaCurrentContextSlot), U8(229),
/* 3085 S> */ B(LdaZero),
- /* 3085 E> */ B(StaContextSlot), R(context), U8(230), U8(0),
+ /* 3085 E> */ B(StaCurrentContextSlot), U8(230),
/* 3099 S> */ B(LdaZero),
- /* 3099 E> */ B(StaContextSlot), R(context), U8(231), U8(0),
+ /* 3099 E> */ B(StaCurrentContextSlot), U8(231),
/* 3113 S> */ B(LdaZero),
- /* 3113 E> */ B(StaContextSlot), R(context), U8(232), U8(0),
+ /* 3113 E> */ B(StaCurrentContextSlot), U8(232),
/* 3127 S> */ B(LdaZero),
- /* 3127 E> */ B(StaContextSlot), R(context), U8(233), U8(0),
+ /* 3127 E> */ B(StaCurrentContextSlot), U8(233),
/* 3141 S> */ B(LdaZero),
- /* 3141 E> */ B(StaContextSlot), R(context), U8(234), U8(0),
+ /* 3141 E> */ B(StaCurrentContextSlot), U8(234),
/* 3155 S> */ B(LdaZero),
- /* 3155 E> */ B(StaContextSlot), R(context), U8(235), U8(0),
+ /* 3155 E> */ B(StaCurrentContextSlot), U8(235),
/* 3169 S> */ B(LdaZero),
- /* 3169 E> */ B(StaContextSlot), R(context), U8(236), U8(0),
+ /* 3169 E> */ B(StaCurrentContextSlot), U8(236),
/* 3183 S> */ B(LdaZero),
- /* 3183 E> */ B(StaContextSlot), R(context), U8(237), U8(0),
+ /* 3183 E> */ B(StaCurrentContextSlot), U8(237),
/* 3197 S> */ B(LdaZero),
- /* 3197 E> */ B(StaContextSlot), R(context), U8(238), U8(0),
+ /* 3197 E> */ B(StaCurrentContextSlot), U8(238),
/* 3211 S> */ B(LdaZero),
- /* 3211 E> */ B(StaContextSlot), R(context), U8(239), U8(0),
+ /* 3211 E> */ B(StaCurrentContextSlot), U8(239),
/* 3225 S> */ B(LdaZero),
- /* 3225 E> */ B(StaContextSlot), R(context), U8(240), U8(0),
+ /* 3225 E> */ B(StaCurrentContextSlot), U8(240),
/* 3239 S> */ B(LdaZero),
- /* 3239 E> */ B(StaContextSlot), R(context), U8(241), U8(0),
+ /* 3239 E> */ B(StaCurrentContextSlot), U8(241),
/* 3253 S> */ B(LdaZero),
- /* 3253 E> */ B(StaContextSlot), R(context), U8(242), U8(0),
+ /* 3253 E> */ B(StaCurrentContextSlot), U8(242),
/* 3267 S> */ B(LdaZero),
- /* 3267 E> */ B(StaContextSlot), R(context), U8(243), U8(0),
+ /* 3267 E> */ B(StaCurrentContextSlot), U8(243),
/* 3281 S> */ B(LdaZero),
- /* 3281 E> */ B(StaContextSlot), R(context), U8(244), U8(0),
+ /* 3281 E> */ B(StaCurrentContextSlot), U8(244),
/* 3295 S> */ B(LdaZero),
- /* 3295 E> */ B(StaContextSlot), R(context), U8(245), U8(0),
+ /* 3295 E> */ B(StaCurrentContextSlot), U8(245),
/* 3309 S> */ B(LdaZero),
- /* 3309 E> */ B(StaContextSlot), R(context), U8(246), U8(0),
+ /* 3309 E> */ B(StaCurrentContextSlot), U8(246),
/* 3323 S> */ B(LdaZero),
- /* 3323 E> */ B(StaContextSlot), R(context), U8(247), U8(0),
+ /* 3323 E> */ B(StaCurrentContextSlot), U8(247),
/* 3337 S> */ B(LdaZero),
- /* 3337 E> */ B(StaContextSlot), R(context), U8(248), U8(0),
+ /* 3337 E> */ B(StaCurrentContextSlot), U8(248),
/* 3351 S> */ B(LdaZero),
- /* 3351 E> */ B(StaContextSlot), R(context), U8(249), U8(0),
+ /* 3351 E> */ B(StaCurrentContextSlot), U8(249),
/* 3365 S> */ B(LdaZero),
- /* 3365 E> */ B(StaContextSlot), R(context), U8(250), U8(0),
+ /* 3365 E> */ B(StaCurrentContextSlot), U8(250),
/* 3379 S> */ B(LdaZero),
- /* 3379 E> */ B(StaContextSlot), R(context), U8(251), U8(0),
+ /* 3379 E> */ B(StaCurrentContextSlot), U8(251),
/* 3393 S> */ B(LdaZero),
- /* 3393 E> */ B(StaContextSlot), R(context), U8(252), U8(0),
+ /* 3393 E> */ B(StaCurrentContextSlot), U8(252),
/* 3407 S> */ B(LdaZero),
- /* 3407 E> */ B(StaContextSlot), R(context), U8(253), U8(0),
+ /* 3407 E> */ B(StaCurrentContextSlot), U8(253),
/* 3421 S> */ B(LdaZero),
- /* 3421 E> */ B(StaContextSlot), R(context), U8(254), U8(0),
+ /* 3421 E> */ B(StaCurrentContextSlot), U8(254),
/* 3435 S> */ B(LdaZero),
- /* 3435 E> */ B(StaContextSlot), R(context), U8(255), U8(0),
- /* 3438 S> */ B(LdrUndefined), R(2),
- /* 3438 E> */ B(LdrGlobal), U8(2), R(1),
- /* 3438 E> */ B(Call), R(1), R(2), U8(1), U8(0),
+ /* 3435 E> */ B(StaCurrentContextSlot), U8(255),
+ /* 3438 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(4),
+ B(Star), R(1),
+ /* 3438 E> */ B(Call), R(1), R(2), U8(1), U8(2),
/* 3454 S> */ B(LdaSmi), U8(100),
- /* 3454 E> */ B(Wide), B(StaContextSlot), R16(context), U16(256), U16(0),
- /* 3459 S> */ B(Wide), B(LdaContextSlot), R16(context), U16(256), U16(0),
+ /* 3454 E> */ B(Wide), B(StaCurrentContextSlot), U16(256),
+ /* 3459 S> */ B(Wide), B(LdaCurrentContextSlot), U16(256),
/* 3468 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 29e0ec3582..b17f3ecf20 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -31,12 +31,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 16
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(ToNumber), R(1),
+ B(Ldar), R(1),
B(Inc), U8(2),
B(Star), R(0),
B(Ldar), R(1),
@@ -73,12 +74,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 16
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
B(Star), R(0),
/* 45 S> */ B(ToNumber), R(1),
+ B(Ldar), R(1),
B(Dec), U8(2),
B(Star), R(0),
B(Ldar), R(1),
@@ -95,13 +97,14 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 26
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
/* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
B(ToNumber), R(2),
+ B(Ldar), R(2),
B(Inc), U8(6),
/* 66 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(4),
B(Ldar), R(2),
@@ -143,7 +146,7 @@ snippet: "
"
frame size: 5
parameter count: 1
-bytecode array length: 29
+bytecode array length: 31
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
@@ -153,6 +156,7 @@ bytecodes: [
/* 72 S> */ B(Ldar), R(0),
/* 81 E> */ B(LdaKeyedProperty), R(1), U8(2),
B(ToNumber), R(4),
+ B(Ldar), R(4),
B(Dec), U8(6),
/* 86 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(4),
B(Ldar), R(4),
@@ -197,18 +201,18 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 27
+bytecode array length: 21
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 53 S> */ B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 78 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ /* 78 S> */ B(LdaCurrentContextSlot), U8(4),
B(Inc), U8(2),
- /* 87 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 87 E> */ B(StaCurrentContextSlot), U8(4),
/* 90 S> */ B(Return),
]
constant pool: [
@@ -223,19 +227,20 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 31
+bytecode array length: 27
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(1),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 53 S> */ B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
- /* 78 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ /* 78 S> */ B(LdaCurrentContextSlot), U8(4),
B(ToNumber), R(2),
+ B(Ldar), R(2),
B(Dec), U8(2),
- /* 86 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 86 E> */ B(StaCurrentContextSlot), U8(4),
B(Ldar), R(2),
/* 90 S> */ B(Return),
]
@@ -251,7 +256,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 26
+bytecode array length: 28
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), U8(1),
@@ -260,6 +265,7 @@ bytecodes: [
B(Star), R(1),
/* 63 S> */ B(Ldar), R(0),
B(ToNumber), R(3),
+ B(Ldar), R(3),
B(Inc), U8(2),
B(Star), R(0),
B(LdaSmi), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index 1c12767e09..cc073cfd66 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -74,12 +74,12 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 19
+bytecode array length: 17
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
B(Star), R(0),
/* 10 E> */ B(StackCheck),
@@ -99,16 +99,16 @@ snippet: "
"
frame size: 2
parameter count: 4
-bytecode array length: 28
+bytecode array length: 22
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(1),
B(Ldar), R(arg0),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(arg1),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
B(Ldar), R(arg2),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
B(Star), R(0),
/* 10 E> */ B(StackCheck),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index c960237f09..851b953309 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -80,7 +80,7 @@ snippet: "
"
frame size: 4
parameter count: 2
-bytecode array length: 26
+bytecode array length: 27
bytecodes: [
B(CreateUnmappedArguments),
B(Star), R(2),
@@ -91,7 +91,8 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
B(Mov), R(arg0), R(1),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdrKeyedProperty), R(0), U8(2), R(3),
+ /* 44 E> */ B(LdaKeyedProperty), R(0), U8(2),
+ B(Star), R(3),
B(LdaZero),
/* 59 E> */ B(LdaKeyedProperty), R(2), U8(4),
B(Add), R(3), U8(6),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index a61e993e52..00fa0180e6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -104,7 +104,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 29
+bytecode array length: 31
bytecodes: [
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -113,8 +113,10 @@ bytecodes: [
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
/* 0 E> */ B(StackCheck),
- /* 16 S> */ B(LdrUndefined), R(2),
- B(LdrGlobal), U8(2), R(1),
+ /* 16 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ B(LdaGlobal), U8(2),
+ B(Star), R(1),
/* 16 E> */ B(Call), R(1), R(2), U8(1), U8(4),
B(Star), R(0),
/* 20 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index d7d60aa26f..12e421e883 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -98,16 +98,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 29
+bytecode array length: 26
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
/* 56 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Ldar), R(1),
- /* 56 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 56 E> */ B(StaCurrentContextSlot), U8(4),
/* 64 S> */ B(CreateClosure), U8(1), U8(2),
- /* 93 S> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(1),
+ /* 93 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(1),
B(LdaSmi), U8(1),
B(DeletePropertyStrict), R(1),
/* 113 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
index 08a5aaa871..662ecf14d9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
@@ -30,13 +30,13 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 13
+bytecode array length: 12
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 55 S> */ B(LdaSmi), U8(100),
B(Star), R(0),
- /* 42 S> */ B(LdrUndefined), R(1),
- B(Ldar), R(1),
+ /* 42 S> */ B(LdaUndefined),
+ B(Star), R(1),
B(Star), R(2),
/* 63 S> */ B(Nop),
/* 73 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index 07bd99c1f0..a311aa9101 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -11,20 +11,21 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 69
+bytecode array length: 62
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaConstant), U8(0),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 34 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 41 E> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
@@ -38,7 +39,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 41 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 41 E> */ B(Call), R(1), R(2), U8(2), U8(2),
/* 53 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index a23bb90226..c8cbc7a61d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -147,40 +147,42 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 83
+bytecode array length: 85
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(1),
B(Mov), R(1), R(0),
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(9),
- B(JumpIfUndefined), U8(68),
- B(JumpIfNull), U8(66),
+ B(JumpIfUndefined), U8(70),
+ B(JumpIfNull), U8(68),
B(ToObject), R(1),
B(ForInPrepare), R(1), R(2),
B(LdaZero),
B(Star), R(5),
/* 68 S> */ B(ForInContinue), R(5), R(4),
- B(JumpIfFalse), U8(53),
+ B(JumpIfFalse), U8(55),
B(ForInNext), R(1), R(5), R(2), U8(12),
- B(JumpIfUndefined), U8(39),
+ B(JumpIfUndefined), U8(41),
B(Star), R(6),
/* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(10),
/* 62 E> */ B(StackCheck),
/* 95 S> */ B(Nop),
- /* 100 E> */ B(LdrNamedProperty), R(0), U8(2), U8(4), R(6),
+ /* 100 E> */ B(LdaNamedProperty), R(0), U8(2), U8(4),
+ B(Star), R(6),
B(LdaSmi), U8(10),
/* 106 E> */ B(TestEqual), R(6), U8(6),
B(JumpIfFalse), U8(4),
- /* 113 S> */ B(Jump), U8(17),
+ /* 113 S> */ B(Jump), U8(18),
/* 125 S> */ B(Nop),
- /* 130 E> */ B(LdrNamedProperty), R(0), U8(2), U8(7), R(6),
+ /* 130 E> */ B(LdaNamedProperty), R(0), U8(2), U8(7),
+ B(Star), R(6),
B(LdaSmi), U8(20),
/* 136 E> */ B(TestEqual), R(6), U8(9),
B(JumpIfFalse), U8(4),
/* 143 S> */ B(Jump), U8(9),
B(ForInStep), R(5),
B(Star), R(5),
- B(JumpLoop), U8(-53), U8(0),
+ B(JumpLoop), U8(-55), U8(0),
B(LdaUndefined),
/* 152 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 3ede3ec597..96545a94d8 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -11,7 +11,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 279
+bytecode array length: 282
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
@@ -21,19 +21,22 @@ bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(14),
B(LdaConstant), U8(1),
- /* 48 E> */ B(LdrKeyedProperty), R(14), U8(4), R(13),
- /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(2),
+ /* 48 E> */ B(LdaKeyedProperty), R(14), U8(4),
+ B(Star), R(13),
+ /* 48 E> */ B(CallProperty), R(13), R(14), U8(1), U8(2),
B(Star), R(2),
- /* 45 S> */ B(LdrNamedProperty), R(2), U8(2), U8(8), R(14),
- /* 45 E> */ B(Call), R(14), R(2), U8(1), U8(6),
+ /* 45 S> */ B(LdaNamedProperty), R(2), U8(2), U8(8),
+ B(Star), R(13),
+ /* 45 E> */ B(CallProperty), R(13), R(2), U8(1), U8(6),
B(Star), R(3),
/* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
B(LdaNamedProperty), R(3), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(24),
- B(LdrNamedProperty), R(3), U8(4), U8(12), R(5),
+ B(JumpIfToBooleanTrue), U8(25),
+ B(LdaNamedProperty), R(3), U8(4), U8(12),
+ B(Star), R(5),
B(LdaSmi), U8(2),
B(Star), R(4),
B(Mov), R(5), R(0),
@@ -41,8 +44,8 @@ bytecodes: [
B(Mov), R(0), R(1),
B(LdaZero),
B(Star), R(4),
- B(JumpLoop), U8(-49), U8(0),
- B(Jump), U8(37),
+ B(JumpLoop), U8(-51), U8(0),
+ B(Jump), U8(36),
B(Star), R(13),
B(Ldar), R(closure),
B(CreateCatchContext), R(13), U8(5), U8(6),
@@ -53,7 +56,8 @@ bytecodes: [
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
B(Star), R(4),
- B(LdrContextSlot), R(context), U8(4), U8(0), R(13),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
B(PopContext), R(8),
B(LdaSmi), U8(-1),
@@ -66,11 +70,12 @@ bytecodes: [
B(Star), R(11),
B(LdaZero),
B(TestEqualStrict), R(4), U8(15),
- B(JumpIfTrue), U8(121),
+ B(JumpIfTrue), U8(122),
B(LdaUndefined),
B(TestEqualStrict), R(2), U8(16),
- B(JumpIfTrue), U8(115),
- B(LdrNamedProperty), R(2), U8(7), U8(17), R(6),
+ B(JumpIfTrue), U8(116),
+ B(LdaNamedProperty), R(2), U8(7), U8(17),
+ B(Star), R(6),
B(LdaNull),
B(TestEqual), R(6), U8(19),
B(JumpIfFalse), U8(4),
@@ -137,9 +142,9 @@ constant pool: [
FIXED_ARRAY_TYPE,
]
handlers: [
- [7, 118, 124],
- [10, 81, 83],
- [201, 211, 213],
+ [7, 120, 126],
+ [10, 84, 86],
+ [204, 214, 216],
]
---
@@ -149,7 +154,7 @@ snippet: "
"
frame size: 16
parameter count: 1
-bytecode array length: 290
+bytecode array length: 293
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
@@ -159,19 +164,22 @@ bytecodes: [
B(Mov), R(context), R(12),
B(Mov), R(context), R(13),
/* 68 S> */ B(LdaConstant), U8(1),
- /* 68 E> */ B(LdrKeyedProperty), R(0), U8(4), R(14),
- /* 68 E> */ B(Call), R(14), R(0), U8(1), U8(2),
+ /* 68 E> */ B(LdaKeyedProperty), R(0), U8(4),
+ B(Star), R(14),
+ /* 68 E> */ B(CallProperty), R(14), R(0), U8(1), U8(2),
B(Star), R(3),
- /* 65 S> */ B(LdrNamedProperty), R(3), U8(2), U8(8), R(15),
- /* 65 E> */ B(Call), R(15), R(3), U8(1), U8(6),
+ /* 65 S> */ B(LdaNamedProperty), R(3), U8(2), U8(8),
+ B(Star), R(14),
+ /* 65 E> */ B(CallProperty), R(14), R(3), U8(1), U8(6),
B(Star), R(4),
/* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
B(LdaNamedProperty), R(4), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(26),
- B(LdrNamedProperty), R(4), U8(4), U8(12), R(6),
+ B(JumpIfToBooleanTrue), U8(27),
+ B(LdaNamedProperty), R(4), U8(4), U8(12),
+ B(Star), R(6),
B(LdaSmi), U8(2),
B(Star), R(5),
B(Mov), R(6), R(1),
@@ -180,8 +188,8 @@ bytecodes: [
/* 73 S> */ B(LdaZero),
B(Star), R(10),
B(Mov), R(1), R(11),
- B(Jump), U8(51),
- B(Jump), U8(37),
+ B(Jump), U8(50),
+ B(Jump), U8(36),
B(Star), R(14),
B(Ldar), R(closure),
B(CreateCatchContext), R(14), U8(5), U8(6),
@@ -192,7 +200,8 @@ bytecodes: [
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
B(Star), R(5),
- B(LdrContextSlot), R(context), U8(4), U8(0), R(14),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(14),
B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
B(PopContext), R(9),
B(LdaSmi), U8(-1),
@@ -205,11 +214,12 @@ bytecodes: [
B(Star), R(12),
B(LdaZero),
B(TestEqualStrict), R(5), U8(15),
- B(JumpIfTrue), U8(121),
+ B(JumpIfTrue), U8(122),
B(LdaUndefined),
B(TestEqualStrict), R(3), U8(16),
- B(JumpIfTrue), U8(115),
- B(LdrNamedProperty), R(3), U8(7), U8(17), R(7),
+ B(JumpIfTrue), U8(116),
+ B(LdaNamedProperty), R(3), U8(7), U8(17),
+ B(Star), R(7),
B(LdaNull),
B(TestEqual), R(7), U8(19),
B(JumpIfFalse), U8(4),
@@ -281,9 +291,9 @@ constant pool: [
FIXED_ARRAY_TYPE,
]
handlers: [
- [11, 118, 124],
- [14, 81, 83],
- [202, 212, 214],
+ [11, 120, 126],
+ [14, 84, 86],
+ [205, 215, 217],
]
---
@@ -295,7 +305,7 @@ snippet: "
"
frame size: 15
parameter count: 1
-bytecode array length: 297
+bytecode array length: 300
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
@@ -305,19 +315,22 @@ bytecodes: [
/* 48 S> */ B(CreateArrayLiteral), U8(0), U8(0), U8(9),
B(Star), R(14),
B(LdaConstant), U8(1),
- /* 48 E> */ B(LdrKeyedProperty), R(14), U8(4), R(13),
- /* 48 E> */ B(Call), R(13), R(14), U8(1), U8(2),
+ /* 48 E> */ B(LdaKeyedProperty), R(14), U8(4),
+ B(Star), R(13),
+ /* 48 E> */ B(CallProperty), R(13), R(14), U8(1), U8(2),
B(Star), R(2),
- /* 45 S> */ B(LdrNamedProperty), R(2), U8(2), U8(8), R(14),
- /* 45 E> */ B(Call), R(14), R(2), U8(1), U8(6),
+ /* 45 S> */ B(LdaNamedProperty), R(2), U8(2), U8(8),
+ B(Star), R(13),
+ /* 45 E> */ B(CallProperty), R(13), R(2), U8(1), U8(6),
B(Star), R(3),
/* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
B(LdaNamedProperty), R(3), U8(3), U8(10),
- B(JumpIfToBooleanTrue), U8(42),
- B(LdrNamedProperty), R(3), U8(4), U8(12), R(5),
+ B(JumpIfToBooleanTrue), U8(43),
+ B(LdaNamedProperty), R(3), U8(4), U8(12),
+ B(Star), R(5),
B(LdaSmi), U8(2),
B(Star), R(4),
B(Mov), R(5), R(0),
@@ -333,8 +346,8 @@ bytecodes: [
/* 104 S> */ B(Jump), U8(8),
B(LdaZero),
B(Star), R(4),
- B(JumpLoop), U8(-67), U8(0),
- B(Jump), U8(37),
+ B(JumpLoop), U8(-69), U8(0),
+ B(Jump), U8(36),
B(Star), R(13),
B(Ldar), R(closure),
B(CreateCatchContext), R(13), U8(5), U8(6),
@@ -345,7 +358,8 @@ bytecodes: [
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
B(Star), R(4),
- B(LdrContextSlot), R(context), U8(4), U8(0), R(13),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
B(PopContext), R(8),
B(LdaSmi), U8(-1),
@@ -358,11 +372,12 @@ bytecodes: [
B(Star), R(11),
B(LdaZero),
B(TestEqualStrict), R(4), U8(17),
- B(JumpIfTrue), U8(121),
+ B(JumpIfTrue), U8(122),
B(LdaUndefined),
B(TestEqualStrict), R(2), U8(18),
- B(JumpIfTrue), U8(115),
- B(LdrNamedProperty), R(2), U8(7), U8(19), R(6),
+ B(JumpIfTrue), U8(116),
+ B(LdaNamedProperty), R(2), U8(7), U8(19),
+ B(Star), R(6),
B(LdaNull),
B(TestEqual), R(6), U8(21),
B(JumpIfFalse), U8(4),
@@ -429,9 +444,9 @@ constant pool: [
FIXED_ARRAY_TYPE,
]
handlers: [
- [7, 136, 142],
- [10, 99, 101],
- [219, 229, 231],
+ [7, 138, 144],
+ [10, 102, 104],
+ [222, 232, 234],
]
---
@@ -441,7 +456,7 @@ snippet: "
"
frame size: 14
parameter count: 1
-bytecode array length: 303
+bytecode array length: 307
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(CreateObjectLiteral), U8(0), U8(0), U8(1), R(8),
@@ -453,30 +468,34 @@ bytecodes: [
/* 77 S> */ B(CreateArrayLiteral), U8(1), U8(1), U8(9),
B(Star), R(13),
B(LdaConstant), U8(2),
- /* 77 E> */ B(LdrKeyedProperty), R(13), U8(4), R(12),
- /* 77 E> */ B(Call), R(12), R(13), U8(1), U8(2),
+ /* 77 E> */ B(LdaKeyedProperty), R(13), U8(4),
+ B(Star), R(12),
+ /* 77 E> */ B(CallProperty), R(12), R(13), U8(1), U8(2),
B(Star), R(1),
- /* 74 S> */ B(LdrNamedProperty), R(1), U8(3), U8(8), R(13),
- /* 74 E> */ B(Call), R(13), R(1), U8(1), U8(6),
+ /* 74 S> */ B(LdaNamedProperty), R(1), U8(3), U8(8),
+ B(Star), R(12),
+ /* 74 E> */ B(CallProperty), R(12), R(1), U8(1), U8(6),
B(Star), R(2),
/* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
B(LdaNamedProperty), R(2), U8(4), U8(10),
- B(JumpIfToBooleanTrue), U8(29),
- /* 67 E> */ B(LdrNamedProperty), R(2), U8(5), U8(12), R(4),
+ B(JumpIfToBooleanTrue), U8(31),
+ /* 67 E> */ B(LdaNamedProperty), R(2), U8(5), U8(12),
+ B(Star), R(4),
B(LdaSmi), U8(2),
B(Star), R(3),
B(Ldar), R(4),
B(StaNamedPropertySloppy), R(0), U8(6), U8(14),
/* 62 E> */ B(StackCheck),
/* 88 S> */ B(Nop),
- /* 96 E> */ B(LdrNamedProperty), R(0), U8(6), U8(16), R(9),
+ /* 96 E> */ B(LdaNamedProperty), R(0), U8(6), U8(16),
+ B(Star), R(9),
B(LdaZero),
B(Star), R(8),
- B(Jump), U8(51),
- B(Jump), U8(37),
+ B(Jump), U8(50),
+ B(Jump), U8(36),
B(Star), R(12),
B(Ldar), R(closure),
B(CreateCatchContext), R(12), U8(7), U8(8),
@@ -487,7 +506,8 @@ bytecodes: [
B(JumpIfFalse), U8(6),
B(LdaSmi), U8(1),
B(Star), R(3),
- B(LdrContextSlot), R(context), U8(4), U8(0), R(12),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(12),
B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
B(PopContext), R(7),
B(LdaSmi), U8(-1),
@@ -500,11 +520,12 @@ bytecodes: [
B(Star), R(10),
B(LdaZero),
B(TestEqualStrict), R(3), U8(19),
- B(JumpIfTrue), U8(121),
+ B(JumpIfTrue), U8(122),
B(LdaUndefined),
B(TestEqualStrict), R(1), U8(20),
- B(JumpIfTrue), U8(115),
- B(LdrNamedProperty), R(1), U8(9), U8(21), R(5),
+ B(JumpIfTrue), U8(116),
+ B(LdaNamedProperty), R(1), U8(9), U8(21),
+ B(Star), R(5),
B(LdaNull),
B(TestEqual), R(5), U8(23),
B(JumpIfFalse), U8(4),
@@ -578,8 +599,8 @@ constant pool: [
FIXED_ARRAY_TYPE,
]
handlers: [
- [15, 131, 137],
- [18, 94, 96],
- [215, 225, 227],
+ [15, 134, 140],
+ [18, 98, 100],
+ [219, 229, 231],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index 9a81b88a03..19aa219ac1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -29,10 +29,11 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdrUndefined), R(1),
+ /* 34 S> */ B(LdaUndefined),
+ B(Star), R(1),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 56 E> */ B(Call), R(0), R(1), U8(1), U8(2),
@@ -50,10 +51,11 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 18
+bytecode array length: 19
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdrUndefined), R(1),
+ /* 34 S> */ B(LdaUndefined),
+ B(Star), R(1),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
B(LdaSmi), U8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index 840aa9ae42..7ca3be2345 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -13,7 +13,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 212
+bytecode array length: 204
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -21,8 +21,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(61),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(53),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -30,16 +30,17 @@ bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(4),
- /* 11 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(6),
- B(Ldar), R(6),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(6),
B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 11 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(5),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(6),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(6),
B(LdaZero),
B(SuspendGenerator), R(6),
B(Ldar), R(5),
@@ -64,10 +65,11 @@ bytecodes: [
B(Star), R(3),
B(LdaZero),
B(Star), R(2),
- B(Jump), U8(35),
+ B(Jump), U8(36),
B(Ldar), R(7),
/* 11 E> */ B(Throw),
- B(LdrUndefined), R(5),
+ B(LdaUndefined),
+ B(Star), R(5),
B(LdaTrue),
B(Star), R(6),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
@@ -83,7 +85,8 @@ bytecodes: [
B(Star), R(2),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(4),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(5),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(5),
B(CallRuntime), U16(Runtime::k_GeneratorClose), R(5), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
B(LdaZero),
@@ -108,7 +111,7 @@ bytecodes: [
constant pool: [
]
handlers: [
- [41, 145, 151],
+ [39, 138, 144],
]
---
@@ -118,7 +121,7 @@ snippet: "
"
frame size: 11
parameter count: 1
-bytecode array length: 310
+bytecode array length: 301
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(28),
@@ -126,11 +129,11 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(68),
+ B(JumpIfTrue), U8(60),
B(LdaSmi), U8(1),
B(TestEqualStrict), R(1), U8(0),
B(JumpIfTrueConstant), U8(0),
- B(LdaSmi), U8(76),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -138,16 +141,17 @@ bytecodes: [
B(CreateFunctionContext), U8(2),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(4),
- /* 11 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(6),
- B(Ldar), R(6),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(6),
B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 11 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(5),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(6),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(6),
B(LdaZero),
B(SuspendGenerator), R(6),
B(Ldar), R(5),
@@ -181,7 +185,8 @@ bytecodes: [
B(Star), R(6),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
B(Star), R(5),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(6),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(6),
B(LdaSmi), U8(1),
B(SuspendGenerator), R(6),
B(Ldar), R(5),
@@ -206,10 +211,11 @@ bytecodes: [
B(Star), R(3),
B(LdaSmi), U8(1),
B(Star), R(2),
- B(Jump), U8(35),
+ B(Jump), U8(36),
B(Ldar), R(7),
/* 16 E> */ B(Throw),
- B(LdrUndefined), R(5),
+ B(LdaUndefined),
+ B(Star), R(5),
B(LdaTrue),
B(Star), R(6),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(5), U8(2),
@@ -225,7 +231,8 @@ bytecodes: [
B(Star), R(2),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(4),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(5),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(5),
B(CallRuntime), U16(Runtime::k_GeneratorClose), R(5), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(4), U8(1),
B(LdaZero),
@@ -253,10 +260,10 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
- Smi [141],
+ Smi [132],
]
handlers: [
- [48, 233, 239],
+ [46, 225, 231],
]
---
@@ -266,7 +273,7 @@ snippet: "
"
frame size: 17
parameter count: 1
-bytecode array length: 805
+bytecode array length: 796
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(28),
@@ -274,11 +281,11 @@ bytecodes: [
B(Star), R(3),
B(LdaZero),
B(TestEqualStrict), R(3), U8(0),
- B(JumpIfTrue), U8(68),
+ B(JumpIfTrue), U8(60),
B(LdaSmi), U8(1),
B(TestEqualStrict), R(3), U8(0),
B(JumpIfTrueConstant), U8(3),
- B(LdaSmi), U8(76),
+ B(LdaSmi), U8(77),
B(Star), R(4),
B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
B(LdaSmi), U8(-2),
@@ -286,16 +293,17 @@ bytecodes: [
B(CreateFunctionContext), U8(9),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(6),
- /* 11 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(8),
- B(Ldar), R(8),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(8),
B(Mov), R(closure), R(7),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(7), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 11 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(7), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(7),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(8),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(8),
B(LdaZero),
B(SuspendGenerator), R(8),
B(Ldar), R(7),
@@ -320,14 +328,14 @@ bytecodes: [
B(Star), R(5),
B(LdaZero),
B(Star), R(4),
- B(JumpConstant), U8(20),
+ B(JumpConstant), U8(19),
B(Ldar), R(9),
/* 11 E> */ B(Throw),
B(Ldar), R(closure),
B(CreateBlockContext), U8(0),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(LdaZero),
B(StaContextSlot), R(1), U8(9), U8(0),
B(Mov), R(context), R(9),
@@ -335,32 +343,38 @@ bytecodes: [
/* 30 S> */ B(CreateArrayLiteral), U8(1), U8(0), U8(9),
B(Star), R(12),
B(LdaConstant), U8(2),
- /* 30 E> */ B(LdrKeyedProperty), R(12), U8(4), R(11),
- /* 30 E> */ B(Call), R(11), R(12), U8(1), U8(2),
+ /* 30 E> */ B(LdaKeyedProperty), R(12), U8(4),
+ B(Star), R(11),
+ /* 30 E> */ B(CallProperty), R(11), R(12), U8(1), U8(2),
/* 30 E> */ B(StaContextSlot), R(1), U8(7), U8(0),
B(LdaSmi), U8(-2),
B(TestEqual), R(3), U8(0),
B(JumpIfTrue), U8(18),
B(LdaSmi), U8(1),
B(TestEqualStrict), R(3), U8(0),
- B(JumpIfTrueConstant), U8(9),
- B(LdaSmi), U8(76),
+ B(JumpIfTrueConstant), U8(8),
+ B(LdaSmi), U8(77),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
- /* 27 S> */ B(LdrContextSlot), R(1), U8(7), U8(0), R(13),
- B(LdrNamedProperty), R(13), U8(4), U8(8), R(12),
- /* 27 E> */ B(Call), R(12), R(13), U8(1), U8(6),
+ /* 27 S> */ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(4), U8(8),
+ B(Star), R(11),
+ /* 27 E> */ B(CallProperty), R(11), R(12), U8(1), U8(6),
/* 27 E> */ B(StaContextSlot), R(1), U8(8), U8(0),
B(Star), R(11),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(ToBooleanLogicalNot),
- B(JumpIfFalse), U8(12),
- B(LdrContextSlot), R(1), U8(8), U8(0), R(11),
+ B(JumpIfFalse), U8(13),
+ B(LdaContextSlot), R(1), U8(8), U8(0),
+ B(Star), R(11),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(LdrContextSlot), R(1), U8(8), U8(0), R(11),
+ B(LdaContextSlot), R(1), U8(8), U8(0),
+ B(Star), R(11),
B(LdaNamedProperty), R(11), U8(5), U8(10),
- B(JumpIfToBooleanTrueConstant), U8(10),
- B(LdrContextSlot), R(1), U8(8), U8(0), R(11),
+ B(JumpIfToBooleanTrueConstant), U8(9),
+ B(LdaContextSlot), R(1), U8(8), U8(0),
+ B(Star), R(11),
B(LdaNamedProperty), R(11), U8(6), U8(12),
B(StaContextSlot), R(1), U8(10), U8(0),
B(LdaSmi), U8(2),
@@ -372,20 +386,17 @@ bytecodes: [
B(CreateBlockContext), U8(7),
B(PushContext), R(2),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(LdaContextSlot), R(1), U8(6), U8(0),
- B(StaContextSlot), R(context), U8(4), U8(0),
- /* 36 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(8),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(13), U8(1),
+ B(StaCurrentContextSlot), U8(4),
+ /* 36 S> */ B(LdaCurrentContextSlot), U8(4),
B(Star), R(11),
B(LdaFalse),
B(Star), R(12),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(11), U8(2),
B(Star), R(11),
- B(LdrContextSlot), R(1), U8(5), U8(0), R(12),
+ B(LdaContextSlot), R(1), U8(5), U8(0),
+ B(Star), R(12),
B(LdaSmi), U8(1),
B(SuspendGenerator), R(12),
B(Ldar), R(11),
@@ -422,20 +433,22 @@ bytecodes: [
B(PopContext), R(2),
B(LdaZero),
B(StaContextSlot), R(1), U8(9), U8(0),
- B(Wide), B(JumpLoop), U16(-232), U16(0),
+ B(Wide), B(JumpLoop), U16(-221), U16(0),
B(Jump), U8(44),
B(Star), R(11),
B(Ldar), R(closure),
- B(CreateCatchContext), R(11), U8(11), U8(12),
+ B(CreateCatchContext), R(11), U8(10), U8(11),
B(Star), R(10),
B(PushContext), R(2),
- B(LdrContextSlot), R(1), U8(9), U8(0), R(11),
+ B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Star), R(11),
B(LdaSmi), U8(2),
B(TestEqualStrict), R(11), U8(14),
B(JumpIfFalse), U8(8),
B(LdaSmi), U8(1),
B(StaContextSlot), R(1), U8(9), U8(0),
- B(LdrContextSlot), R(context), U8(4), U8(0), R(11),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(11),
B(CallRuntime), U16(Runtime::kReThrow), R(11), U8(1),
B(PopContext), R(2),
B(LdaSmi), U8(-1),
@@ -446,62 +459,73 @@ bytecodes: [
B(Star), R(7),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(9),
- B(LdrContextSlot), R(1), U8(9), U8(0), R(10),
+ B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Star), R(10),
B(LdaZero),
B(TestEqualStrict), R(10), U8(15),
- B(JumpIfTrueConstant), U8(18),
- B(LdrContextSlot), R(1), U8(7), U8(0), R(10),
+ B(JumpIfTrueConstant), U8(17),
+ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(10),
B(LdaUndefined),
B(TestEqualStrict), R(10), U8(16),
- B(JumpIfTrueConstant), U8(19),
- B(LdrContextSlot), R(1), U8(7), U8(0), R(10),
- B(LdaNamedProperty), R(10), U8(13), U8(17),
+ B(JumpIfTrueConstant), U8(18),
+ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(12), U8(17),
B(StaContextSlot), R(1), U8(11), U8(0),
- B(LdrContextSlot), R(1), U8(11), U8(0), R(10),
+ B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(Star), R(10),
B(LdaNull),
B(TestEqual), R(10), U8(19),
B(JumpIfFalse), U8(4),
- B(JumpConstant), U8(17),
- B(LdrContextSlot), R(1), U8(9), U8(0), R(10),
+ B(JumpConstant), U8(16),
+ B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Star), R(10),
B(LdaSmi), U8(1),
B(TestEqualStrict), R(10), U8(20),
- B(JumpIfFalse), U8(76),
+ B(JumpIfFalse), U8(78),
B(LdaContextSlot), R(1), U8(11), U8(0),
B(TypeOf),
B(Star), R(10),
- B(LdaConstant), U8(14),
+ B(LdaConstant), U8(13),
B(TestEqualStrict), R(10), U8(21),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
B(Wide), B(LdaSmi), U16(130),
B(Star), R(10),
- B(LdaConstant), U8(15),
+ B(LdaConstant), U8(14),
B(Star), R(11),
B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
B(Throw),
B(Mov), R(context), R(10),
- B(LdrContextSlot), R(1), U8(11), U8(0), R(11),
- B(LdrContextSlot), R(1), U8(7), U8(0), R(12),
+ B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(12),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
B(Jump), U8(23),
B(Star), R(11),
B(Ldar), R(closure),
- B(CreateCatchContext), R(11), U8(11), U8(16),
+ B(CreateCatchContext), R(11), U8(10), U8(15),
B(Star), R(10),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Ldar), R(10),
B(PushContext), R(2),
B(PopContext), R(2),
- B(Jump), U8(43),
- B(LdrContextSlot), R(1), U8(11), U8(0), R(10),
- B(LdrContextSlot), R(1), U8(7), U8(0), R(11),
+ B(Jump), U8(47),
+ B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(Star), R(10),
+ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(11),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
B(StaContextSlot), R(1), U8(12), U8(0),
- B(LdrContextSlot), R(1), U8(12), U8(0), R(10),
+ B(LdaContextSlot), R(1), U8(12), U8(0),
+ B(Star), R(10),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
- B(Jump), U8(12),
- B(LdrContextSlot), R(1), U8(12), U8(0), R(10),
+ B(Jump), U8(13),
+ B(LdaContextSlot), R(1), U8(12), U8(0),
+ B(Star), R(10),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(9), U8(1),
B(LdaZero),
@@ -516,15 +540,16 @@ bytecodes: [
B(LdaSmi), U8(1),
B(Star), R(4),
B(Mov), R(8), R(5),
- B(Jump), U8(47),
+ B(Jump), U8(48),
B(PopContext), R(1),
B(PopContext), R(1),
B(LdaSmi), U8(2),
B(Star), R(4),
B(Mov), R(8), R(5),
- B(Jump), U8(34),
+ B(Jump), U8(35),
B(PopContext), R(1),
- B(LdrUndefined), R(7),
+ B(LdaUndefined),
+ B(Star), R(7),
B(LdaTrue),
B(Star), R(8),
B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(7), U8(2),
@@ -540,7 +565,8 @@ bytecodes: [
B(Star), R(4),
B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), R(0), U8(0),
B(Star), R(6),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(7),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(7),
B(CallRuntime), U16(Runtime::k_GeneratorClose), R(7), U8(1),
B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), R(6), U8(1),
B(LdaZero),
@@ -576,29 +602,28 @@ constant pool: [
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
SYMBOL_TYPE,
- Smi [158],
+ Smi [149],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
- Smi [146],
- Smi [167],
+ Smi [135],
+ Smi [152],
ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["function"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
- Smi [129],
- Smi [166],
- Smi [155],
+ Smi [136],
+ Smi [176],
+ Smi [164],
Smi [601],
]
handlers: [
- [48, 718, 724],
- [153, 458, 464],
- [156, 414, 416],
- [572, 586, 588],
+ [46, 710, 716],
+ [143, 438, 444],
+ [146, 394, 396],
+ [557, 573, 575],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index f222e9034b..f8bece5c39 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -14,10 +14,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 13
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdrGlobal), U8(2), R(0),
+ /* 31 S> */ B(LdaGlobal), U8(2),
+ B(Star), R(0),
B(BitwiseAndSmi), U8(1), R(0), U8(4),
/* 45 E> */ B(StaGlobalSloppy), U8(0), U8(5),
/* 51 S> */ B(Return),
@@ -36,10 +37,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 12
+bytecode array length: 13
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdrGlobal), U8(2), R(0),
+ /* 32 S> */ B(LdaGlobal), U8(2),
+ B(Star), R(0),
B(AddSmi), U8(1), R(0), U8(4),
/* 51 E> */ B(StaGlobalSloppy), U8(0), U8(5),
/* 57 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index 2c6616bb58..1e0d530af5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -36,11 +36,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 15
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 31 S> */ B(LdaGlobal), U8(2),
B(ToNumber), R(0),
+ B(Ldar), R(0),
B(Dec), U8(6),
/* 44 E> */ B(StaGlobalSloppy), U8(0), U8(4),
B(Ldar), R(0),
@@ -82,11 +83,12 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 13
+bytecode array length: 15
bytecodes: [
/* 27 E> */ B(StackCheck),
/* 32 S> */ B(LdaGlobal), U8(2),
B(ToNumber), R(0),
+ B(Ldar), R(0),
B(Inc), U8(6),
/* 50 E> */ B(StaGlobalSloppy), U8(0), U8(4),
B(Ldar), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
index 66583f3389..e9540bb446 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -16,10 +16,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 10
bytecodes: [
/* 32 E> */ B(StackCheck),
- /* 39 S> */ B(LdrGlobal), U8(2), R(0),
+ /* 39 S> */ B(LdaGlobal), U8(2),
+ B(Star), R(0),
B(LdaConstant), U8(0),
B(DeletePropertySloppy), R(0),
/* 58 S> */ B(Return),
@@ -41,10 +42,11 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 10
bytecodes: [
/* 28 E> */ B(StackCheck),
- /* 51 S> */ B(LdrGlobal), U8(2), R(0),
+ /* 51 S> */ B(LdaGlobal), U8(2),
+ B(Star), R(0),
B(LdaSmi), U8(1),
B(DeletePropertyStrict), R(0),
/* 71 S> */ B(Return),
@@ -67,8 +69,10 @@ parameter count: 1
bytecode array length: 16
bytecodes: [
/* 32 E> */ B(StackCheck),
- /* 39 S> */ B(LdrContextSlot), R(context), U8(3), U8(0), R(0),
- B(LdrContextSlot), R(0), U8(2), U8(0), R(1),
+ /* 39 S> */ B(LdaCurrentContextSlot), U8(3),
+ B(Star), R(0),
+ B(LdaContextSlot), R(0), U8(2), U8(0),
+ B(Star), R(1),
B(LdaConstant), U8(0),
B(DeletePropertySloppy), R(1),
/* 56 S> */ B(Return),
@@ -92,8 +96,10 @@ parameter count: 1
bytecode array length: 16
bytecodes: [
/* 18 E> */ B(StackCheck),
- /* 25 S> */ B(LdrContextSlot), R(context), U8(3), U8(0), R(0),
- B(LdrContextSlot), R(0), U8(2), U8(0), R(1),
+ /* 25 S> */ B(LdaCurrentContextSlot), U8(3),
+ B(Star), R(0),
+ B(LdaContextSlot), R(0), U8(2), U8(0),
+ B(Star), R(1),
B(LdaConstant), U8(0),
B(DeletePropertySloppy), R(1),
/* 42 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
index d9d8f79e0a..74709fdab9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -30,23 +30,19 @@ handlers: [
snippet: "
let x = 10; return x;
"
-frame size: 2
+frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 10
bytecodes: [
B(LdaTheHole),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
- /* 46 S> */ B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),
+ /* 46 S> */ B(Nop),
/* 56 S> */ B(Return),
]
constant pool: [
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -84,9 +80,9 @@ handlers: [
snippet: "
let x = 10; x = 20;
"
-frame size: 3
+frame size: 1
parameter count: 1
-bytecode array length: 30
+bytecode array length: 14
bytecodes: [
B(LdaTheHole),
B(Star), R(0),
@@ -94,18 +90,11 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), U8(10),
B(Star), R(0),
/* 46 S> */ B(LdaSmi), U8(20),
- B(Star), R(1),
- B(Ldar), R(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(0),
- B(Star), R(2),
- /* 48 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
- B(Mov), R(1), R(0),
+ B(Star), R(0),
B(LdaUndefined),
/* 54 S> */ B(Return),
]
constant pool: [
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index eb2a5c6b47..d45386e58b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -11,17 +11,17 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 23
+bytecode array length: 19
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 72 S> */ B(Return),
]
@@ -35,29 +35,24 @@ handlers: [
snippet: "
let x = 10; function f1() {return x;} return x;
"
-frame size: 3
+frame size: 2
parameter count: 1
-bytecode array length: 37
+bytecode array length: 20
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
- /* 72 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
+ /* 72 S> */ B(LdaCurrentContextSlot), U8(4),
/* 82 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
@@ -68,25 +63,25 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 46
+bytecode array length: 38
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), U8(20),
B(Star), R(2),
- /* 45 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ B(LdaCurrentContextSlot), U8(4),
B(JumpIfNotHole), U8(11),
B(LdaConstant), U8(1),
B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
+ /* 45 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4), U8(0),
- /* 45 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ /* 45 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 78 S> */ B(Return),
]
@@ -101,34 +96,26 @@ handlers: [
snippet: "
let x = 10; x = 20; function f1() {return x;}
"
-frame size: 4
+frame size: 2
parameter count: 1
-bytecode array length: 48
+bytecode array length: 23
bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateClosure), U8(0), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), U8(10),
- /* 42 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 46 S> */ B(LdaSmi), U8(20),
- B(Star), R(2),
- /* 48 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1),
- B(Ldar), R(2),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ /* 48 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 80 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index acef8f74ad..07dbbdfe19 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -12,20 +12,21 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 73
+bytecode array length: 66
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
- /* 14 S> */ B(LdaConstant), U8(0),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 14 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 14 E> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
@@ -39,8 +40,8 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 14 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 35 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
+ /* 14 E> */ B(Call), R(1), R(2), U8(2), U8(2),
+ /* 35 S> */ B(LdaLookupGlobalSlot), U8(2), U8(6), U8(1),
/* 45 S> */ B(Return),
]
constant pool: [
@@ -57,20 +58,21 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 74
+bytecode array length: 67
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
- /* 14 S> */ B(LdaConstant), U8(0),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 14 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 14 E> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
@@ -84,8 +86,8 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 14 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(2), U8(4), U8(1),
+ /* 14 E> */ B(Call), R(1), R(2), U8(2), U8(2),
+ /* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(2), U8(6), U8(1),
B(TypeOf),
/* 52 S> */ B(Return),
]
@@ -103,22 +105,23 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 73
+bytecode array length: 66
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
/* 14 S> */ B(LdaSmi), U8(20),
/* 16 E> */ B(StaLookupSlotSloppy), U8(0),
- /* 22 S> */ B(LdaConstant), U8(1),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 22 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 29 E> */ B(LdaLookupGlobalSlot), U8(1), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(2),
B(Star), R(3),
B(LdaZero),
@@ -132,7 +135,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 29 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 29 E> */ B(Call), R(1), R(2), U8(2), U8(2),
/* 39 S> */ B(Return),
]
constant pool: [
@@ -154,20 +157,21 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 73
+bytecode array length: 66
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 38 E> */ B(StackCheck),
- /* 44 S> */ B(LdaConstant), U8(0),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 44 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 44 E> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
@@ -181,7 +185,7 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 44 E> */ B(Call), R(1), R(2), U8(2), U8(0),
+ /* 44 E> */ B(Call), R(1), R(2), U8(2), U8(2),
/* 66 S> */ B(LdaLookupContextSlot), U8(2), U8(6), U8(1),
/* 76 S> */ B(Return),
]
@@ -204,20 +208,21 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 73
+bytecode array length: 66
bytecodes: [
B(CreateFunctionContext), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(CreateMappedArguments),
- B(StaContextSlot), R(context), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(6),
B(Ldar), R(new_target),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(5),
/* 34 E> */ B(StackCheck),
- /* 40 S> */ B(LdaConstant), U8(0),
- B(Star), R(4),
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), R(4), U8(1), R(1),
+ /* 40 S> */ B(LdaUndefined),
+ B(Star), R(2),
+ /* 40 E> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(3),
B(LdaZero),
@@ -231,8 +236,8 @@ bytecodes: [
B(Mov), R(closure), R(6),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), U8(6),
B(Star), R(1),
- /* 40 E> */ B(Call), R(1), R(2), U8(2), U8(0),
- /* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(4), U8(1),
+ /* 40 E> */ B(Call), R(1), R(2), U8(2), U8(2),
+ /* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(6), U8(1),
/* 72 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 62dbeb7ada..7f13b4a3d5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -13,7 +13,7 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 133
+bytecode array length: 125
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -21,8 +21,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(71),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -34,15 +34,16 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
- B(Ldar), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(2),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaZero),
B(SuspendGenerator), R(3),
B(Ldar), R(2),
@@ -82,7 +83,7 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 133
+bytecode array length: 125
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -90,8 +91,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(71),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -103,15 +104,16 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
- B(Ldar), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(2),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaZero),
B(SuspendGenerator), R(3),
B(Ldar), R(2),
@@ -153,7 +155,7 @@ snippet: "
"
frame size: 9
parameter count: 2
-bytecode array length: 223
+bytecode array length: 195
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -161,8 +163,8 @@ bytecodes: [
B(Star), R(2),
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
- B(JumpIfTrue), U8(71),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(77),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -174,15 +176,16 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
- B(Ldar), R(4),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(4),
B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(4),
B(LdaZero),
B(SuspendGenerator), R(4),
B(Ldar), R(3),
@@ -207,37 +210,31 @@ bytecodes: [
/* 64 S> */ B(Return),
B(Ldar), R(5),
/* 0 E> */ B(Throw),
- /* 32 S> */ B(LdrUndefined), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(6),
- B(LdaZero),
- B(Star), R(7),
- /* 32 E> */ B(CallRuntime), U16(Runtime::kLoadModuleImport), R(6), U8(2),
+ /* 32 S> */ B(LdaUndefined),
+ B(Star), R(4),
+ /* 32 E> */ B(LdaModuleVariable), U8(-1), U8(0),
B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(8), U8(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Star), R(3),
B(LdaSmi), U8(42),
B(Star), R(5),
/* 32 E> */ B(Call), R(3), R(4), U8(2), U8(2),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(3),
+ B(CreateBlockContext), U8(2),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 47 S> */ B(LdaUndefined),
- /* 47 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
- /* 52 S> */ B(LdrUndefined), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(6),
- B(LdaZero),
- B(Star), R(7),
- /* 52 E> */ B(CallRuntime), U16(Runtime::kLoadModuleImport), R(6), U8(2),
+ /* 47 E> */ B(StaCurrentContextSlot), U8(4),
+ /* 52 S> */ B(LdaUndefined),
+ B(Star), R(4),
+ /* 52 E> */ B(LdaModuleVariable), U8(-1), U8(1),
B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(2),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(8), U8(1),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Star), R(3),
B(LdaSmi), U8(42),
B(Star), R(5),
@@ -248,7 +245,6 @@ bytecodes: [
]
constant pool: [
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
FIXED_ARRAY_TYPE,
]
@@ -263,7 +259,7 @@ snippet: "
"
frame size: 9
parameter count: 2
-bytecode array length: 208
+bytecode array length: 160
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -271,8 +267,8 @@ bytecodes: [
B(Star), R(2),
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
- B(JumpIfTrue), U8(71),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(77),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -284,15 +280,16 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
- B(Ldar), R(4),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(4),
B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(4),
B(LdaZero),
B(SuspendGenerator), R(4),
B(Ldar), R(3),
@@ -318,40 +315,26 @@ bytecodes: [
B(Ldar), R(5),
/* 0 E> */ B(Throw),
/* 17 S> */ B(LdaSmi), U8(42),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 17 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
- /* 21 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ /* 17 E> */ B(StaModuleVariable), U8(1), U8(0),
+ /* 21 S> */ B(LdaModuleVariable), U8(1), U8(0),
B(Inc), U8(2),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 24 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 24 E> */ B(StaModuleVariable), U8(1), U8(0),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(2),
+ B(CreateBlockContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaUndefined),
- /* 34 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
- /* 39 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
+ /* 34 E> */ B(StaCurrentContextSlot), U8(4),
+ /* 39 S> */ B(LdaModuleVariable), U8(1), U8(1),
B(Inc), U8(3),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 42 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 42 E> */ B(StaModuleVariable), U8(1), U8(1),
B(PopContext), R(1),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -365,7 +348,7 @@ snippet: "
"
frame size: 9
parameter count: 2
-bytecode array length: 242
+bytecode array length: 164
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -373,8 +356,8 @@ bytecodes: [
B(Star), R(2),
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
- B(JumpIfTrue), U8(83),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(67),
+ B(LdaSmi), U8(77),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -386,20 +369,18 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ B(StaModuleVariable), U8(1), U8(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
- B(Ldar), R(4),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(4),
B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(4),
B(LdaZero),
B(SuspendGenerator), R(4),
B(Ldar), R(3),
@@ -425,48 +406,26 @@ bytecodes: [
B(Ldar), R(5),
/* 0 E> */ B(Throw),
/* 17 S> */ B(LdaSmi), U8(42),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 17 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
- /* 21 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ /* 17 E> */ B(StaModuleVariable), U8(1), U8(0),
+ /* 21 S> */ B(LdaModuleVariable), U8(1), U8(0),
B(Inc), U8(2),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 24 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 24 E> */ B(StaModuleVariable), U8(1), U8(0),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(2),
+ B(CreateBlockContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaUndefined),
- /* 34 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
- /* 39 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ /* 34 E> */ B(StaCurrentContextSlot), U8(4),
+ /* 39 S> */ B(LdaModuleVariable), U8(1), U8(1),
B(Inc), U8(3),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 42 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ /* 42 E> */ B(StaModuleVariable), U8(1), U8(1),
B(PopContext), R(1),
B(LdaUndefined),
/* 49 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -480,7 +439,7 @@ snippet: "
"
frame size: 9
parameter count: 2
-bytecode array length: 230
+bytecode array length: 168
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -488,8 +447,8 @@ bytecodes: [
B(Star), R(2),
B(LdaZero),
B(TestEqualStrict), R(2), U8(0),
- B(JumpIfTrue), U8(83),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(67),
+ B(LdaSmi), U8(77),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
B(LdaSmi), U8(-2),
@@ -501,20 +460,18 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
+ B(StaModuleVariable), U8(1), U8(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(4),
- B(Ldar), R(4),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(4),
B(Mov), R(closure), R(3),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(4),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(4),
B(LdaZero),
B(SuspendGenerator), R(4),
B(Ldar), R(3),
@@ -540,33 +497,18 @@ bytecodes: [
B(Ldar), R(5),
/* 0 E> */ B(Throw),
/* 19 S> */ B(LdaSmi), U8(42),
- B(Star), R(4),
- B(LdaConstant), U8(1),
- B(Star), R(3),
- /* 19 E> */ B(CallRuntime), U16(Runtime::kStoreModuleExport), R(3), U8(2),
- /* 23 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ /* 19 E> */ B(StaModuleVariable), U8(1), U8(0),
+ /* 23 S> */ B(LdaModuleVariable), U8(1), U8(0),
B(Inc), U8(2),
/* 26 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(2),
+ B(CreateBlockContext), U8(1),
B(PushContext), R(1),
B(LdaTheHole),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 36 S> */ B(LdaUndefined),
- /* 36 E> */ B(StaContextSlot), R(context), U8(4), U8(0),
- /* 41 S> */ B(LdaConstant), U8(1),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kLoadModuleExport), R(3), U8(1),
- B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ /* 36 E> */ B(StaCurrentContextSlot), U8(4),
+ /* 41 S> */ B(LdaModuleVariable), U8(1), U8(1),
B(Inc), U8(3),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(PopContext), R(1),
@@ -575,7 +517,6 @@ bytecodes: [
]
constant pool: [
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["foo"],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -587,7 +528,7 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 159
+bytecode array length: 135
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -595,8 +536,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(83),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(67),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -608,20 +549,18 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ B(StaModuleVariable), U8(1), U8(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
- B(Ldar), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(2),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaZero),
B(SuspendGenerator), R(3),
B(Ldar), R(2),
@@ -646,17 +585,13 @@ bytecodes: [
/* 32 S> */ B(Return),
B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(CreateClosure), U8(2), U8(0),
- B(Star), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ B(CreateClosure), U8(1), U8(0),
+ B(StaModuleVariable), U8(1), U8(0),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["default"],
SHARED_FUNCTION_INFO_TYPE,
]
handlers: [
@@ -668,7 +603,7 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 196
+bytecode array length: 170
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -676,8 +611,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(83),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(67),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -689,20 +624,18 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
- B(Star), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ B(StaModuleVariable), U8(1), U8(0),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
- B(Ldar), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(2),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaZero),
B(SuspendGenerator), R(3),
B(Ldar), R(2),
@@ -729,7 +662,7 @@ bytecodes: [
/* 0 E> */ B(Throw),
/* 16 S> */ B(LdaTheHole),
B(Star), R(2),
- B(CreateClosure), U8(2), U8(0),
+ B(CreateClosure), U8(1), U8(0),
B(Star), R(3),
B(LdaSmi), U8(16),
B(Star), R(4),
@@ -737,19 +670,17 @@ bytecodes: [
B(Star), R(5),
B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),
B(Star), R(2),
- B(LdrNamedProperty), R(2), U8(3), U8(2), R(3),
+ B(LdaNamedProperty), R(2), U8(2), U8(2),
+ B(Star), R(3),
B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
- B(StaContextSlot), R(context), U8(6), U8(0),
- /* 16 E> */ B(LdrContextSlot), R(context), U8(6), U8(0), R(3),
- B(LdaConstant), U8(1),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::kStoreModuleExport), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(6),
+ B(LdaCurrentContextSlot), U8(6),
+ /* 16 E> */ B(StaModuleVariable), U8(1), U8(0),
B(LdaUndefined),
/* 26 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
- ONE_BYTE_INTERNALIZED_STRING_TYPE ["default"],
SHARED_FUNCTION_INFO_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["prototype"],
]
@@ -762,7 +693,7 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 133
+bytecode array length: 125
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -770,8 +701,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(71),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -783,15 +714,16 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
- B(Ldar), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(2),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaZero),
B(SuspendGenerator), R(3),
B(Ldar), R(2),
@@ -831,7 +763,7 @@ snippet: "
"
frame size: 8
parameter count: 2
-bytecode array length: 133
+bytecode array length: 125
bytecodes: [
B(Ldar), R(new_target),
B(JumpIfUndefined), U8(21),
@@ -839,8 +771,8 @@ bytecodes: [
B(Star), R(1),
B(LdaZero),
B(TestEqualStrict), R(1), U8(0),
- B(JumpIfTrue), U8(71),
- B(LdaSmi), U8(76),
+ B(JumpIfTrue), U8(63),
+ B(LdaSmi), U8(77),
B(Star), R(2),
B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), U8(-2),
@@ -852,15 +784,16 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
B(PushContext), R(0),
B(Ldar), R(this),
- B(StaContextSlot), R(context), U8(4), U8(0),
+ B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
- /* 0 E> */ B(LdrContextSlot), R(context), U8(4), U8(0), R(3),
- B(Ldar), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
B(Mov), R(closure), R(2),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
- B(StaContextSlot), R(context), U8(5), U8(0),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
B(Star), R(2),
- B(LdrContextSlot), R(context), U8(5), U8(0), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaZero),
B(SuspendGenerator), R(3),
B(Ldar), R(2),
@@ -894,3 +827,91 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+ import * as foo from \"bar\"
+ foo.f(foo, foo.x);
+"
+frame size: 8
+parameter count: 2
+bytecode array length: 164
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(21),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(1),
+ B(LdaZero),
+ B(TestEqualStrict), R(1), U8(0),
+ B(JumpIfTrue), U8(73),
+ B(LdaSmi), U8(77),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(LdaConstant), U8(0),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(0),
+ B(Ldar), R(this),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaZero),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(2), U8(1),
+ B(StaCurrentContextSlot), U8(6),
+ /* 0 E> */ B(StackCheck),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(3),
+ B(Mov), R(closure), R(2),
+ /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(2), U8(2),
+ B(StaCurrentContextSlot), U8(5),
+ B(Star), R(2),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
+ B(LdaZero),
+ B(SuspendGenerator), R(3),
+ B(Ldar), R(2),
+ /* 45 S> */ B(Return),
+ B(LdaSmi), U8(-2),
+ B(Star), R(1),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
+ B(LdaZero),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(26),
+ B(LdaSmi), U8(2),
+ B(TestEqualStrict), R(5), U8(0),
+ B(JumpIfTrue), U8(16),
+ B(Jump), U8(2),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(CallRuntime), U16(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ /* 45 S> */ B(Return),
+ B(Ldar), R(4),
+ /* 0 E> */ B(Throw),
+ /* 27 S> */ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(3),
+ /* 30 E> */ B(LdaNamedProperty), R(3), U8(1), U8(4),
+ B(Star), R(2),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(4),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(5),
+ /* 41 E> */ B(LdaNamedProperty), R(5), U8(2), U8(6),
+ B(Star), R(5),
+ /* 31 E> */ B(CallProperty), R(2), R(3), U8(3), U8(2),
+ B(LdaUndefined),
+ /* 45 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["f"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
index 397b0de724..30b148b1d6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -20,12 +20,13 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 13
bytecodes: [
/* 97 E> */ B(StackCheck),
- /* 102 S> */ B(LdrContextSlot), R(context), U8(4), U8(1), R(0),
- /* 120 E> */ B(LdaContextSlot), R(context), U8(4), U8(0),
- B(Mul), R(0), U8(2),
+ /* 102 S> */ B(LdaContextSlot), R(context), U8(4), U8(1),
+ B(Star), R(0),
+ B(LdaCurrentContextSlot), U8(4),
+ /* 120 E> */ B(Mul), R(0), U8(2),
/* 130 S> */ B(Return),
]
constant pool: [
@@ -47,10 +48,10 @@ snippet: "
"
frame size: 0
parameter count: 1
-bytecode array length: 11
+bytecode array length: 9
bytecodes: [
/* 97 E> */ B(StackCheck),
- /* 102 S> */ B(LdaContextSlot), R(context), U8(4), U8(0),
+ /* 102 S> */ B(LdaCurrentContextSlot), U8(4),
/* 111 E> */ B(StaContextSlot), R(context), U8(4), U8(1),
B(LdaUndefined),
/* 123 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index 96c0428c6c..e401dbf88b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -13,12 +13,13 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 13
+bytecode array length: 14
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
- /* 25 E> */ B(Call), R(0), R(arg0), U8(1), U8(2),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ B(Star), R(0),
+ /* 25 E> */ B(CallProperty), R(0), R(arg0), U8(1), U8(2),
/* 33 S> */ B(Return),
]
constant pool: [
@@ -34,16 +35,16 @@ snippet: "
"
frame size: 4
parameter count: 4
-bytecode array length: 24
+bytecode array length: 23
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 22 S> */ B(Nop),
- /* 30 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
- B(Ldar), R(0),
+ /* 30 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ B(Star), R(0),
B(Mov), R(arg0), R(1),
B(Mov), R(arg1), R(2),
B(Mov), R(arg2), R(3),
- /* 31 E> */ B(Call), R(0), R(1), U8(3), U8(2),
+ /* 31 E> */ B(CallProperty), R(0), R(1), U8(3), U8(2),
/* 43 S> */ B(Return),
]
constant pool: [
@@ -59,17 +60,18 @@ snippet: "
"
frame size: 4
parameter count: 3
-bytecode array length: 26
+bytecode array length: 27
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(Nop),
- /* 27 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
+ /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ B(Star), R(0),
B(Ldar), R(arg1),
/* 37 E> */ B(Add), R(arg1), U8(6),
B(Star), R(2),
B(Mov), R(arg0), R(1),
B(Mov), R(arg1), R(3),
- /* 28 E> */ B(Call), R(0), R(1), U8(3), U8(2),
+ /* 28 E> */ B(CallProperty), R(0), R(1), U8(3), U8(2),
/* 44 S> */ B(Return),
]
constant pool: [
@@ -474,8 +476,9 @@ bytecodes: [
/* 1160 S> */ B(Nop),
/* 1161 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
/* 1169 S> */ B(Nop),
- /* 1177 E> */ B(Wide), B(LdrNamedProperty), R16(arg0), U16(0), U16(260), R16(0),
- /* 1178 E> */ B(Wide), B(Call), R16(0), R16(arg0), U16(1), U16(258),
+ /* 1177 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(260),
+ B(Star), R(0),
+ /* 1178 E> */ B(Wide), B(CallProperty), R16(0), R16(arg0), U16(1), U16(258),
/* 1186 S> */ B(Return),
]
constant pool: [
@@ -484,3 +487,40 @@ constant pool: [
handlers: [
]
+---
+snippet: "
+ function f(a) { return a.func(1).func(2).func(3); }
+ f(new (function Obj() { this.func = function(a) { return this; }})())
+"
+frame size: 5
+parameter count: 2
+bytecode array length: 55
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ /* 16 S> */ B(Nop),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ B(Star), R(2),
+ B(LdaSmi), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(3),
+ /* 25 E> */ B(CallProperty), R(2), R(3), U8(2), U8(6),
+ B(Star), R(2),
+ /* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(10),
+ B(Star), R(1),
+ B(LdaSmi), U8(2),
+ B(Star), R(3),
+ /* 33 E> */ B(CallProperty), R(1), R(2), U8(2), U8(4),
+ B(Star), R(1),
+ /* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(12),
+ B(Star), R(0),
+ B(LdaSmi), U8(3),
+ B(Star), R(2),
+ /* 41 E> */ B(CallProperty), R(0), R(1), U8(2), U8(2),
+ /* 50 S> */ B(Return),
+]
+constant pool: [
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["func"],
+]
+handlers: [
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index 09f073e859..70e75645b2 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -91,12 +91,12 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 15
+bytecode array length: 14
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 25 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(2), R(0),
- B(Ldar), R(0),
+ /* 25 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ B(Star), R(0),
/* 32 S> */ B(LdaSmi), U8(-124),
/* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
/* 48 S> */ B(Return),
@@ -245,393 +245,393 @@ snippet: "
"
frame size: 1
parameter count: 2
-bytecode array length: 1040
+bytecode array length: 911
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 27 S> */ B(Nop),
- /* 32 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(2), R(0),
- B(Ldar), R(0),
+ /* 32 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ B(Star), R(0),
/* 41 S> */ B(Nop),
- /* 46 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(4), R(0),
- B(Ldar), R(0),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ B(Star), R(0),
/* 55 S> */ B(Nop),
- /* 60 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(6), R(0),
- B(Ldar), R(0),
+ /* 60 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ B(Star), R(0),
/* 69 S> */ B(Nop),
- /* 74 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(8), R(0),
- B(Ldar), R(0),
+ /* 74 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ B(Star), R(0),
/* 83 S> */ B(Nop),
- /* 88 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(10), R(0),
- B(Ldar), R(0),
+ /* 88 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
+ B(Star), R(0),
/* 97 S> */ B(Nop),
- /* 102 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(12), R(0),
- B(Ldar), R(0),
+ /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
+ B(Star), R(0),
/* 111 S> */ B(Nop),
- /* 116 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(14), R(0),
- B(Ldar), R(0),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
+ B(Star), R(0),
/* 125 S> */ B(Nop),
- /* 130 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(16), R(0),
- B(Ldar), R(0),
+ /* 130 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
+ B(Star), R(0),
/* 139 S> */ B(Nop),
- /* 144 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(18), R(0),
- B(Ldar), R(0),
+ /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
+ B(Star), R(0),
/* 153 S> */ B(Nop),
- /* 158 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(20), R(0),
- B(Ldar), R(0),
+ /* 158 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
+ B(Star), R(0),
/* 167 S> */ B(Nop),
- /* 172 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(22), R(0),
- B(Ldar), R(0),
+ /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
+ B(Star), R(0),
/* 181 S> */ B(Nop),
- /* 186 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(24), R(0),
- B(Ldar), R(0),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
+ B(Star), R(0),
/* 195 S> */ B(Nop),
- /* 200 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(26), R(0),
- B(Ldar), R(0),
+ /* 200 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
+ B(Star), R(0),
/* 209 S> */ B(Nop),
- /* 214 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(28), R(0),
- B(Ldar), R(0),
+ /* 214 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
+ B(Star), R(0),
/* 223 S> */ B(Nop),
- /* 228 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(30), R(0),
- B(Ldar), R(0),
+ /* 228 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
+ B(Star), R(0),
/* 237 S> */ B(Nop),
- /* 242 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(32), R(0),
- B(Ldar), R(0),
+ /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
+ B(Star), R(0),
/* 251 S> */ B(Nop),
- /* 256 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(34), R(0),
- B(Ldar), R(0),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
+ B(Star), R(0),
/* 265 S> */ B(Nop),
- /* 270 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(36), R(0),
- B(Ldar), R(0),
+ /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
+ B(Star), R(0),
/* 279 S> */ B(Nop),
- /* 284 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(38), R(0),
- B(Ldar), R(0),
+ /* 284 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
+ B(Star), R(0),
/* 293 S> */ B(Nop),
- /* 298 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(40), R(0),
- B(Ldar), R(0),
+ /* 298 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
+ B(Star), R(0),
/* 307 S> */ B(Nop),
- /* 312 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(42), R(0),
- B(Ldar), R(0),
+ /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
+ B(Star), R(0),
/* 321 S> */ B(Nop),
- /* 326 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(44), R(0),
- B(Ldar), R(0),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
+ B(Star), R(0),
/* 335 S> */ B(Nop),
- /* 340 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(46), R(0),
- B(Ldar), R(0),
+ /* 340 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
+ B(Star), R(0),
/* 349 S> */ B(Nop),
- /* 354 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(48), R(0),
- B(Ldar), R(0),
+ /* 354 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
+ B(Star), R(0),
/* 363 S> */ B(Nop),
- /* 368 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(50), R(0),
- B(Ldar), R(0),
+ /* 368 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
+ B(Star), R(0),
/* 377 S> */ B(Nop),
- /* 382 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(52), R(0),
- B(Ldar), R(0),
+ /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
+ B(Star), R(0),
/* 391 S> */ B(Nop),
- /* 396 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(54), R(0),
- B(Ldar), R(0),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
+ B(Star), R(0),
/* 405 S> */ B(Nop),
- /* 410 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(56), R(0),
- B(Ldar), R(0),
+ /* 410 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
+ B(Star), R(0),
/* 419 S> */ B(Nop),
- /* 424 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(58), R(0),
- B(Ldar), R(0),
+ /* 424 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
+ B(Star), R(0),
/* 433 S> */ B(Nop),
- /* 438 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(60), R(0),
- B(Ldar), R(0),
+ /* 438 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
+ B(Star), R(0),
/* 447 S> */ B(Nop),
- /* 452 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(62), R(0),
- B(Ldar), R(0),
+ /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
+ B(Star), R(0),
/* 461 S> */ B(Nop),
- /* 466 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(64), R(0),
- B(Ldar), R(0),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
+ B(Star), R(0),
/* 475 S> */ B(Nop),
- /* 480 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(66), R(0),
- B(Ldar), R(0),
+ /* 480 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
+ B(Star), R(0),
/* 489 S> */ B(Nop),
- /* 494 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(68), R(0),
- B(Ldar), R(0),
+ /* 494 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
+ B(Star), R(0),
/* 503 S> */ B(Nop),
- /* 508 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(70), R(0),
- B(Ldar), R(0),
+ /* 508 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
+ B(Star), R(0),
/* 517 S> */ B(Nop),
- /* 522 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(72), R(0),
- B(Ldar), R(0),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
+ B(Star), R(0),
/* 531 S> */ B(Nop),
- /* 536 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(74), R(0),
- B(Ldar), R(0),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
+ B(Star), R(0),
/* 545 S> */ B(Nop),
- /* 550 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(76), R(0),
- B(Ldar), R(0),
+ /* 550 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
+ B(Star), R(0),
/* 559 S> */ B(Nop),
- /* 564 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(78), R(0),
- B(Ldar), R(0),
+ /* 564 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
+ B(Star), R(0),
/* 573 S> */ B(Nop),
- /* 578 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(80), R(0),
- B(Ldar), R(0),
+ /* 578 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
+ B(Star), R(0),
/* 587 S> */ B(Nop),
- /* 592 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(82), R(0),
- B(Ldar), R(0),
+ /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
+ B(Star), R(0),
/* 601 S> */ B(Nop),
- /* 606 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(84), R(0),
- B(Ldar), R(0),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
+ B(Star), R(0),
/* 615 S> */ B(Nop),
- /* 620 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(86), R(0),
- B(Ldar), R(0),
+ /* 620 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
+ B(Star), R(0),
/* 629 S> */ B(Nop),
- /* 634 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(88), R(0),
- B(Ldar), R(0),
+ /* 634 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
+ B(Star), R(0),
/* 643 S> */ B(Nop),
- /* 648 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(90), R(0),
- B(Ldar), R(0),
+ /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
+ B(Star), R(0),
/* 657 S> */ B(Nop),
- /* 662 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(92), R(0),
- B(Ldar), R(0),
+ /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
+ B(Star), R(0),
/* 671 S> */ B(Nop),
- /* 676 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(94), R(0),
- B(Ldar), R(0),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
+ B(Star), R(0),
/* 685 S> */ B(Nop),
- /* 690 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(96), R(0),
- B(Ldar), R(0),
+ /* 690 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
+ B(Star), R(0),
/* 699 S> */ B(Nop),
- /* 704 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(98), R(0),
- B(Ldar), R(0),
+ /* 704 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
+ B(Star), R(0),
/* 713 S> */ B(Nop),
- /* 718 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(100), R(0),
- B(Ldar), R(0),
+ /* 718 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
+ B(Star), R(0),
/* 727 S> */ B(Nop),
- /* 732 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(102), R(0),
- B(Ldar), R(0),
+ /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
+ B(Star), R(0),
/* 741 S> */ B(Nop),
- /* 746 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(104), R(0),
- B(Ldar), R(0),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
+ B(Star), R(0),
/* 755 S> */ B(Nop),
- /* 760 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(106), R(0),
- B(Ldar), R(0),
+ /* 760 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
+ B(Star), R(0),
/* 769 S> */ B(Nop),
- /* 774 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(108), R(0),
- B(Ldar), R(0),
+ /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
+ B(Star), R(0),
/* 783 S> */ B(Nop),
- /* 788 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(110), R(0),
- B(Ldar), R(0),
+ /* 788 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
+ B(Star), R(0),
/* 797 S> */ B(Nop),
- /* 802 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(112), R(0),
- B(Ldar), R(0),
+ /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
+ B(Star), R(0),
/* 811 S> */ B(Nop),
- /* 816 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(114), R(0),
- B(Ldar), R(0),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
+ B(Star), R(0),
/* 825 S> */ B(Nop),
- /* 830 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(116), R(0),
- B(Ldar), R(0),
+ /* 830 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
+ B(Star), R(0),
/* 839 S> */ B(Nop),
- /* 844 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(118), R(0),
- B(Ldar), R(0),
+ /* 844 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
+ B(Star), R(0),
/* 853 S> */ B(Nop),
- /* 858 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(120), R(0),
- B(Ldar), R(0),
+ /* 858 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
+ B(Star), R(0),
/* 867 S> */ B(Nop),
- /* 872 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(122), R(0),
- B(Ldar), R(0),
+ /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
+ B(Star), R(0),
/* 881 S> */ B(Nop),
- /* 886 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(124), R(0),
- B(Ldar), R(0),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
+ B(Star), R(0),
/* 895 S> */ B(Nop),
- /* 900 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(126), R(0),
- B(Ldar), R(0),
+ /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
+ B(Star), R(0),
/* 909 S> */ B(Nop),
- /* 914 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(128), R(0),
- B(Ldar), R(0),
+ /* 914 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
+ B(Star), R(0),
/* 923 S> */ B(Nop),
- /* 928 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(130), R(0),
- B(Ldar), R(0),
+ /* 928 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
+ B(Star), R(0),
/* 937 S> */ B(Nop),
- /* 942 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(132), R(0),
- B(Ldar), R(0),
+ /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
+ B(Star), R(0),
/* 951 S> */ B(Nop),
- /* 956 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(134), R(0),
- B(Ldar), R(0),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
+ B(Star), R(0),
/* 965 S> */ B(Nop),
- /* 970 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(136), R(0),
- B(Ldar), R(0),
+ /* 970 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
+ B(Star), R(0),
/* 979 S> */ B(Nop),
- /* 984 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(138), R(0),
- B(Ldar), R(0),
+ /* 984 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
+ B(Star), R(0),
/* 993 S> */ B(Nop),
- /* 998 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(140), R(0),
- B(Ldar), R(0),
+ /* 998 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
+ B(Star), R(0),
/* 1007 S> */ B(Nop),
- /* 1012 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(142), R(0),
- B(Ldar), R(0),
+ /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
+ B(Star), R(0),
/* 1021 S> */ B(Nop),
- /* 1026 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(144), R(0),
- B(Ldar), R(0),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
+ B(Star), R(0),
/* 1035 S> */ B(Nop),
- /* 1040 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(146), R(0),
- B(Ldar), R(0),
+ /* 1040 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
+ B(Star), R(0),
/* 1049 S> */ B(Nop),
- /* 1054 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(148), R(0),
- B(Ldar), R(0),
+ /* 1054 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
+ B(Star), R(0),
/* 1063 S> */ B(Nop),
- /* 1068 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(150), R(0),
- B(Ldar), R(0),
+ /* 1068 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
+ B(Star), R(0),
/* 1077 S> */ B(Nop),
- /* 1082 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(152), R(0),
- B(Ldar), R(0),
+ /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
+ B(Star), R(0),
/* 1091 S> */ B(Nop),
- /* 1096 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(154), R(0),
- B(Ldar), R(0),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
+ B(Star), R(0),
/* 1105 S> */ B(Nop),
- /* 1110 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(156), R(0),
- B(Ldar), R(0),
+ /* 1110 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
+ B(Star), R(0),
/* 1119 S> */ B(Nop),
- /* 1124 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(158), R(0),
- B(Ldar), R(0),
+ /* 1124 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
+ B(Star), R(0),
/* 1133 S> */ B(Nop),
- /* 1138 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(160), R(0),
- B(Ldar), R(0),
+ /* 1138 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
+ B(Star), R(0),
/* 1147 S> */ B(Nop),
- /* 1152 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(162), R(0),
- B(Ldar), R(0),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
+ B(Star), R(0),
/* 1161 S> */ B(Nop),
- /* 1166 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(164), R(0),
- B(Ldar), R(0),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
+ B(Star), R(0),
/* 1175 S> */ B(Nop),
- /* 1180 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(166), R(0),
- B(Ldar), R(0),
+ /* 1180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
+ B(Star), R(0),
/* 1189 S> */ B(Nop),
- /* 1194 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(168), R(0),
- B(Ldar), R(0),
+ /* 1194 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
+ B(Star), R(0),
/* 1203 S> */ B(Nop),
- /* 1208 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(170), R(0),
- B(Ldar), R(0),
+ /* 1208 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
+ B(Star), R(0),
/* 1217 S> */ B(Nop),
- /* 1222 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(172), R(0),
- B(Ldar), R(0),
+ /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
+ B(Star), R(0),
/* 1231 S> */ B(Nop),
- /* 1236 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(174), R(0),
- B(Ldar), R(0),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
+ B(Star), R(0),
/* 1245 S> */ B(Nop),
- /* 1250 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(176), R(0),
- B(Ldar), R(0),
+ /* 1250 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
+ B(Star), R(0),
/* 1259 S> */ B(Nop),
- /* 1264 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(178), R(0),
- B(Ldar), R(0),
+ /* 1264 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
+ B(Star), R(0),
/* 1273 S> */ B(Nop),
- /* 1278 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(180), R(0),
- B(Ldar), R(0),
+ /* 1278 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
+ B(Star), R(0),
/* 1287 S> */ B(Nop),
- /* 1292 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(182), R(0),
- B(Ldar), R(0),
+ /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
+ B(Star), R(0),
/* 1301 S> */ B(Nop),
- /* 1306 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(184), R(0),
- B(Ldar), R(0),
+ /* 1306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
+ B(Star), R(0),
/* 1315 S> */ B(Nop),
- /* 1320 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(186), R(0),
- B(Ldar), R(0),
+ /* 1320 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
+ B(Star), R(0),
/* 1329 S> */ B(Nop),
- /* 1334 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(188), R(0),
- B(Ldar), R(0),
+ /* 1334 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
+ B(Star), R(0),
/* 1343 S> */ B(Nop),
- /* 1348 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(190), R(0),
- B(Ldar), R(0),
+ /* 1348 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
+ B(Star), R(0),
/* 1357 S> */ B(Nop),
- /* 1362 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(192), R(0),
- B(Ldar), R(0),
+ /* 1362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
+ B(Star), R(0),
/* 1371 S> */ B(Nop),
- /* 1376 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(194), R(0),
- B(Ldar), R(0),
+ /* 1376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
+ B(Star), R(0),
/* 1385 S> */ B(Nop),
- /* 1390 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(196), R(0),
- B(Ldar), R(0),
+ /* 1390 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
+ B(Star), R(0),
/* 1399 S> */ B(Nop),
- /* 1404 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(198), R(0),
- B(Ldar), R(0),
+ /* 1404 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
+ B(Star), R(0),
/* 1413 S> */ B(Nop),
- /* 1418 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(200), R(0),
- B(Ldar), R(0),
+ /* 1418 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
+ B(Star), R(0),
/* 1427 S> */ B(Nop),
- /* 1432 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(202), R(0),
- B(Ldar), R(0),
+ /* 1432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
+ B(Star), R(0),
/* 1441 S> */ B(Nop),
- /* 1446 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(204), R(0),
- B(Ldar), R(0),
+ /* 1446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
+ B(Star), R(0),
/* 1455 S> */ B(Nop),
- /* 1460 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(206), R(0),
- B(Ldar), R(0),
+ /* 1460 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
+ B(Star), R(0),
/* 1469 S> */ B(Nop),
- /* 1474 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(208), R(0),
- B(Ldar), R(0),
+ /* 1474 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
+ B(Star), R(0),
/* 1483 S> */ B(Nop),
- /* 1488 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(210), R(0),
- B(Ldar), R(0),
+ /* 1488 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
+ B(Star), R(0),
/* 1497 S> */ B(Nop),
- /* 1502 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(212), R(0),
- B(Ldar), R(0),
+ /* 1502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
+ B(Star), R(0),
/* 1511 S> */ B(Nop),
- /* 1516 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(214), R(0),
- B(Ldar), R(0),
+ /* 1516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
+ B(Star), R(0),
/* 1525 S> */ B(Nop),
- /* 1530 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(216), R(0),
- B(Ldar), R(0),
+ /* 1530 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
+ B(Star), R(0),
/* 1539 S> */ B(Nop),
- /* 1544 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(218), R(0),
- B(Ldar), R(0),
+ /* 1544 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
+ B(Star), R(0),
/* 1553 S> */ B(Nop),
- /* 1558 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(220), R(0),
- B(Ldar), R(0),
+ /* 1558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
+ B(Star), R(0),
/* 1567 S> */ B(Nop),
- /* 1572 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(222), R(0),
- B(Ldar), R(0),
+ /* 1572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
+ B(Star), R(0),
/* 1581 S> */ B(Nop),
- /* 1586 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(224), R(0),
- B(Ldar), R(0),
+ /* 1586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
+ B(Star), R(0),
/* 1595 S> */ B(Nop),
- /* 1600 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(226), R(0),
- B(Ldar), R(0),
+ /* 1600 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
+ B(Star), R(0),
/* 1609 S> */ B(Nop),
- /* 1614 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(228), R(0),
- B(Ldar), R(0),
+ /* 1614 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
+ B(Star), R(0),
/* 1623 S> */ B(Nop),
- /* 1628 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(230), R(0),
- B(Ldar), R(0),
+ /* 1628 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
+ B(Star), R(0),
/* 1637 S> */ B(Nop),
- /* 1642 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(232), R(0),
- B(Ldar), R(0),
+ /* 1642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
+ B(Star), R(0),
/* 1651 S> */ B(Nop),
- /* 1656 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(234), R(0),
- B(Ldar), R(0),
+ /* 1656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
+ B(Star), R(0),
/* 1665 S> */ B(Nop),
- /* 1670 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(236), R(0),
- B(Ldar), R(0),
+ /* 1670 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
+ B(Star), R(0),
/* 1679 S> */ B(Nop),
- /* 1684 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(238), R(0),
- B(Ldar), R(0),
+ /* 1684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
+ B(Star), R(0),
/* 1693 S> */ B(Nop),
- /* 1698 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(240), R(0),
- B(Ldar), R(0),
+ /* 1698 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
+ B(Star), R(0),
/* 1707 S> */ B(Nop),
- /* 1712 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(242), R(0),
- B(Ldar), R(0),
+ /* 1712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
+ B(Star), R(0),
/* 1721 S> */ B(Nop),
- /* 1726 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(244), R(0),
- B(Ldar), R(0),
+ /* 1726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
+ B(Star), R(0),
/* 1735 S> */ B(Nop),
- /* 1740 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(246), R(0),
- B(Ldar), R(0),
+ /* 1740 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
+ B(Star), R(0),
/* 1749 S> */ B(Nop),
- /* 1754 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(248), R(0),
- B(Ldar), R(0),
+ /* 1754 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
+ B(Star), R(0),
/* 1763 S> */ B(Nop),
- /* 1768 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(250), R(0),
- B(Ldar), R(0),
+ /* 1768 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
+ B(Star), R(0),
/* 1777 S> */ B(Nop),
- /* 1782 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(252), R(0),
- B(Ldar), R(0),
+ /* 1782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
+ B(Star), R(0),
/* 1791 S> */ B(Nop),
- /* 1796 E> */ B(LdrNamedProperty), R(arg0), U8(0), U8(254), R(0),
- B(Ldar), R(0),
+ /* 1796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
+ B(Star), R(0),
/* 1805 S> */ B(Nop),
- /* 1810 E> */ B(Wide), B(LdrNamedProperty), R16(arg0), U16(0), U16(256), R16(0),
- B(Ldar), R(0),
+ /* 1810 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
+ B(Star), R(0),
/* 1819 S> */ B(Nop),
/* 1827 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(258),
/* 1834 S> */ B(Return),
@@ -780,393 +780,393 @@ snippet: "
"
frame size: 1
parameter count: 3
-bytecode array length: 1038
+bytecode array length: 909
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(Ldar), R(arg1),
- /* 36 E> */ B(LdrKeyedProperty), R(arg0), U8(2), R(0),
- B(Ldar), R(0),
+ /* 36 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ B(Star), R(0),
/* 42 S> */ B(Ldar), R(arg1),
- /* 48 E> */ B(LdrKeyedProperty), R(arg0), U8(4), R(0),
- B(Ldar), R(0),
+ /* 48 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
+ B(Star), R(0),
/* 54 S> */ B(Ldar), R(arg1),
- /* 60 E> */ B(LdrKeyedProperty), R(arg0), U8(6), R(0),
- B(Ldar), R(0),
+ /* 60 E> */ B(LdaKeyedProperty), R(arg0), U8(6),
+ B(Star), R(0),
/* 66 S> */ B(Ldar), R(arg1),
- /* 72 E> */ B(LdrKeyedProperty), R(arg0), U8(8), R(0),
- B(Ldar), R(0),
+ /* 72 E> */ B(LdaKeyedProperty), R(arg0), U8(8),
+ B(Star), R(0),
/* 78 S> */ B(Ldar), R(arg1),
- /* 84 E> */ B(LdrKeyedProperty), R(arg0), U8(10), R(0),
- B(Ldar), R(0),
+ /* 84 E> */ B(LdaKeyedProperty), R(arg0), U8(10),
+ B(Star), R(0),
/* 90 S> */ B(Ldar), R(arg1),
- /* 96 E> */ B(LdrKeyedProperty), R(arg0), U8(12), R(0),
- B(Ldar), R(0),
+ /* 96 E> */ B(LdaKeyedProperty), R(arg0), U8(12),
+ B(Star), R(0),
/* 102 S> */ B(Ldar), R(arg1),
- /* 108 E> */ B(LdrKeyedProperty), R(arg0), U8(14), R(0),
- B(Ldar), R(0),
+ /* 108 E> */ B(LdaKeyedProperty), R(arg0), U8(14),
+ B(Star), R(0),
/* 114 S> */ B(Ldar), R(arg1),
- /* 120 E> */ B(LdrKeyedProperty), R(arg0), U8(16), R(0),
- B(Ldar), R(0),
+ /* 120 E> */ B(LdaKeyedProperty), R(arg0), U8(16),
+ B(Star), R(0),
/* 126 S> */ B(Ldar), R(arg1),
- /* 132 E> */ B(LdrKeyedProperty), R(arg0), U8(18), R(0),
- B(Ldar), R(0),
+ /* 132 E> */ B(LdaKeyedProperty), R(arg0), U8(18),
+ B(Star), R(0),
/* 138 S> */ B(Ldar), R(arg1),
- /* 144 E> */ B(LdrKeyedProperty), R(arg0), U8(20), R(0),
- B(Ldar), R(0),
+ /* 144 E> */ B(LdaKeyedProperty), R(arg0), U8(20),
+ B(Star), R(0),
/* 150 S> */ B(Ldar), R(arg1),
- /* 156 E> */ B(LdrKeyedProperty), R(arg0), U8(22), R(0),
- B(Ldar), R(0),
+ /* 156 E> */ B(LdaKeyedProperty), R(arg0), U8(22),
+ B(Star), R(0),
/* 162 S> */ B(Ldar), R(arg1),
- /* 168 E> */ B(LdrKeyedProperty), R(arg0), U8(24), R(0),
- B(Ldar), R(0),
+ /* 168 E> */ B(LdaKeyedProperty), R(arg0), U8(24),
+ B(Star), R(0),
/* 174 S> */ B(Ldar), R(arg1),
- /* 180 E> */ B(LdrKeyedProperty), R(arg0), U8(26), R(0),
- B(Ldar), R(0),
+ /* 180 E> */ B(LdaKeyedProperty), R(arg0), U8(26),
+ B(Star), R(0),
/* 186 S> */ B(Ldar), R(arg1),
- /* 192 E> */ B(LdrKeyedProperty), R(arg0), U8(28), R(0),
- B(Ldar), R(0),
+ /* 192 E> */ B(LdaKeyedProperty), R(arg0), U8(28),
+ B(Star), R(0),
/* 198 S> */ B(Ldar), R(arg1),
- /* 204 E> */ B(LdrKeyedProperty), R(arg0), U8(30), R(0),
- B(Ldar), R(0),
+ /* 204 E> */ B(LdaKeyedProperty), R(arg0), U8(30),
+ B(Star), R(0),
/* 210 S> */ B(Ldar), R(arg1),
- /* 216 E> */ B(LdrKeyedProperty), R(arg0), U8(32), R(0),
- B(Ldar), R(0),
+ /* 216 E> */ B(LdaKeyedProperty), R(arg0), U8(32),
+ B(Star), R(0),
/* 222 S> */ B(Ldar), R(arg1),
- /* 228 E> */ B(LdrKeyedProperty), R(arg0), U8(34), R(0),
- B(Ldar), R(0),
+ /* 228 E> */ B(LdaKeyedProperty), R(arg0), U8(34),
+ B(Star), R(0),
/* 234 S> */ B(Ldar), R(arg1),
- /* 240 E> */ B(LdrKeyedProperty), R(arg0), U8(36), R(0),
- B(Ldar), R(0),
+ /* 240 E> */ B(LdaKeyedProperty), R(arg0), U8(36),
+ B(Star), R(0),
/* 246 S> */ B(Ldar), R(arg1),
- /* 252 E> */ B(LdrKeyedProperty), R(arg0), U8(38), R(0),
- B(Ldar), R(0),
+ /* 252 E> */ B(LdaKeyedProperty), R(arg0), U8(38),
+ B(Star), R(0),
/* 258 S> */ B(Ldar), R(arg1),
- /* 264 E> */ B(LdrKeyedProperty), R(arg0), U8(40), R(0),
- B(Ldar), R(0),
+ /* 264 E> */ B(LdaKeyedProperty), R(arg0), U8(40),
+ B(Star), R(0),
/* 270 S> */ B(Ldar), R(arg1),
- /* 276 E> */ B(LdrKeyedProperty), R(arg0), U8(42), R(0),
- B(Ldar), R(0),
+ /* 276 E> */ B(LdaKeyedProperty), R(arg0), U8(42),
+ B(Star), R(0),
/* 282 S> */ B(Ldar), R(arg1),
- /* 288 E> */ B(LdrKeyedProperty), R(arg0), U8(44), R(0),
- B(Ldar), R(0),
+ /* 288 E> */ B(LdaKeyedProperty), R(arg0), U8(44),
+ B(Star), R(0),
/* 294 S> */ B(Ldar), R(arg1),
- /* 300 E> */ B(LdrKeyedProperty), R(arg0), U8(46), R(0),
- B(Ldar), R(0),
+ /* 300 E> */ B(LdaKeyedProperty), R(arg0), U8(46),
+ B(Star), R(0),
/* 306 S> */ B(Ldar), R(arg1),
- /* 312 E> */ B(LdrKeyedProperty), R(arg0), U8(48), R(0),
- B(Ldar), R(0),
+ /* 312 E> */ B(LdaKeyedProperty), R(arg0), U8(48),
+ B(Star), R(0),
/* 318 S> */ B(Ldar), R(arg1),
- /* 324 E> */ B(LdrKeyedProperty), R(arg0), U8(50), R(0),
- B(Ldar), R(0),
+ /* 324 E> */ B(LdaKeyedProperty), R(arg0), U8(50),
+ B(Star), R(0),
/* 330 S> */ B(Ldar), R(arg1),
- /* 336 E> */ B(LdrKeyedProperty), R(arg0), U8(52), R(0),
- B(Ldar), R(0),
+ /* 336 E> */ B(LdaKeyedProperty), R(arg0), U8(52),
+ B(Star), R(0),
/* 342 S> */ B(Ldar), R(arg1),
- /* 348 E> */ B(LdrKeyedProperty), R(arg0), U8(54), R(0),
- B(Ldar), R(0),
+ /* 348 E> */ B(LdaKeyedProperty), R(arg0), U8(54),
+ B(Star), R(0),
/* 354 S> */ B(Ldar), R(arg1),
- /* 360 E> */ B(LdrKeyedProperty), R(arg0), U8(56), R(0),
- B(Ldar), R(0),
+ /* 360 E> */ B(LdaKeyedProperty), R(arg0), U8(56),
+ B(Star), R(0),
/* 366 S> */ B(Ldar), R(arg1),
- /* 372 E> */ B(LdrKeyedProperty), R(arg0), U8(58), R(0),
- B(Ldar), R(0),
+ /* 372 E> */ B(LdaKeyedProperty), R(arg0), U8(58),
+ B(Star), R(0),
/* 378 S> */ B(Ldar), R(arg1),
- /* 384 E> */ B(LdrKeyedProperty), R(arg0), U8(60), R(0),
- B(Ldar), R(0),
+ /* 384 E> */ B(LdaKeyedProperty), R(arg0), U8(60),
+ B(Star), R(0),
/* 390 S> */ B(Ldar), R(arg1),
- /* 396 E> */ B(LdrKeyedProperty), R(arg0), U8(62), R(0),
- B(Ldar), R(0),
+ /* 396 E> */ B(LdaKeyedProperty), R(arg0), U8(62),
+ B(Star), R(0),
/* 402 S> */ B(Ldar), R(arg1),
- /* 408 E> */ B(LdrKeyedProperty), R(arg0), U8(64), R(0),
- B(Ldar), R(0),
+ /* 408 E> */ B(LdaKeyedProperty), R(arg0), U8(64),
+ B(Star), R(0),
/* 414 S> */ B(Ldar), R(arg1),
- /* 420 E> */ B(LdrKeyedProperty), R(arg0), U8(66), R(0),
- B(Ldar), R(0),
+ /* 420 E> */ B(LdaKeyedProperty), R(arg0), U8(66),
+ B(Star), R(0),
/* 426 S> */ B(Ldar), R(arg1),
- /* 432 E> */ B(LdrKeyedProperty), R(arg0), U8(68), R(0),
- B(Ldar), R(0),
+ /* 432 E> */ B(LdaKeyedProperty), R(arg0), U8(68),
+ B(Star), R(0),
/* 438 S> */ B(Ldar), R(arg1),
- /* 444 E> */ B(LdrKeyedProperty), R(arg0), U8(70), R(0),
- B(Ldar), R(0),
+ /* 444 E> */ B(LdaKeyedProperty), R(arg0), U8(70),
+ B(Star), R(0),
/* 450 S> */ B(Ldar), R(arg1),
- /* 456 E> */ B(LdrKeyedProperty), R(arg0), U8(72), R(0),
- B(Ldar), R(0),
+ /* 456 E> */ B(LdaKeyedProperty), R(arg0), U8(72),
+ B(Star), R(0),
/* 462 S> */ B(Ldar), R(arg1),
- /* 468 E> */ B(LdrKeyedProperty), R(arg0), U8(74), R(0),
- B(Ldar), R(0),
+ /* 468 E> */ B(LdaKeyedProperty), R(arg0), U8(74),
+ B(Star), R(0),
/* 474 S> */ B(Ldar), R(arg1),
- /* 480 E> */ B(LdrKeyedProperty), R(arg0), U8(76), R(0),
- B(Ldar), R(0),
+ /* 480 E> */ B(LdaKeyedProperty), R(arg0), U8(76),
+ B(Star), R(0),
/* 486 S> */ B(Ldar), R(arg1),
- /* 492 E> */ B(LdrKeyedProperty), R(arg0), U8(78), R(0),
- B(Ldar), R(0),
+ /* 492 E> */ B(LdaKeyedProperty), R(arg0), U8(78),
+ B(Star), R(0),
/* 498 S> */ B(Ldar), R(arg1),
- /* 504 E> */ B(LdrKeyedProperty), R(arg0), U8(80), R(0),
- B(Ldar), R(0),
+ /* 504 E> */ B(LdaKeyedProperty), R(arg0), U8(80),
+ B(Star), R(0),
/* 510 S> */ B(Ldar), R(arg1),
- /* 516 E> */ B(LdrKeyedProperty), R(arg0), U8(82), R(0),
- B(Ldar), R(0),
+ /* 516 E> */ B(LdaKeyedProperty), R(arg0), U8(82),
+ B(Star), R(0),
/* 522 S> */ B(Ldar), R(arg1),
- /* 528 E> */ B(LdrKeyedProperty), R(arg0), U8(84), R(0),
- B(Ldar), R(0),
+ /* 528 E> */ B(LdaKeyedProperty), R(arg0), U8(84),
+ B(Star), R(0),
/* 534 S> */ B(Ldar), R(arg1),
- /* 540 E> */ B(LdrKeyedProperty), R(arg0), U8(86), R(0),
- B(Ldar), R(0),
+ /* 540 E> */ B(LdaKeyedProperty), R(arg0), U8(86),
+ B(Star), R(0),
/* 546 S> */ B(Ldar), R(arg1),
- /* 552 E> */ B(LdrKeyedProperty), R(arg0), U8(88), R(0),
- B(Ldar), R(0),
+ /* 552 E> */ B(LdaKeyedProperty), R(arg0), U8(88),
+ B(Star), R(0),
/* 558 S> */ B(Ldar), R(arg1),
- /* 564 E> */ B(LdrKeyedProperty), R(arg0), U8(90), R(0),
- B(Ldar), R(0),
+ /* 564 E> */ B(LdaKeyedProperty), R(arg0), U8(90),
+ B(Star), R(0),
/* 570 S> */ B(Ldar), R(arg1),
- /* 576 E> */ B(LdrKeyedProperty), R(arg0), U8(92), R(0),
- B(Ldar), R(0),
+ /* 576 E> */ B(LdaKeyedProperty), R(arg0), U8(92),
+ B(Star), R(0),
/* 582 S> */ B(Ldar), R(arg1),
- /* 588 E> */ B(LdrKeyedProperty), R(arg0), U8(94), R(0),
- B(Ldar), R(0),
+ /* 588 E> */ B(LdaKeyedProperty), R(arg0), U8(94),
+ B(Star), R(0),
/* 594 S> */ B(Ldar), R(arg1),
- /* 600 E> */ B(LdrKeyedProperty), R(arg0), U8(96), R(0),
- B(Ldar), R(0),
+ /* 600 E> */ B(LdaKeyedProperty), R(arg0), U8(96),
+ B(Star), R(0),
/* 606 S> */ B(Ldar), R(arg1),
- /* 612 E> */ B(LdrKeyedProperty), R(arg0), U8(98), R(0),
- B(Ldar), R(0),
+ /* 612 E> */ B(LdaKeyedProperty), R(arg0), U8(98),
+ B(Star), R(0),
/* 618 S> */ B(Ldar), R(arg1),
- /* 624 E> */ B(LdrKeyedProperty), R(arg0), U8(100), R(0),
- B(Ldar), R(0),
+ /* 624 E> */ B(LdaKeyedProperty), R(arg0), U8(100),
+ B(Star), R(0),
/* 630 S> */ B(Ldar), R(arg1),
- /* 636 E> */ B(LdrKeyedProperty), R(arg0), U8(102), R(0),
- B(Ldar), R(0),
+ /* 636 E> */ B(LdaKeyedProperty), R(arg0), U8(102),
+ B(Star), R(0),
/* 642 S> */ B(Ldar), R(arg1),
- /* 648 E> */ B(LdrKeyedProperty), R(arg0), U8(104), R(0),
- B(Ldar), R(0),
+ /* 648 E> */ B(LdaKeyedProperty), R(arg0), U8(104),
+ B(Star), R(0),
/* 654 S> */ B(Ldar), R(arg1),
- /* 660 E> */ B(LdrKeyedProperty), R(arg0), U8(106), R(0),
- B(Ldar), R(0),
+ /* 660 E> */ B(LdaKeyedProperty), R(arg0), U8(106),
+ B(Star), R(0),
/* 666 S> */ B(Ldar), R(arg1),
- /* 672 E> */ B(LdrKeyedProperty), R(arg0), U8(108), R(0),
- B(Ldar), R(0),
+ /* 672 E> */ B(LdaKeyedProperty), R(arg0), U8(108),
+ B(Star), R(0),
/* 678 S> */ B(Ldar), R(arg1),
- /* 684 E> */ B(LdrKeyedProperty), R(arg0), U8(110), R(0),
- B(Ldar), R(0),
+ /* 684 E> */ B(LdaKeyedProperty), R(arg0), U8(110),
+ B(Star), R(0),
/* 690 S> */ B(Ldar), R(arg1),
- /* 696 E> */ B(LdrKeyedProperty), R(arg0), U8(112), R(0),
- B(Ldar), R(0),
+ /* 696 E> */ B(LdaKeyedProperty), R(arg0), U8(112),
+ B(Star), R(0),
/* 702 S> */ B(Ldar), R(arg1),
- /* 708 E> */ B(LdrKeyedProperty), R(arg0), U8(114), R(0),
- B(Ldar), R(0),
+ /* 708 E> */ B(LdaKeyedProperty), R(arg0), U8(114),
+ B(Star), R(0),
/* 714 S> */ B(Ldar), R(arg1),
- /* 720 E> */ B(LdrKeyedProperty), R(arg0), U8(116), R(0),
- B(Ldar), R(0),
+ /* 720 E> */ B(LdaKeyedProperty), R(arg0), U8(116),
+ B(Star), R(0),
/* 726 S> */ B(Ldar), R(arg1),
- /* 732 E> */ B(LdrKeyedProperty), R(arg0), U8(118), R(0),
- B(Ldar), R(0),
+ /* 732 E> */ B(LdaKeyedProperty), R(arg0), U8(118),
+ B(Star), R(0),
/* 738 S> */ B(Ldar), R(arg1),
- /* 744 E> */ B(LdrKeyedProperty), R(arg0), U8(120), R(0),
- B(Ldar), R(0),
+ /* 744 E> */ B(LdaKeyedProperty), R(arg0), U8(120),
+ B(Star), R(0),
/* 750 S> */ B(Ldar), R(arg1),
- /* 756 E> */ B(LdrKeyedProperty), R(arg0), U8(122), R(0),
- B(Ldar), R(0),
+ /* 756 E> */ B(LdaKeyedProperty), R(arg0), U8(122),
+ B(Star), R(0),
/* 762 S> */ B(Ldar), R(arg1),
- /* 768 E> */ B(LdrKeyedProperty), R(arg0), U8(124), R(0),
- B(Ldar), R(0),
+ /* 768 E> */ B(LdaKeyedProperty), R(arg0), U8(124),
+ B(Star), R(0),
/* 774 S> */ B(Ldar), R(arg1),
- /* 780 E> */ B(LdrKeyedProperty), R(arg0), U8(126), R(0),
- B(Ldar), R(0),
+ /* 780 E> */ B(LdaKeyedProperty), R(arg0), U8(126),
+ B(Star), R(0),
/* 786 S> */ B(Ldar), R(arg1),
- /* 792 E> */ B(LdrKeyedProperty), R(arg0), U8(128), R(0),
- B(Ldar), R(0),
+ /* 792 E> */ B(LdaKeyedProperty), R(arg0), U8(128),
+ B(Star), R(0),
/* 798 S> */ B(Ldar), R(arg1),
- /* 804 E> */ B(LdrKeyedProperty), R(arg0), U8(130), R(0),
- B(Ldar), R(0),
+ /* 804 E> */ B(LdaKeyedProperty), R(arg0), U8(130),
+ B(Star), R(0),
/* 810 S> */ B(Ldar), R(arg1),
- /* 816 E> */ B(LdrKeyedProperty), R(arg0), U8(132), R(0),
- B(Ldar), R(0),
+ /* 816 E> */ B(LdaKeyedProperty), R(arg0), U8(132),
+ B(Star), R(0),
/* 822 S> */ B(Ldar), R(arg1),
- /* 828 E> */ B(LdrKeyedProperty), R(arg0), U8(134), R(0),
- B(Ldar), R(0),
+ /* 828 E> */ B(LdaKeyedProperty), R(arg0), U8(134),
+ B(Star), R(0),
/* 834 S> */ B(Ldar), R(arg1),
- /* 840 E> */ B(LdrKeyedProperty), R(arg0), U8(136), R(0),
- B(Ldar), R(0),
+ /* 840 E> */ B(LdaKeyedProperty), R(arg0), U8(136),
+ B(Star), R(0),
/* 846 S> */ B(Ldar), R(arg1),
- /* 852 E> */ B(LdrKeyedProperty), R(arg0), U8(138), R(0),
- B(Ldar), R(0),
+ /* 852 E> */ B(LdaKeyedProperty), R(arg0), U8(138),
+ B(Star), R(0),
/* 858 S> */ B(Ldar), R(arg1),
- /* 864 E> */ B(LdrKeyedProperty), R(arg0), U8(140), R(0),
- B(Ldar), R(0),
+ /* 864 E> */ B(LdaKeyedProperty), R(arg0), U8(140),
+ B(Star), R(0),
/* 870 S> */ B(Ldar), R(arg1),
- /* 876 E> */ B(LdrKeyedProperty), R(arg0), U8(142), R(0),
- B(Ldar), R(0),
+ /* 876 E> */ B(LdaKeyedProperty), R(arg0), U8(142),
+ B(Star), R(0),
/* 882 S> */ B(Ldar), R(arg1),
- /* 888 E> */ B(LdrKeyedProperty), R(arg0), U8(144), R(0),
- B(Ldar), R(0),
+ /* 888 E> */ B(LdaKeyedProperty), R(arg0), U8(144),
+ B(Star), R(0),
/* 894 S> */ B(Ldar), R(arg1),
- /* 900 E> */ B(LdrKeyedProperty), R(arg0), U8(146), R(0),
- B(Ldar), R(0),
+ /* 900 E> */ B(LdaKeyedProperty), R(arg0), U8(146),
+ B(Star), R(0),
/* 906 S> */ B(Ldar), R(arg1),
- /* 912 E> */ B(LdrKeyedProperty), R(arg0), U8(148), R(0),
- B(Ldar), R(0),
+ /* 912 E> */ B(LdaKeyedProperty), R(arg0), U8(148),
+ B(Star), R(0),
/* 918 S> */ B(Ldar), R(arg1),
- /* 924 E> */ B(LdrKeyedProperty), R(arg0), U8(150), R(0),
- B(Ldar), R(0),
+ /* 924 E> */ B(LdaKeyedProperty), R(arg0), U8(150),
+ B(Star), R(0),
/* 930 S> */ B(Ldar), R(arg1),
- /* 936 E> */ B(LdrKeyedProperty), R(arg0), U8(152), R(0),
- B(Ldar), R(0),
+ /* 936 E> */ B(LdaKeyedProperty), R(arg0), U8(152),
+ B(Star), R(0),
/* 942 S> */ B(Ldar), R(arg1),
- /* 948 E> */ B(LdrKeyedProperty), R(arg0), U8(154), R(0),
- B(Ldar), R(0),
+ /* 948 E> */ B(LdaKeyedProperty), R(arg0), U8(154),
+ B(Star), R(0),
/* 954 S> */ B(Ldar), R(arg1),
- /* 960 E> */ B(LdrKeyedProperty), R(arg0), U8(156), R(0),
- B(Ldar), R(0),
+ /* 960 E> */ B(LdaKeyedProperty), R(arg0), U8(156),
+ B(Star), R(0),
/* 966 S> */ B(Ldar), R(arg1),
- /* 972 E> */ B(LdrKeyedProperty), R(arg0), U8(158), R(0),
- B(Ldar), R(0),
+ /* 972 E> */ B(LdaKeyedProperty), R(arg0), U8(158),
+ B(Star), R(0),
/* 978 S> */ B(Ldar), R(arg1),
- /* 984 E> */ B(LdrKeyedProperty), R(arg0), U8(160), R(0),
- B(Ldar), R(0),
+ /* 984 E> */ B(LdaKeyedProperty), R(arg0), U8(160),
+ B(Star), R(0),
/* 990 S> */ B(Ldar), R(arg1),
- /* 996 E> */ B(LdrKeyedProperty), R(arg0), U8(162), R(0),
- B(Ldar), R(0),
+ /* 996 E> */ B(LdaKeyedProperty), R(arg0), U8(162),
+ B(Star), R(0),
/* 1002 S> */ B(Ldar), R(arg1),
- /* 1008 E> */ B(LdrKeyedProperty), R(arg0), U8(164), R(0),
- B(Ldar), R(0),
+ /* 1008 E> */ B(LdaKeyedProperty), R(arg0), U8(164),
+ B(Star), R(0),
/* 1014 S> */ B(Ldar), R(arg1),
- /* 1020 E> */ B(LdrKeyedProperty), R(arg0), U8(166), R(0),
- B(Ldar), R(0),
+ /* 1020 E> */ B(LdaKeyedProperty), R(arg0), U8(166),
+ B(Star), R(0),
/* 1026 S> */ B(Ldar), R(arg1),
- /* 1032 E> */ B(LdrKeyedProperty), R(arg0), U8(168), R(0),
- B(Ldar), R(0),
+ /* 1032 E> */ B(LdaKeyedProperty), R(arg0), U8(168),
+ B(Star), R(0),
/* 1038 S> */ B(Ldar), R(arg1),
- /* 1044 E> */ B(LdrKeyedProperty), R(arg0), U8(170), R(0),
- B(Ldar), R(0),
+ /* 1044 E> */ B(LdaKeyedProperty), R(arg0), U8(170),
+ B(Star), R(0),
/* 1050 S> */ B(Ldar), R(arg1),
- /* 1056 E> */ B(LdrKeyedProperty), R(arg0), U8(172), R(0),
- B(Ldar), R(0),
+ /* 1056 E> */ B(LdaKeyedProperty), R(arg0), U8(172),
+ B(Star), R(0),
/* 1062 S> */ B(Ldar), R(arg1),
- /* 1068 E> */ B(LdrKeyedProperty), R(arg0), U8(174), R(0),
- B(Ldar), R(0),
+ /* 1068 E> */ B(LdaKeyedProperty), R(arg0), U8(174),
+ B(Star), R(0),
/* 1074 S> */ B(Ldar), R(arg1),
- /* 1080 E> */ B(LdrKeyedProperty), R(arg0), U8(176), R(0),
- B(Ldar), R(0),
+ /* 1080 E> */ B(LdaKeyedProperty), R(arg0), U8(176),
+ B(Star), R(0),
/* 1086 S> */ B(Ldar), R(arg1),
- /* 1092 E> */ B(LdrKeyedProperty), R(arg0), U8(178), R(0),
- B(Ldar), R(0),
+ /* 1092 E> */ B(LdaKeyedProperty), R(arg0), U8(178),
+ B(Star), R(0),
/* 1098 S> */ B(Ldar), R(arg1),
- /* 1104 E> */ B(LdrKeyedProperty), R(arg0), U8(180), R(0),
- B(Ldar), R(0),
+ /* 1104 E> */ B(LdaKeyedProperty), R(arg0), U8(180),
+ B(Star), R(0),
/* 1110 S> */ B(Ldar), R(arg1),
- /* 1116 E> */ B(LdrKeyedProperty), R(arg0), U8(182), R(0),
- B(Ldar), R(0),
+ /* 1116 E> */ B(LdaKeyedProperty), R(arg0), U8(182),
+ B(Star), R(0),
/* 1122 S> */ B(Ldar), R(arg1),
- /* 1128 E> */ B(LdrKeyedProperty), R(arg0), U8(184), R(0),
- B(Ldar), R(0),
+ /* 1128 E> */ B(LdaKeyedProperty), R(arg0), U8(184),
+ B(Star), R(0),
/* 1134 S> */ B(Ldar), R(arg1),
- /* 1140 E> */ B(LdrKeyedProperty), R(arg0), U8(186), R(0),
- B(Ldar), R(0),
+ /* 1140 E> */ B(LdaKeyedProperty), R(arg0), U8(186),
+ B(Star), R(0),
/* 1146 S> */ B(Ldar), R(arg1),
- /* 1152 E> */ B(LdrKeyedProperty), R(arg0), U8(188), R(0),
- B(Ldar), R(0),
+ /* 1152 E> */ B(LdaKeyedProperty), R(arg0), U8(188),
+ B(Star), R(0),
/* 1158 S> */ B(Ldar), R(arg1),
- /* 1164 E> */ B(LdrKeyedProperty), R(arg0), U8(190), R(0),
- B(Ldar), R(0),
+ /* 1164 E> */ B(LdaKeyedProperty), R(arg0), U8(190),
+ B(Star), R(0),
/* 1170 S> */ B(Ldar), R(arg1),
- /* 1176 E> */ B(LdrKeyedProperty), R(arg0), U8(192), R(0),
- B(Ldar), R(0),
+ /* 1176 E> */ B(LdaKeyedProperty), R(arg0), U8(192),
+ B(Star), R(0),
/* 1182 S> */ B(Ldar), R(arg1),
- /* 1188 E> */ B(LdrKeyedProperty), R(arg0), U8(194), R(0),
- B(Ldar), R(0),
+ /* 1188 E> */ B(LdaKeyedProperty), R(arg0), U8(194),
+ B(Star), R(0),
/* 1194 S> */ B(Ldar), R(arg1),
- /* 1200 E> */ B(LdrKeyedProperty), R(arg0), U8(196), R(0),
- B(Ldar), R(0),
+ /* 1200 E> */ B(LdaKeyedProperty), R(arg0), U8(196),
+ B(Star), R(0),
/* 1206 S> */ B(Ldar), R(arg1),
- /* 1212 E> */ B(LdrKeyedProperty), R(arg0), U8(198), R(0),
- B(Ldar), R(0),
+ /* 1212 E> */ B(LdaKeyedProperty), R(arg0), U8(198),
+ B(Star), R(0),
/* 1218 S> */ B(Ldar), R(arg1),
- /* 1224 E> */ B(LdrKeyedProperty), R(arg0), U8(200), R(0),
- B(Ldar), R(0),
+ /* 1224 E> */ B(LdaKeyedProperty), R(arg0), U8(200),
+ B(Star), R(0),
/* 1230 S> */ B(Ldar), R(arg1),
- /* 1236 E> */ B(LdrKeyedProperty), R(arg0), U8(202), R(0),
- B(Ldar), R(0),
+ /* 1236 E> */ B(LdaKeyedProperty), R(arg0), U8(202),
+ B(Star), R(0),
/* 1242 S> */ B(Ldar), R(arg1),
- /* 1248 E> */ B(LdrKeyedProperty), R(arg0), U8(204), R(0),
- B(Ldar), R(0),
+ /* 1248 E> */ B(LdaKeyedProperty), R(arg0), U8(204),
+ B(Star), R(0),
/* 1254 S> */ B(Ldar), R(arg1),
- /* 1260 E> */ B(LdrKeyedProperty), R(arg0), U8(206), R(0),
- B(Ldar), R(0),
+ /* 1260 E> */ B(LdaKeyedProperty), R(arg0), U8(206),
+ B(Star), R(0),
/* 1266 S> */ B(Ldar), R(arg1),
- /* 1272 E> */ B(LdrKeyedProperty), R(arg0), U8(208), R(0),
- B(Ldar), R(0),
+ /* 1272 E> */ B(LdaKeyedProperty), R(arg0), U8(208),
+ B(Star), R(0),
/* 1278 S> */ B(Ldar), R(arg1),
- /* 1284 E> */ B(LdrKeyedProperty), R(arg0), U8(210), R(0),
- B(Ldar), R(0),
+ /* 1284 E> */ B(LdaKeyedProperty), R(arg0), U8(210),
+ B(Star), R(0),
/* 1290 S> */ B(Ldar), R(arg1),
- /* 1296 E> */ B(LdrKeyedProperty), R(arg0), U8(212), R(0),
- B(Ldar), R(0),
+ /* 1296 E> */ B(LdaKeyedProperty), R(arg0), U8(212),
+ B(Star), R(0),
/* 1302 S> */ B(Ldar), R(arg1),
- /* 1308 E> */ B(LdrKeyedProperty), R(arg0), U8(214), R(0),
- B(Ldar), R(0),
+ /* 1308 E> */ B(LdaKeyedProperty), R(arg0), U8(214),
+ B(Star), R(0),
/* 1314 S> */ B(Ldar), R(arg1),
- /* 1320 E> */ B(LdrKeyedProperty), R(arg0), U8(216), R(0),
- B(Ldar), R(0),
+ /* 1320 E> */ B(LdaKeyedProperty), R(arg0), U8(216),
+ B(Star), R(0),
/* 1326 S> */ B(Ldar), R(arg1),
- /* 1332 E> */ B(LdrKeyedProperty), R(arg0), U8(218), R(0),
- B(Ldar), R(0),
+ /* 1332 E> */ B(LdaKeyedProperty), R(arg0), U8(218),
+ B(Star), R(0),
/* 1338 S> */ B(Ldar), R(arg1),
- /* 1344 E> */ B(LdrKeyedProperty), R(arg0), U8(220), R(0),
- B(Ldar), R(0),
+ /* 1344 E> */ B(LdaKeyedProperty), R(arg0), U8(220),
+ B(Star), R(0),
/* 1350 S> */ B(Ldar), R(arg1),
- /* 1356 E> */ B(LdrKeyedProperty), R(arg0), U8(222), R(0),
- B(Ldar), R(0),
+ /* 1356 E> */ B(LdaKeyedProperty), R(arg0), U8(222),
+ B(Star), R(0),
/* 1362 S> */ B(Ldar), R(arg1),
- /* 1368 E> */ B(LdrKeyedProperty), R(arg0), U8(224), R(0),
- B(Ldar), R(0),
+ /* 1368 E> */ B(LdaKeyedProperty), R(arg0), U8(224),
+ B(Star), R(0),
/* 1374 S> */ B(Ldar), R(arg1),
- /* 1380 E> */ B(LdrKeyedProperty), R(arg0), U8(226), R(0),
- B(Ldar), R(0),
+ /* 1380 E> */ B(LdaKeyedProperty), R(arg0), U8(226),
+ B(Star), R(0),
/* 1386 S> */ B(Ldar), R(arg1),
- /* 1392 E> */ B(LdrKeyedProperty), R(arg0), U8(228), R(0),
- B(Ldar), R(0),
+ /* 1392 E> */ B(LdaKeyedProperty), R(arg0), U8(228),
+ B(Star), R(0),
/* 1398 S> */ B(Ldar), R(arg1),
- /* 1404 E> */ B(LdrKeyedProperty), R(arg0), U8(230), R(0),
- B(Ldar), R(0),
+ /* 1404 E> */ B(LdaKeyedProperty), R(arg0), U8(230),
+ B(Star), R(0),
/* 1410 S> */ B(Ldar), R(arg1),
- /* 1416 E> */ B(LdrKeyedProperty), R(arg0), U8(232), R(0),
- B(Ldar), R(0),
+ /* 1416 E> */ B(LdaKeyedProperty), R(arg0), U8(232),
+ B(Star), R(0),
/* 1422 S> */ B(Ldar), R(arg1),
- /* 1428 E> */ B(LdrKeyedProperty), R(arg0), U8(234), R(0),
- B(Ldar), R(0),
+ /* 1428 E> */ B(LdaKeyedProperty), R(arg0), U8(234),
+ B(Star), R(0),
/* 1434 S> */ B(Ldar), R(arg1),
- /* 1440 E> */ B(LdrKeyedProperty), R(arg0), U8(236), R(0),
- B(Ldar), R(0),
+ /* 1440 E> */ B(LdaKeyedProperty), R(arg0), U8(236),
+ B(Star), R(0),
/* 1446 S> */ B(Ldar), R(arg1),
- /* 1452 E> */ B(LdrKeyedProperty), R(arg0), U8(238), R(0),
- B(Ldar), R(0),
+ /* 1452 E> */ B(LdaKeyedProperty), R(arg0), U8(238),
+ B(Star), R(0),
/* 1458 S> */ B(Ldar), R(arg1),
- /* 1464 E> */ B(LdrKeyedProperty), R(arg0), U8(240), R(0),
- B(Ldar), R(0),
+ /* 1464 E> */ B(LdaKeyedProperty), R(arg0), U8(240),
+ B(Star), R(0),
/* 1470 S> */ B(Ldar), R(arg1),
- /* 1476 E> */ B(LdrKeyedProperty), R(arg0), U8(242), R(0),
- B(Ldar), R(0),
+ /* 1476 E> */ B(LdaKeyedProperty), R(arg0), U8(242),
+ B(Star), R(0),
/* 1482 S> */ B(Ldar), R(arg1),
- /* 1488 E> */ B(LdrKeyedProperty), R(arg0), U8(244), R(0),
- B(Ldar), R(0),
+ /* 1488 E> */ B(LdaKeyedProperty), R(arg0), U8(244),
+ B(Star), R(0),
/* 1494 S> */ B(Ldar), R(arg1),
- /* 1500 E> */ B(LdrKeyedProperty), R(arg0), U8(246), R(0),
- B(Ldar), R(0),
+ /* 1500 E> */ B(LdaKeyedProperty), R(arg0), U8(246),
+ B(Star), R(0),
/* 1506 S> */ B(Ldar), R(arg1),
- /* 1512 E> */ B(LdrKeyedProperty), R(arg0), U8(248), R(0),
- B(Ldar), R(0),
+ /* 1512 E> */ B(LdaKeyedProperty), R(arg0), U8(248),
+ B(Star), R(0),
/* 1518 S> */ B(Ldar), R(arg1),
- /* 1524 E> */ B(LdrKeyedProperty), R(arg0), U8(250), R(0),
- B(Ldar), R(0),
+ /* 1524 E> */ B(LdaKeyedProperty), R(arg0), U8(250),
+ B(Star), R(0),
/* 1530 S> */ B(Ldar), R(arg1),
- /* 1536 E> */ B(LdrKeyedProperty), R(arg0), U8(252), R(0),
- B(Ldar), R(0),
+ /* 1536 E> */ B(LdaKeyedProperty), R(arg0), U8(252),
+ B(Star), R(0),
/* 1542 S> */ B(Ldar), R(arg1),
- /* 1548 E> */ B(LdrKeyedProperty), R(arg0), U8(254), R(0),
- B(Ldar), R(0),
+ /* 1548 E> */ B(LdaKeyedProperty), R(arg0), U8(254),
+ B(Star), R(0),
/* 1554 S> */ B(Ldar), R(arg1),
- /* 1560 E> */ B(Wide), B(LdrKeyedProperty), R16(arg0), U16(256), R16(0),
- B(Ldar), R(0),
+ /* 1560 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(256),
+ B(Star), R(0),
/* 1566 S> */ B(Ldar), R(arg1),
/* 1575 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(258),
/* 1579 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index 03973619fd..cdb00db659 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -47,15 +47,16 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 22
+bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(0), U8(0),
B(Star), R(1),
- /* 47 E> */ B(LdrNamedProperty), R(1), U8(1), U8(4), R(0),
+ /* 47 E> */ B(LdaNamedProperty), R(1), U8(1), U8(4),
+ B(Star), R(0),
B(LdaConstant), U8(2),
B(Star), R(2),
- /* 48 E> */ B(Call), R(0), R(1), U8(2), U8(2),
+ /* 48 E> */ B(CallProperty), R(0), R(1), U8(2), U8(2),
/* 62 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index 59052b85d8..d7b7917baf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -10,9 +10,9 @@ top level: yes
snippet: "
var a = { func: function() { } };
"
-frame size: 5
+frame size: 4
parameter count: 1
-bytecode array length: 45
+bytecode array length: 42
bytecodes: [
B(LdaConstant), U8(0),
B(Star), R(1),
@@ -24,11 +24,10 @@ bytecodes: [
/* 8 S> */ B(LdaConstant), U8(1),
B(Star), R(1),
B(LdaZero),
- B(CreateObjectLiteral), U8(2), U8(0), U8(1), R(4),
+ B(CreateObjectLiteral), U8(2), U8(0), U8(1), R(3),
B(Star), R(2),
B(CreateClosure), U8(3), U8(0),
- B(StaNamedPropertySloppy), R(4), U8(4), U8(4),
- B(Mov), R(4), R(3),
+ B(StaNamedPropertySloppy), R(3), U8(4), U8(4),
B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
B(LdaUndefined),
/* 33 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index c9f8790384..660a01b242 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -96,7 +96,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(Wide), B(LdaSmi), U16(1234),
@@ -105,8 +105,8 @@ bytecodes: [
/* 66 E> */ B(Mul), R(0), U8(2),
B(Star), R(2),
B(SubSmi), U8(1), R(2), U8(3),
- B(LdrUndefined), R(1),
- B(Ldar), R(1),
+ B(LdaUndefined),
+ B(Star), R(1),
/* 74 S> */ B(Nop),
/* 84 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.cc b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
index 30d545abc3..082ac01ef3 100644
--- a/deps/v8/test/cctest/interpreter/source-position-matcher.cc
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
@@ -213,7 +213,8 @@ void SourcePositionMatcher::MoveToNextStatement(
if (iterator->is_statement()) {
break;
}
- positions->push_back({iterator->code_offset(), iterator->source_position(),
+ positions->push_back({iterator->code_offset(),
+ iterator->source_position().raw(),
iterator->is_statement()});
iterator->Advance();
}
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index fbcd297dd6..5a20d86297 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -464,6 +464,9 @@ TEST(PropertyCall) {
REPEAT_127(" a.func;\n") //
" return a.func(); }\n"
"f(" FUNC_ARG ")",
+
+ "function f(a) { return a.func(1).func(2).func(3); }\n"
+ "f(new (function Obj() { this.func = function(a) { return this; }})())",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
@@ -2238,6 +2241,9 @@ TEST(Modules) {
"export {foo as goo} from \"bar\"\n",
"export * from \"bar\"\n",
+
+ "import * as foo from \"bar\"\n"
+ "foo.f(foo, foo.x);\n",
};
CHECK(CompareTexts(BuildActual(printer, snippets),
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 77c146edaf..f3f4021f88 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -265,7 +265,7 @@ TEST(InterpreterShiftOpsSmi) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
@@ -304,7 +304,7 @@ TEST(InterpreterBinaryOpsSmi) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
@@ -345,7 +345,7 @@ TEST(InterpreterBinaryOpsHeapNumber) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
@@ -380,7 +380,7 @@ TEST(InterpreterStringAdd) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
struct TestCase {
Handle<Object> lhs;
@@ -472,7 +472,7 @@ TEST(InterpreterParameter1) {
TEST(InterpreterParameter8) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 8, 0, 0);
FeedbackVectorSpec feedback_spec(&zone);
@@ -527,7 +527,7 @@ TEST(InterpreterParameter8) {
TEST(InterpreterBinaryOpTypeFeedback) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
- i::Zone zone(isolate->allocator());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
struct BinaryOpExpectation {
Token::Value op;
@@ -673,7 +673,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
TEST(InterpreterBinaryOpSmiTypeFeedback) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
- i::Zone zone(isolate->allocator());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
struct BinaryOpExpectation {
Token::Value op;
@@ -708,7 +708,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
isolate->factory()->NewHeapNumber(3.1415 - 2.0),
BinaryOperationFeedback::kNumber},
{Token::Value::SUB, isolate->factory()->NewStringFromAsciiChecked("2"), 2,
- Handle<Smi>(Smi::FromInt(0), isolate), BinaryOperationFeedback::kAny},
+ Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
// BIT_OR
{Token::Value::BIT_OR, Handle<Smi>(Smi::FromInt(4), isolate), 1,
Handle<Smi>(Smi::FromInt(5), isolate),
@@ -726,7 +726,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
Handle<Smi>(Smi::FromInt(2), isolate), BinaryOperationFeedback::kNumber},
{Token::Value::BIT_AND,
isolate->factory()->NewStringFromAsciiChecked("2"), 1,
- Handle<Smi>(Smi::FromInt(0), isolate), BinaryOperationFeedback::kAny},
+ Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kAny},
// SHL
{Token::Value::SHL, Handle<Smi>(Smi::FromInt(3), isolate), 1,
Handle<Smi>(Smi::FromInt(6), isolate),
@@ -741,7 +741,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
Handle<Smi>(Smi::FromInt(1), isolate),
BinaryOperationFeedback::kSignedSmall},
{Token::Value::SAR, isolate->factory()->NewHeapNumber(3.1415), 2,
- Handle<Smi>(Smi::FromInt(0), isolate), BinaryOperationFeedback::kNumber},
+ Handle<Smi>(Smi::kZero, isolate), BinaryOperationFeedback::kNumber},
{Token::Value::SAR, isolate->factory()->NewStringFromAsciiChecked("2"), 1,
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
@@ -777,7 +777,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
TEST(InterpreterUnaryOpFeedback) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
- i::Zone zone(isolate->allocator());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
Handle<Smi> smi_one = Handle<Smi>(Smi::FromInt(1), isolate);
Handle<Smi> smi_max = Handle<Smi>(Smi::FromInt(Smi::kMaxValue), isolate);
@@ -854,7 +854,7 @@ TEST(InterpreterUnaryOpFeedback) {
TEST(InterpreterBitwiseTypeFeedback) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
- i::Zone zone(isolate->allocator());
+ i::Zone zone(isolate->allocator(), ZONE_NAME);
const Token::Value kBitwiseBinaryOperators[] = {
Token::Value::BIT_OR, Token::Value::BIT_XOR, Token::Value::BIT_AND,
Token::Value::SHL, Token::Value::SHR, Token::Value::SAR};
@@ -1030,7 +1030,7 @@ TEST(InterpreterLoadNamedProperty) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
@@ -1083,7 +1083,7 @@ TEST(InterpreterLoadKeyedProperty) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
@@ -1125,7 +1125,7 @@ TEST(InterpreterStoreNamedProperty) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddStoreICSlot();
@@ -1184,7 +1184,7 @@ TEST(InterpreterStoreKeyedProperty) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddKeyedStoreICSlot();
@@ -1231,7 +1231,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddLoadICSlot();
@@ -1255,7 +1255,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
.StoreAccumulatorInRegister(reg)
.MoveRegister(builder.Parameter(0), args[0]);
- builder.Call(reg, args, call_slot_index, tail_call_mode);
+ builder.Call(reg, args, call_slot_index, Call::GLOBAL_CALL, tail_call_mode);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1277,7 +1277,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(reg)
.MoveRegister(builder.Parameter(0), args[0]);
- builder.Call(reg, args, call_slot_index, tail_call_mode);
+ builder.Call(reg, args, call_slot_index, Call::GLOBAL_CALL, tail_call_mode);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -1308,7 +1308,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
.LoadLiteral(Smi::FromInt(11))
.StoreAccumulatorInRegister(args[2]);
- builder.Call(reg, args, call_slot_index, tail_call_mode);
+ builder.Call(reg, args, call_slot_index, Call::GLOBAL_CALL, tail_call_mode);
builder.Return();
@@ -1356,7 +1356,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
.LoadLiteral(factory->NewStringFromAsciiChecked("j"))
.StoreAccumulatorInRegister(args[10]);
- builder.Call(reg, args, call_slot_index, tail_call_mode);
+ builder.Call(reg, args, call_slot_index, Call::GLOBAL_CALL, tail_call_mode);
builder.Return();
@@ -1407,7 +1407,7 @@ static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
TEST(InterpreterJumps) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
FeedbackVectorSpec feedback_spec(&zone);
@@ -1421,7 +1421,7 @@ TEST(InterpreterJumps) {
Register reg(0), scratch(1);
BytecodeLabel label[3];
- builder.LoadLiteral(Smi::FromInt(0))
+ builder.LoadLiteral(Smi::kZero)
.StoreAccumulatorInRegister(reg)
.Jump(&label[1]);
SetRegister(builder, reg, 1024, scratch).Bind(&label[0]);
@@ -1446,7 +1446,7 @@ TEST(InterpreterJumps) {
TEST(InterpreterConditionalJumps) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
FeedbackVectorSpec feedback_spec(&zone);
@@ -1463,7 +1463,7 @@ TEST(InterpreterConditionalJumps) {
BytecodeLabel label[2];
BytecodeLabel done, done1;
- builder.LoadLiteral(Smi::FromInt(0))
+ builder.LoadLiteral(Smi::kZero)
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(&label[0]);
@@ -1496,7 +1496,7 @@ TEST(InterpreterConditionalJumps2) {
// TODO(oth): Add tests for all conditional jumps near and far.
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 2);
FeedbackVectorSpec feedback_spec(&zone);
@@ -1513,7 +1513,7 @@ TEST(InterpreterConditionalJumps2) {
BytecodeLabel label[2];
BytecodeLabel done, done1;
- builder.LoadLiteral(Smi::FromInt(0))
+ builder.LoadLiteral(Smi::kZero)
.StoreAccumulatorInRegister(reg)
.LoadFalse()
.JumpIfFalse(&label[0]);
@@ -1547,7 +1547,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Isolate* isolate = handles.main_isolate();
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 1, 0, 257);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -1557,7 +1557,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Register reg(0), scratch(256);
BytecodeLabel done, fake;
- builder.LoadLiteral(Smi::FromInt(0));
+ builder.LoadLiteral(Smi::kZero);
builder.StoreAccumulatorInRegister(reg);
// Consume all 8-bit operands
for (int i = 1; i <= 256; i++) {
@@ -1570,7 +1570,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
// Emit more than 16-bit immediate operands worth of code to jump over.
builder.Bind(&fake);
for (int i = 0; i < 6600; i++) {
- builder.LoadLiteral(Smi::FromInt(0)); // 1-byte
+ builder.LoadLiteral(Smi::kZero); // 1-byte
builder.BinaryOperation(Token::Value::ADD, scratch,
vector->GetIndex(slot)); // 6-bytes
builder.StoreAccumulatorInRegister(scratch); // 4-bytes
@@ -1607,14 +1607,14 @@ TEST(InterpreterJumpWith32BitOperand) {
Register reg(0);
BytecodeLabel done;
- builder.LoadLiteral(Smi::FromInt(0));
+ builder.LoadLiteral(Smi::kZero);
builder.StoreAccumulatorInRegister(reg);
// Consume all 16-bit constant pool entries
for (int i = 1; i <= 65536; i++) {
builder.LoadLiteral(isolate->factory()->NewNumber(i));
}
builder.Jump(&done);
- builder.LoadLiteral(Smi::FromInt(0));
+ builder.LoadLiteral(Smi::kZero);
builder.Bind(&done);
builder.Return();
@@ -1692,7 +1692,7 @@ TEST(InterpreterSmiComparisons) {
for (size_t j = 0; j < arraysize(inputs); j++) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
@@ -1739,7 +1739,7 @@ TEST(InterpreterHeapNumberComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
FeedbackVectorSpec feedback_spec(&zone);
@@ -1775,7 +1775,7 @@ TEST(InterpreterStringComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
std::string inputs[] = {"A", "abc", "z", "", "Foo!", "Foo"};
@@ -1841,7 +1841,7 @@ TEST(InterpreterMixedComparisons) {
Isolate* isolate = handles.main_isolate();
Factory* factory = isolate->factory();
BytecodeArrayBuilder builder(isolate, handles.main_zone(), 0, 0, 1);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec feedback_spec(&zone);
FeedbackVectorSlot slot = feedback_spec.AddInterpreterCompareICSlot();
@@ -2315,7 +2315,7 @@ TEST(InterpreterConstruct) {
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(0));
+ CHECK_EQ(Smi::cast(*return_val), Smi::kZero);
}
@@ -2528,10 +2528,10 @@ TEST(InterpreterLogicalAnd) {
std::make_pair("var a, b = 10; return a && b;\n",
factory->undefined_value()),
std::make_pair("var a = 0, b = 10; return a && b / a;\n",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
std::make_pair("var a = '0', b = 10; return a && b;\n",
handle(Smi::FromInt(10), isolate)),
- std::make_pair("return 0.0 && 3.2;\n", handle(Smi::FromInt(0), isolate)),
+ std::make_pair("return 0.0 && 3.2;\n", handle(Smi::kZero, isolate)),
std::make_pair("return 'a' && 'b';\n",
factory->NewStringFromStaticChars("b")),
std::make_pair("return 'a' && 0 || 'b', 'c';\n",
@@ -2683,16 +2683,12 @@ TEST(InterpreterCountOperators) {
handle(Smi::FromInt(4), isolate)),
std::make_pair("var a = 5; return a--;",
handle(Smi::FromInt(5), isolate)),
- std::make_pair("var a = 5.2; return --a;",
- factory->NewHeapNumber(4.2)),
- std::make_pair("var a = 'string'; return ++a;",
- factory->nan_value()),
- std::make_pair("var a = 'string'; return a--;",
- factory->nan_value()),
+ std::make_pair("var a = 5.2; return --a;", factory->NewHeapNumber(4.2)),
+ std::make_pair("var a = 'string'; return ++a;", factory->nan_value()),
+ std::make_pair("var a = 'string'; return a--;", factory->nan_value()),
std::make_pair("var a = true; return ++a;",
handle(Smi::FromInt(2), isolate)),
- std::make_pair("var a = false; return a--;",
- handle(Smi::FromInt(0), isolate)),
+ std::make_pair("var a = false; return a--;", handle(Smi::kZero, isolate)),
std::make_pair("var a = { val: 11 }; return ++a.val;",
handle(Smi::FromInt(12), isolate)),
std::make_pair("var a = { val: 11 }; return a.val--;",
@@ -2712,9 +2708,9 @@ TEST(InterpreterCountOperators) {
std::make_pair("var i = 1; if(i--) { return 1; } else { return 2; };",
handle(Smi::FromInt(1), isolate)),
std::make_pair("var i = -2; do {} while(i++) {}; return i;",
- handle(Smi::FromInt(1), isolate)),
+ handle(Smi::FromInt(1), isolate)),
std::make_pair("var i = -1; for(; i++; ) {}; return i",
- handle(Smi::FromInt(1), isolate)),
+ handle(Smi::FromInt(1), isolate)),
std::make_pair("var i = 20; switch(i++) {\n"
" case 20: return 1;\n"
" default: return 2;\n"
@@ -3895,19 +3891,19 @@ TEST(InterpreterLookupContextSlot) {
std::tuple<const char*, const char*, Handle<Object>> lookup_slot[] = {
// Eval in inner context.
std::make_tuple("var x = 0;", "eval(''); return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
std::make_tuple("var x = 0;", "eval('var x = 1'); return x;",
handle(Smi::FromInt(1), isolate)),
std::make_tuple("var x = 0;",
"'use strict'; eval('var x = 1'); return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
// Eval in outer context.
std::make_tuple("var x = 0; eval('');", "return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
std::make_tuple("var x = 0; eval('var x = 1');", "return x;",
handle(Smi::FromInt(1), isolate)),
std::make_tuple("'use strict'; var x = 0; eval('var x = 1');",
- "return x;", handle(Smi::FromInt(0), isolate)),
+ "return x;", handle(Smi::kZero, isolate)),
};
for (size_t i = 0; i < arraysize(lookup_slot); i++) {
@@ -3937,18 +3933,18 @@ TEST(InterpreterLookupGlobalSlot) {
std::tuple<const char*, const char*, Handle<Object>> lookup_slot[] = {
// Eval in inner context.
std::make_tuple("x = 0;", "eval(''); return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
std::make_tuple("x = 0;", "eval('var x = 1'); return x;",
handle(Smi::FromInt(1), isolate)),
std::make_tuple("x = 0;", "'use strict'; eval('var x = 1'); return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
// Eval in outer context.
std::make_tuple("x = 0; eval('');", "return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
std::make_tuple("x = 0; eval('var x = 1');", "return x;",
handle(Smi::FromInt(1), isolate)),
std::make_tuple("'use strict'; x = 0; eval('var x = 1');", "return x;",
- handle(Smi::FromInt(0), isolate)),
+ handle(Smi::kZero, isolate)),
};
for (size_t i = 0; i < arraysize(lookup_slot); i++) {
diff --git a/deps/v8/test/cctest/libplatform/test-tracing.cc b/deps/v8/test/cctest/libplatform/test-tracing.cc
index 66fa0e05a5..6189ed2dfc 100644
--- a/deps/v8/test/cctest/libplatform/test-tracing.cc
+++ b/deps/v8/test/cctest/libplatform/test-tracing.cc
@@ -14,12 +14,9 @@ namespace tracing {
TEST(TestTraceConfig) {
LocalContext env;
TraceConfig* trace_config = new TraceConfig();
- trace_config->EnableSampling();
trace_config->AddIncludedCategory("v8");
trace_config->AddIncludedCategory(TRACE_DISABLED_BY_DEFAULT("v8.runtime"));
- trace_config->AddExcludedCategory("v8.cpu_profile");
- CHECK_EQ(trace_config->IsSamplingEnabled(), true);
CHECK_EQ(trace_config->IsSystraceEnabled(), false);
CHECK_EQ(trace_config->IsArgumentFilterEnabled(), false);
CHECK_EQ(trace_config->IsCategoryGroupEnabled("v8"), true);
@@ -41,8 +38,8 @@ TEST(TestTraceObject) {
CHECK_EQ(category_enabled_flag, *trace_object.category_enabled_flag());
CHECK_EQ(std::string("Test.Trace"), std::string(trace_object.name()));
CHECK_EQ(std::string("Test.Scope"), std::string(trace_object.scope()));
- CHECK_EQ(0, trace_object.duration());
- CHECK_EQ(0, trace_object.cpu_duration());
+ CHECK_EQ(0u, trace_object.duration());
+ CHECK_EQ(0u, trace_object.cpu_duration());
}
class ConvertableToTraceFormatMock : public v8::ConvertableToTraceFormat {
@@ -189,7 +186,7 @@ TEST(TestTracingController) {
TRACE_EVENT0("v8", "v8.Test3");
tracing_controller.StopTracing();
- CHECK_EQ(2, writer->events().size());
+ CHECK_EQ(2u, writer->events().size());
CHECK_EQ(std::string("v8.Test"), writer->events()[0]);
CHECK_EQ(std::string("v8.Test3"), writer->events()[1]);
@@ -297,7 +294,7 @@ TEST(TestTracingControllerMultipleArgsAndCopy) {
GetJSONStrings(all_names, trace_str, "\"name\"", "\"", "\"");
GetJSONStrings(all_cats, trace_str, "\"cat\"", "\"", "\"");
- CHECK_EQ(all_args.size(), 24);
+ CHECK_EQ(all_args.size(), 24u);
CHECK_EQ(all_args[0], "\"aa\":11");
CHECK_EQ(all_args[1], "\"bb\":22");
CHECK_EQ(all_args[2], "\"cc\":33");
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index fffd1200f2..3f5ae50d45 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -27,7 +27,7 @@ class ChunkSource : public v8::ScriptCompiler::ExternalSourceStream {
// If extra_chunky, we'll use increasingly large chunk sizes.
// If not, we'll have a single chunk of full length.
size_t chunk_size = extra_chunky ? 1 : len;
- for (size_t i = 0; i < len; i += chunk_size, chunk_size *= 2) {
+ for (size_t i = 0; i < len; i += chunk_size, chunk_size++) {
chunks_.push_back({data + i, i::Min(chunk_size, len - i)});
}
chunks_.push_back({nullptr, 0});
@@ -132,6 +132,13 @@ TEST(Utf8StreamBOM) {
stream->Seek(5);
CHECK_EQ(unicode_ucs2[5], stream->Advance());
+
+ // Try again, but make sure we have to seek 'backwards'.
+ while (v8::internal::Utf16CharacterStream::kEndOfInput != stream->Advance()) {
+ // Do nothing. We merely advance the stream to the end of its input.
+ }
+ stream->Seek(5);
+ CHECK_EQ(unicode_ucs2[5], stream->Advance());
}
TEST(Utf8SplitBOM) {
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index 2577aa5868..12884ba106 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -17,8 +17,8 @@ namespace {
const char src_simple[] = "function foo() { var x = 2 * a() + b; }";
-std::unique_ptr<Scanner> make_scanner(const char* src) {
- std::unique_ptr<Scanner> scanner(new Scanner(new UnicodeCache()));
+std::unique_ptr<Scanner> make_scanner(const char* src, UnicodeCache* cache) {
+ std::unique_ptr<Scanner> scanner(new Scanner(cache));
scanner->Initialize(ScannerStream::ForTesting(src).release());
return scanner;
}
@@ -30,11 +30,13 @@ std::unique_ptr<Scanner> make_scanner(const char* src) {
#define DCHECK_TOK(a, b) DCHECK_EQ(Token::Name(a), Token::Name(b))
TEST(Bookmarks) {
+ UnicodeCache unicode_cache;
+
// Scan through the given source and record the tokens for use as reference
// below.
std::vector<Token::Value> tokens;
{
- auto scanner = make_scanner(src_simple);
+ auto scanner = make_scanner(src_simple, &unicode_cache);
do {
tokens.push_back(scanner->Next());
} while (scanner->current_token() != Token::EOS);
@@ -48,7 +50,7 @@ TEST(Bookmarks) {
// - scan until the end.
// At each step, compare to the reference token sequence generated above.
for (size_t bookmark_pos = 0; bookmark_pos < tokens.size(); bookmark_pos++) {
- auto scanner = make_scanner(src_simple);
+ auto scanner = make_scanner(src_simple, &unicode_cache);
Scanner::BookmarkScope bookmark(scanner.get());
for (size_t i = 0; i < std::min(bookmark_pos + 10, tokens.size()); i++) {
@@ -77,8 +79,9 @@ TEST(AllThePushbacks) {
{"<!-- xx -->\nx", {Token::IDENTIFIER, Token::EOS}},
};
+ UnicodeCache unicode_cache;
for (const auto& test_case : test_cases) {
- auto scanner = make_scanner(test_case.src);
+ auto scanner = make_scanner(test_case.src, &unicode_cache);
for (size_t i = 0; test_case.tokens[i] != Token::EOS; i++) {
DCHECK_TOK(test_case.tokens[i], scanner->Next());
}
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index cda16cdbcb..e9773e918d 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -4,9 +4,9 @@
#include "test/cctest/cctest.h"
-#include "include/v8.h"
#include "include/v8-experimental.h"
-
+#include "include/v8.h"
+#include "src/api.h"
namespace i = v8::internal;
@@ -111,3 +111,139 @@ TEST(FastAccessors) {
"}",
31415);
}
+
+// The goal is to avoid the callback.
+static void UnreachableCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& info) {
+ UNREACHABLE();
+}
+
+TEST(CachedAccessor) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+ // code uses the slow accessor which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Create 'foo' class, with a hidden property.
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+
+ v8::Local<v8::Private> priv =
+ v8::Private::ForApi(isolate, v8_str("Foo#draft"));
+
+ foo->SetAccessorProperty(v8_str("draft"), v8::FunctionTemplate::NewWithCache(
+ isolate, UnreachableCallback,
+ priv, v8::Local<v8::Value>()));
+
+ // Create 'obj', instance of 'foo'.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+
+ // Install the private property on the instance.
+ CHECK(obj->SetPrivate(isolate->GetCurrentContext(), priv,
+ v8::Undefined(isolate))
+ .FromJust());
+
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Access cached accessor.
+ ExpectUndefined("obj.draft");
+
+ // Set hidden property.
+ CHECK(obj->SetPrivate(isolate->GetCurrentContext(), priv,
+ v8_str("Shhh, I'm private!"))
+ .FromJust());
+
+ ExpectString("obj.draft", "Shhh, I'm private!");
+
+ // Stress the accessor to use the IC.
+ ExpectString(
+ "var result = '';"
+ "for (var i = 0; i < 10; ++i) { "
+ " result = obj.draft; "
+ "} "
+ "result; ",
+ "Shhh, I'm private!");
+}
+
+TEST(CachedAccessorCrankshaft) {
+ i::FLAG_allow_natives_syntax = true;
+ // v8::internal::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ // Create 'foo' class, with a hidden property.
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ v8::Local<v8::Private> priv =
+ v8::Private::ForApi(isolate, v8_str("Foo#draft"));
+
+ // Install the private property on the template.
+ // foo->SetPrivate(priv, v8::Undefined(isolate));
+
+ foo->SetAccessorProperty(v8_str("draft"), v8::FunctionTemplate::NewWithCache(
+ isolate, UnreachableCallback,
+ priv, v8::Local<v8::Value>()));
+
+ // Create 'obj', instance of 'foo'.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+
+ // Install the private property on the instance.
+ CHECK(obj->SetPrivate(isolate->GetCurrentContext(), priv,
+ v8::Undefined(isolate))
+ .FromJust());
+
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Access surrogate accessor.
+ ExpectUndefined("obj.draft");
+
+ // Set hidden property.
+ CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 123))
+ .FromJust());
+
+ // Test ICs.
+ CompileRun(
+ "function f() {"
+ " var x;"
+ " for (var i = 0; i < 100; i++) {"
+ " x = obj.draft;"
+ " }"
+ " return x;"
+ "}");
+
+ ExpectInt32("f()", 123);
+
+ // Reset hidden property.
+ CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 456))
+ .FromJust());
+
+ // Test Crankshaft.
+ CompileRun("%OptimizeFunctionOnNextCall(f);");
+
+ ExpectInt32("f()", 456);
+
+ CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 456))
+ .FromJust());
+ // Test non-global ICs.
+ CompileRun(
+ "function g() {"
+ " var x = obj;"
+ " var r = 0;"
+ " for (var i = 0; i < 100; i++) {"
+ " r = x.draft;"
+ " }"
+ " return r;"
+ "}");
+
+ ExpectInt32("g()", 456);
+
+ // Reset hidden property.
+ CHECK(obj->SetPrivate(env.local(), priv, v8::Integer::New(isolate, 789))
+ .FromJust());
+
+ // Test non-global access in Crankshaft.
+ CompileRun("%OptimizeFunctionOnNextCall(g);");
+
+ ExpectInt32("g()", 789);
+}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 8317a06aa2..c578115888 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -151,6 +151,8 @@ UNINITIALIZED_TEST(InitializeAndDisposeMultiple) {
for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
}
+// Tests that Smi::kZero is set up properly.
+UNINITIALIZED_TEST(SmiZero) { CHECK_EQ(i::Smi::kZero, i::Smi::kZero); }
THREADED_TEST(Handles) {
v8::HandleScope scope(CcTest::isolate());
@@ -7768,7 +7770,7 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
FlagAndPersistent object_a, object_b;
- intptr_t big_heap_size;
+ size_t big_heap_size;
{
v8::HandleScope handle_scope(iso);
@@ -9702,15 +9704,6 @@ TEST(DetachGlobal) {
result = CompileRun("other.p");
CHECK(result->IsInt32());
CHECK_EQ(24, result->Int32Value(env3).FromJust());
-
- // Change security token for env3 to something different from env1 and env2.
- env3->SetSecurityToken(v8_str("bar"));
-
- // Check that we do not have access to other.p in env1. |other| is now
- // the global object for env3 which has a different security token,
- // so access should be blocked.
- result = CompileRun("other.p");
- CHECK(result.IsEmpty());
}
@@ -10562,7 +10555,7 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
v8::PropertyFilter::ALL_PROPERTIES |
v8::PropertyFilter::SKIP_SYMBOLS))
.ToLocal(&properties));
- CHECK_EQ(5, properties->Length());
+ CHECK_EQ(5u, properties->Length());
v8::Local<v8::Value> property;
CHECK(properties->Get(context.local(), 4).ToLocal(&property) &&
property->IsString());
@@ -10578,7 +10571,7 @@ THREADED_TEST(ObjectGetOwnPropertyNames) {
CHECK(value->GetOwnPropertyNames(context.local(), v8::ONLY_ENUMERABLE)
.ToLocal(&properties));
- CHECK_EQ(4, properties->Length());
+ CHECK_EQ(4u, properties->Length());
for (int i = 0; i < 4; ++i) {
v8::Local<v8::Value> property;
CHECK(properties->Get(context.local(), i).ToLocal(&property) &&
@@ -13827,34 +13820,23 @@ void ApiTestFuzzer::CallTest() {
test_number_);
}
-// Lets not be needlessly self-referential.
-TEST(Threading1) {
- ApiTestFuzzer::SetUp(ApiTestFuzzer::FIRST_PART);
- ApiTestFuzzer::RunAllTests();
- ApiTestFuzzer::TearDown();
-}
-
-
-TEST(Threading2) {
- ApiTestFuzzer::SetUp(ApiTestFuzzer::SECOND_PART);
- ApiTestFuzzer::RunAllTests();
- ApiTestFuzzer::TearDown();
-}
-
-
-TEST(Threading3) {
- ApiTestFuzzer::SetUp(ApiTestFuzzer::THIRD_PART);
- ApiTestFuzzer::RunAllTests();
- ApiTestFuzzer::TearDown();
-}
-
+#define THREADING_TEST(INDEX, NAME) \
+ TEST(Threading##INDEX) { \
+ ApiTestFuzzer::SetUp(ApiTestFuzzer::NAME); \
+ ApiTestFuzzer::RunAllTests(); \
+ ApiTestFuzzer::TearDown(); \
+ }
-TEST(Threading4) {
- ApiTestFuzzer::SetUp(ApiTestFuzzer::FOURTH_PART);
- ApiTestFuzzer::RunAllTests();
- ApiTestFuzzer::TearDown();
-}
+THREADING_TEST(1, FIRST_PART)
+THREADING_TEST(2, SECOND_PART)
+THREADING_TEST(3, THIRD_PART)
+THREADING_TEST(4, FOURTH_PART)
+THREADING_TEST(5, FIFTH_PART)
+THREADING_TEST(6, SIXTH_PART)
+THREADING_TEST(7, SEVENTH_PART)
+THREADING_TEST(8, EIGHTH_PART)
+#undef THREADING_TEST
static void ThrowInJS(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
@@ -14579,7 +14561,7 @@ void SetFunctionEntryHookTest::RunTest() {
RunLoopInNewEnv(isolate);
// Check the expected invocation counts.
- if (!i::FLAG_ignition) {
+ if (!i::FLAG_ignition && !i::FLAG_turbo) {
CHECK_EQ(2, CountInvocations(NULL, "bar"));
CHECK_EQ(200, CountInvocations("bar", "foo"));
CHECK_EQ(200, CountInvocations(NULL, "foo"));
@@ -14644,7 +14626,7 @@ static bool FunctionNameIs(const char* expected,
// "LazyCompile:<type><function_name>" or Function:<type><function_name>,
// where the type is one of "*", "~" or "".
static const char* kPreamble;
- if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) {
+ if (!i::FLAG_lazy) {
kPreamble = "Function:";
} else {
kPreamble = "LazyCompile:";
@@ -14820,8 +14802,9 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
for (int i = 0; i < kIterations; ++i) {
LocalContext env(isolate);
i::AlwaysAllocateScope always_allocate(i_isolate);
- i::heap::SimulateFullSpace(i::FLAG_ignition ? heap->old_space()
- : heap->code_space());
+ i::heap::SimulateFullSpace(i::FLAG_ignition || i::FLAG_turbo
+ ? heap->old_space()
+ : heap->code_space());
CompileRun(script);
// Keep a strong reference to the code object in the handle scope.
@@ -15374,7 +15357,7 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
// Tests that ScriptData can be serialized and deserialized.
TEST(PreCompileSerialization) {
// Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+ if (!i::FLAG_lazy) return;
v8::V8::Initialize();
LocalContext env;
@@ -21753,10 +21736,6 @@ TEST(ScopedMicrotasks) {
env->GetIsolate()->SetMicrotasksPolicy(v8::MicrotasksPolicy::kAuto);
}
-#ifdef ENABLE_DISASSEMBLER
-// FLAG_test_primary_stub_cache and FLAG_test_secondary_stub_cache are read
-// only when ENABLE_DISASSEMBLER is not defined.
-
namespace {
int probes_counter = 0;
@@ -21774,6 +21753,14 @@ int* LookupCounter(const char* name) {
return NULL;
}
+} // namespace
+
+#ifdef ENABLE_DISASSEMBLER
+// FLAG_test_primary_stub_cache and FLAG_test_secondary_stub_cache are read
+// only when ENABLE_DISASSEMBLER is not defined.
+
+namespace {
+
const char* kMegamorphicTestProgram =
"function CreateClass(name) {\n"
" var src = \n"
@@ -21802,7 +21789,7 @@ void TestStubCache(bool primary) {
// The test does not work with interpreter because bytecode handlers taken
// from the snapshot already refer to ICs with disabled counters and there
// is no way to trigger bytecode handlers recompilation.
- if (i::FLAG_ignition) return;
+ if (i::FLAG_ignition || i::FLAG_turbo) return;
i::FLAG_native_code_counters = true;
if (primary) {
@@ -21826,12 +21813,10 @@ void TestStubCache(bool primary) {
// Enforce recompilation of IC stubs that access megamorphic stub cache
// to respect enabled native code counters and stub cache test flags.
i::CodeStub::Major code_stub_keys[] = {
- i::CodeStub::LoadIC, i::CodeStub::LoadICTrampoline,
- i::CodeStub::LoadICTF, i::CodeStub::LoadICTrampolineTF,
- i::CodeStub::KeyedLoadIC, i::CodeStub::KeyedLoadICTrampoline,
- i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
- i::CodeStub::StoreICTF, i::CodeStub::StoreICTrampolineTF,
- i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
+ i::CodeStub::LoadIC, i::CodeStub::LoadICTrampoline,
+ i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
+ i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
+ i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
};
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Heap* heap = i_isolate->heap();
@@ -21870,22 +21855,10 @@ void TestStubCache(bool primary) {
} // namespace
UNINITIALIZED_TEST(PrimaryStubCache) {
- i::FLAG_tf_load_ic_stub = false;
TestStubCache(true);
}
UNINITIALIZED_TEST(SecondaryStubCache) {
- i::FLAG_tf_load_ic_stub = false;
- TestStubCache(false);
-}
-
-UNINITIALIZED_TEST(PrimaryStubCacheTF) {
- i::FLAG_tf_load_ic_stub = true;
- TestStubCache(true);
-}
-
-UNINITIALIZED_TEST(SecondaryStubCacheTF) {
- i::FLAG_tf_load_ic_stub = true;
TestStubCache(false);
}
@@ -22689,6 +22662,7 @@ TEST(AccessCheckThrows) {
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
v8::Local<v8::Object> global0 = context0->Global();
+ CHECK(global0->Set(context0.local(), v8_str("x"), global0).FromJust());
// Create a context with a different security token so that the
// failed access check callback will be called on each access.
@@ -22743,6 +22717,128 @@ TEST(AccessCheckThrows) {
isolate->SetFailedAccessCheckCallbackFunction(NULL);
}
+TEST(AccessCheckInIC) {
+ // The test does not work with interpreter because bytecode handlers taken
+ // from the snapshot already refer to ICs with disabled counters and there
+ // is no way to trigger bytecode handlers recompilation.
+ if (i::FLAG_ignition || i::FLAG_turbo) return;
+
+ i::FLAG_native_code_counters = true;
+ i::FLAG_crankshaft = false;
+ i::FLAG_turbo = false;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ create_params.counter_lookup_callback = LookupCounter;
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ LocalContext env(isolate);
+ v8::HandleScope scope(isolate);
+
+ {
+ // Enforce recompilation of IC stubs that access megamorphic stub cache
+ // to respect enabled native code counters and stub cache test flags.
+ i::CodeStub::Major code_stub_keys[] = {
+ i::CodeStub::LoadIC, i::CodeStub::LoadICTrampoline,
+ i::CodeStub::KeyedLoadICTF, i::CodeStub::KeyedLoadICTrampolineTF,
+ i::CodeStub::StoreIC, i::CodeStub::StoreICTrampoline,
+ i::CodeStub::KeyedStoreIC, i::CodeStub::KeyedStoreICTrampoline,
+ };
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Heap* heap = i_isolate->heap();
+ i::Handle<i::UnseededNumberDictionary> dict(heap->code_stubs());
+ for (size_t i = 0; i < arraysize(code_stub_keys); i++) {
+ dict = i::UnseededNumberDictionary::DeleteKey(dict, code_stub_keys[i]);
+ }
+ heap->SetRootCodeStubs(*dict);
+ }
+
+ // Create an ObjectTemplate for global objects and install access
+ // check callbacks that will block access.
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate);
+ global_template->SetAccessCheckCallback(AccessCounter);
+
+ // Create a context and set an x property on its global object.
+ LocalContext context0(isolate, NULL, global_template);
+ v8::Local<v8::Object> global0 = context0->Global();
+ CHECK(global0->Set(context0.local(), v8_str("x"), global0).FromJust());
+
+ // Create a context with a different security token so that the
+ // failed access check callback will be called on each access.
+ LocalContext context1(isolate, NULL, global_template);
+ CHECK(context1->Global()
+ ->Set(context1.local(), v8_str("other"), global0)
+ .FromJust());
+
+ // Set different security tokens.
+ Local<Value> token0 = v8_str("token0");
+ context0.local()->SetSecurityToken(token0);
+ context1.local()->SetSecurityToken(v8_str("token1"));
+
+ int initial_probes = probes_counter;
+ int initial_misses = misses_counter;
+ int initial_updates = updates_counter;
+ access_count = 0;
+
+ // Create megamorphic load ic with a handler for "global0.x" compiled for
+ // context0.
+ CompileRun(context0.local(),
+ "Number(1).__proto__.x = null;\n"
+ "String(1).__proto__.x = null;\n"
+ "function get0(o) { return o.x; };\n"
+ "get0({x:1});\n" // premonomorphic
+ "get0({x:1,a:0});\n" // monomorphic
+ "get0({x:1,b:0});\n" // polymorphic
+ "get0('str');\n"
+ "get0(1.1);\n"
+ "get0(this);\n" // megamorphic
+ "");
+ CHECK_EQ(0, probes_counter - initial_probes);
+ CHECK_EQ(0, misses_counter - initial_misses);
+ CHECK_EQ(5, updates_counter - initial_updates);
+
+ // Create megamorphic load ic in context1.
+ CompileRun(context1.local(),
+ "function get1(o) { return o.x; };\n"
+ "get1({x:1});\n" // premonomorphic
+ "get1({x:1,a:0});\n" // monomorphic
+ "get1({x:1,b:0});\n" // polymorphic
+ "get1({x:1,c:0});\n"
+ "get1({x:1,d:0});\n"
+ "get1({x:1,e:0});\n" // megamorphic
+ "");
+ CHECK_EQ(0, access_count);
+ CHECK_EQ(0, probes_counter - initial_probes);
+ CHECK_EQ(0, misses_counter - initial_misses);
+ CHECK_EQ(10, updates_counter - initial_updates);
+
+ // Feed the |other| to the load ic and ensure that it doesn't pick the
+ // handler for "global0.x" compiled for context0 from the megamorphic
+ // cache but create another handler for "global0.x" compiled for context1
+ // and ensure the access check callback is triggered.
+ CompileRun(context1.local(), "get1(other)");
+ CHECK_EQ(1, access_count); // Access check callback must be triggered.
+
+ // Feed the primitive objects to the load ic and ensure that it doesn't
+ // pick handlers for primitive maps from the megamorphic stub cache even
+ // if the security token matches.
+ context1.local()->SetSecurityToken(token0);
+ CHECK(CompileRun(context1.local(), "get1(1.1)")
+ .ToLocalChecked()
+ ->IsUndefined());
+ CHECK(CompileRun(context1.local(), "get1('str')")
+ .ToLocalChecked()
+ ->IsUndefined());
+
+ CHECK_EQ(1, access_count); // Access check callback must be triggered.
+ CHECK_EQ(3, probes_counter - initial_probes);
+ CHECK_EQ(0, misses_counter - initial_misses);
+ CHECK_EQ(13, updates_counter - initial_updates);
+ }
+ isolate->Dispose();
+}
class RequestInterruptTestBase {
public:
@@ -24027,65 +24123,69 @@ TEST(ScriptPositionInfo) {
v8::internal::Script::PositionInfo info;
- // With offset.
-
- // Behave as if 0 was passed if position is negative.
- CHECK(script1->GetPositionInfo(-1, &info, script1->WITH_OFFSET));
- CHECK_EQ(13, info.line);
- CHECK_EQ(0, info.column);
- CHECK_EQ(0, info.line_start);
- CHECK_EQ(8, info.line_end);
-
- CHECK(script1->GetPositionInfo(0, &info, script1->WITH_OFFSET));
- CHECK_EQ(13, info.line);
- CHECK_EQ(0, info.column);
- CHECK_EQ(0, info.line_start);
- CHECK_EQ(8, info.line_end);
-
- CHECK(script1->GetPositionInfo(8, &info, script1->WITH_OFFSET));
- CHECK_EQ(13, info.line);
- CHECK_EQ(8, info.column);
- CHECK_EQ(0, info.line_start);
- CHECK_EQ(8, info.line_end);
-
- CHECK(script1->GetPositionInfo(9, &info, script1->WITH_OFFSET));
- CHECK_EQ(14, info.line);
- CHECK_EQ(0, info.column);
- CHECK_EQ(9, info.line_start);
- CHECK_EQ(17, info.line_end);
-
- // Fail when position is larger than script size.
- CHECK(!script1->GetPositionInfo(220384, &info, script1->WITH_OFFSET));
-
- // Without offset.
-
- // Behave as if 0 was passed if position is negative.
- CHECK(script1->GetPositionInfo(-1, &info, script1->NO_OFFSET));
- CHECK_EQ(0, info.line);
- CHECK_EQ(0, info.column);
- CHECK_EQ(0, info.line_start);
- CHECK_EQ(8, info.line_end);
-
- CHECK(script1->GetPositionInfo(0, &info, script1->NO_OFFSET));
- CHECK_EQ(0, info.line);
- CHECK_EQ(0, info.column);
- CHECK_EQ(0, info.line_start);
- CHECK_EQ(8, info.line_end);
-
- CHECK(script1->GetPositionInfo(8, &info, script1->NO_OFFSET));
- CHECK_EQ(0, info.line);
- CHECK_EQ(8, info.column);
- CHECK_EQ(0, info.line_start);
- CHECK_EQ(8, info.line_end);
-
- CHECK(script1->GetPositionInfo(9, &info, script1->NO_OFFSET));
- CHECK_EQ(1, info.line);
- CHECK_EQ(0, info.column);
- CHECK_EQ(9, info.line_start);
- CHECK_EQ(17, info.line_end);
-
- // Fail when position is larger than script size.
- CHECK(!script1->GetPositionInfo(220384, &info, script1->NO_OFFSET));
+ for (int i = 0; i < 2; ++i) {
+ // With offset.
+
+ // Behave as if 0 was passed if position is negative.
+ CHECK(script1->GetPositionInfo(-1, &info, script1->WITH_OFFSET));
+ CHECK_EQ(13, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(0, &info, script1->WITH_OFFSET));
+ CHECK_EQ(13, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(8, &info, script1->WITH_OFFSET));
+ CHECK_EQ(13, info.line);
+ CHECK_EQ(8, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(9, &info, script1->WITH_OFFSET));
+ CHECK_EQ(14, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(9, info.line_start);
+ CHECK_EQ(17, info.line_end);
+
+ // Fail when position is larger than script size.
+ CHECK(!script1->GetPositionInfo(220384, &info, script1->WITH_OFFSET));
+
+ // Without offset.
+
+ // Behave as if 0 was passed if position is negative.
+ CHECK(script1->GetPositionInfo(-1, &info, script1->NO_OFFSET));
+ CHECK_EQ(0, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(0, &info, script1->NO_OFFSET));
+ CHECK_EQ(0, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(8, &info, script1->NO_OFFSET));
+ CHECK_EQ(0, info.line);
+ CHECK_EQ(8, info.column);
+ CHECK_EQ(0, info.line_start);
+ CHECK_EQ(8, info.line_end);
+
+ CHECK(script1->GetPositionInfo(9, &info, script1->NO_OFFSET));
+ CHECK_EQ(1, info.line);
+ CHECK_EQ(0, info.column);
+ CHECK_EQ(9, info.line_start);
+ CHECK_EQ(17, info.line_end);
+
+ // Fail when position is larger than script size.
+ CHECK(!script1->GetPositionInfo(220384, &info, script1->NO_OFFSET));
+
+ i::Script::InitLineEnds(script1);
+ }
}
void CheckMagicComments(Local<Script> script, const char* expected_source_url,
@@ -24853,7 +24953,7 @@ TEST(InvalidParserCacheData) {
v8::V8::Initialize();
v8::HandleScope scope(CcTest::isolate());
LocalContext context;
- if (i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager)) {
+ if (i::FLAG_lazy) {
// Cached parser data is not consumed while parsing eagerly.
TestInvalidCacheData(v8::ScriptCompiler::kConsumeParserCache);
}
@@ -24869,7 +24969,7 @@ TEST(InvalidCodeCacheData) {
TEST(ParserCacheRejectedGracefully) {
// Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+ if (!i::FLAG_lazy) return;
i::FLAG_min_preparse_length = 0;
v8::V8::Initialize();
@@ -25296,6 +25396,12 @@ TEST(ExtrasUtilsObject) {
rejected_promise->Catch(env.local(), store).ToLocalChecked();
isolate->RunMicrotasks();
CHECK_EQ(3, CompileRun("result")->Int32Value(env.local()).FromJust());
+
+ auto rejected_but_handled_promise =
+ result->Get(env.local(), v8_str("rejectedButHandledPromise"))
+ .ToLocalChecked()
+ .As<v8::Promise>();
+ CHECK_EQ(true, rejected_but_handled_promise->HasHandler());
}
@@ -25959,3 +26065,122 @@ TEST(EvalInAccessCheckedContext) {
CHECK_EQ(42, x_value->Int32Value(context1).FromJust());
context1->Exit();
}
+
+THREADED_TEST(ImmutableProtoWithParent) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ Local<v8::FunctionTemplate> parent = v8::FunctionTemplate::New(isolate);
+
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ templ->Inherit(parent);
+ templ->PrototypeTemplate()->SetImmutableProto();
+
+ Local<v8::Function> function =
+ templ->GetFunction(context.local()).ToLocalChecked();
+ Local<v8::Object> instance =
+ function->NewInstance(context.local()).ToLocalChecked();
+ Local<v8::Object> prototype =
+ instance->Get(context.local(), v8_str("__proto__"))
+ .ToLocalChecked()
+ ->ToObject(context.local())
+ .ToLocalChecked();
+
+ // Look up the prototype
+ Local<v8::Value> original_proto =
+ prototype->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
+
+ // Setting the prototype (e.g., to null) throws
+ CHECK(
+ prototype->SetPrototype(context.local(), v8::Null(isolate)).IsNothing());
+
+ // The original prototype is still there
+ Local<Value> new_proto =
+ prototype->Get(context.local(), v8_str("__proto__")).ToLocalChecked();
+ CHECK(new_proto->IsObject());
+ CHECK(new_proto.As<v8::Object>()
+ ->Equals(context.local(), original_proto)
+ .FromJust());
+}
+
+TEST(InternalFieldsOnGlobalProxy) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(isolate);
+ obj_template->SetInternalFieldCount(1);
+
+ v8::Local<v8::Context> context = Context::New(isolate, nullptr, obj_template);
+ v8::Local<v8::Object> global = context->Global();
+ CHECK_EQ(1, global->InternalFieldCount());
+}
+
+THREADED_TEST(ImmutableProtoGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
+ global_template->SetImmutableProto();
+ v8::Local<Context> context = Context::New(isolate, 0, global_template);
+ Context::Scope context_scope(context);
+ v8::Local<Value> result = CompileRun(
+ "global = this;"
+ "(function() {"
+ " try {"
+ " global.__proto__ = {};"
+ " return 0;"
+ " } catch (e) {"
+ " return 1;"
+ " }"
+ "})()");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 1))
+ .FromJust());
+}
+
+THREADED_TEST(MutableProtoGlobal) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
+ v8::Local<Context> context = Context::New(isolate, 0, global_template);
+ Context::Scope context_scope(context);
+ v8::Local<Value> result = CompileRun(
+ "global = this;"
+ "(function() {"
+ " try {"
+ " global.__proto__ = {};"
+ " return 0;"
+ " } catch (e) {"
+ " return 1;"
+ " }"
+ "})()");
+ CHECK(result->Equals(context, v8::Integer::New(CcTest::isolate(), 0))
+ .FromJust());
+}
+
+TEST(InternalFieldsOnTypedArray) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ Context::Scope context_scope(context);
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1);
+ v8::Local<v8::Uint8Array> array = v8::Uint8Array::New(buffer, 0, 1);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ CHECK_EQ(static_cast<void*>(nullptr),
+ array->GetAlignedPointerFromInternalField(i));
+ }
+}
+
+TEST(InternalFieldsOnDataView) {
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::Context> context = env.local();
+ Context::Scope context_scope(context);
+ v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 1);
+ v8::Local<v8::DataView> array = v8::DataView::New(buffer, 0, 1);
+ for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
+ CHECK_EQ(static_cast<void*>(nullptr),
+ array->GetAlignedPointerFromInternalField(i));
+ }
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index de024f8869..08f1f5a9f3 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1818,13 +1818,11 @@ TEST(uxtah) {
}
}
-
#define TEST_RBIT(expected_, input_) \
t.input = input_; \
t.result = 0; \
dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
- CHECK_EQ(expected_, t.result);
-
+ CHECK_EQ(static_cast<uint32_t>(expected_), t.result);
TEST(rbit) {
CcTest::InitializeVM();
@@ -2808,21 +2806,21 @@ TEST(unaligned_loads) {
#endif
uint64_t data = UINT64_C(0x84838281807f7e7d);
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 0, 0, 0);
- CHECK_EQ(0x00007e7d, t.ldrh);
- CHECK_EQ(0x00007e7d, t.ldrsh);
- CHECK_EQ(0x807f7e7d, t.ldr);
+ CHECK_EQ(0x00007e7du, t.ldrh);
+ CHECK_EQ(0x00007e7du, t.ldrsh);
+ CHECK_EQ(0x807f7e7du, t.ldr);
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 1, 0, 0);
- CHECK_EQ(0x00007f7e, t.ldrh);
- CHECK_EQ(0x00007f7e, t.ldrsh);
- CHECK_EQ(0x81807f7e, t.ldr);
+ CHECK_EQ(0x00007f7eu, t.ldrh);
+ CHECK_EQ(0x00007f7eu, t.ldrsh);
+ CHECK_EQ(0x81807f7eu, t.ldr);
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 2, 0, 0);
- CHECK_EQ(0x0000807f, t.ldrh);
- CHECK_EQ(0xffff807f, t.ldrsh);
- CHECK_EQ(0x8281807f, t.ldr);
+ CHECK_EQ(0x0000807fu, t.ldrh);
+ CHECK_EQ(0xffff807fu, t.ldrsh);
+ CHECK_EQ(0x8281807fu, t.ldr);
dummy = CALL_GENERATED_CODE(isolate, f, &t, &data, 3, 0, 0);
- CHECK_EQ(0x00008180, t.ldrh);
- CHECK_EQ(0xffff8180, t.ldrsh);
- CHECK_EQ(0x83828180, t.ldr);
+ CHECK_EQ(0x00008180u, t.ldrh);
+ CHECK_EQ(0xffff8180u, t.ldrsh);
+ CHECK_EQ(0x83828180u, t.ldr);
}
TEST(unaligned_stores) {
diff --git a/deps/v8/test/cctest/test-ast-types.cc b/deps/v8/test/cctest/test-ast-types.cc
index 39d2d70eb0..942989d63a 100644
--- a/deps/v8/test/cctest/test-ast-types.cc
+++ b/deps/v8/test/cctest/test-ast-types.cc
@@ -48,7 +48,7 @@ struct Tests {
Tests()
: isolate(CcTest::InitIsolateOnce()),
scope(isolate),
- zone(isolate->allocator()),
+ zone(isolate->allocator(), ZONE_NAME),
T(&zone, isolate, isolate->random_number_generator()) {}
bool IsBitset(AstType* type) { return type->IsBitsetForTesting(); }
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index 5512bfce96..dfb4d11236 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -40,7 +40,7 @@ TEST(List) {
CHECK_EQ(0, list->length());
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
AstValueFactory value_factory(&zone, 0);
AstNodeFactory factory(&value_factory);
AstNode* node = factory.NewEmptyStatement(kNoSourcePosition);
@@ -62,7 +62,7 @@ TEST(List) {
TEST(ConcatStrings) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
AstValueFactory value_factory(&zone, 0);
const AstRawString* one_byte = value_factory.GetOneByteString("a");
diff --git a/deps/v8/test/cctest/test-atomicops.cc b/deps/v8/test/cctest/test-atomicops.cc
index 4a59ba68e3..5ac4d5e473 100644
--- a/deps/v8/test/cctest/test-atomicops.cc
+++ b/deps/v8/test/cctest/test-atomicops.cc
@@ -193,11 +193,6 @@ static void TestStore() {
NoBarrier_Store(&value, kVal2);
CHECK_EQU(kVal2, value);
- Acquire_Store(&value, kVal1);
- CHECK_EQU(kVal1, value);
- Acquire_Store(&value, kVal2);
- CHECK_EQU(kVal2, value);
-
Release_Store(&value, kVal1);
CHECK_EQU(kVal1, value);
Release_Store(&value, kVal2);
@@ -238,11 +233,6 @@ static void TestLoad() {
CHECK_EQU(kVal1, Acquire_Load(&value));
value = kVal2;
CHECK_EQU(kVal2, Acquire_Load(&value));
-
- value = kVal1;
- CHECK_EQU(kVal1, Release_Load(&value));
- value = kVal2;
- CHECK_EQU(kVal2, Release_Load(&value));
}
diff --git a/deps/v8/test/cctest/test-bit-vector.cc b/deps/v8/test/cctest/test-bit-vector.cc
index 99c5a68d45..a6a1e4001a 100644
--- a/deps/v8/test/cctest/test-bit-vector.cc
+++ b/deps/v8/test/cctest/test-bit-vector.cc
@@ -36,7 +36,7 @@ using namespace v8::internal;
TEST(BitVector) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
{
BitVector v(15, &zone);
v.Add(1);
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 588f430131..da2773fdca 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -61,13 +61,14 @@ TEST(LoadInstanceType) {
Handle<Smi>::cast(result.ToHandleChecked())->value());
}
-TEST(BitFieldDecode) {
+TEST(DecodeWordFromWord32) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
CodeStubAssemblerTester m(isolate, descriptor);
class TestBitField : public BitField<unsigned, 3, 3> {};
- m.Return(m.SmiTag(m.BitFieldDecode<TestBitField>(m.Int32Constant(0x2f))));
+ m.Return(
+ m.SmiTag(m.DecodeWordFromWord32<TestBitField>(m.Int32Constant(0x2f))));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
@@ -119,6 +120,102 @@ TEST(ComputeIntegerHash) {
}
}
+TEST(ToString) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+ m.Return(m.ToString(m.Parameter(kNumParams + 2), m.Parameter(0)));
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<FixedArray> test_cases = isolate->factory()->NewFixedArray(5);
+ Handle<FixedArray> smi_test = isolate->factory()->NewFixedArray(2);
+ smi_test->set(0, Smi::FromInt(42));
+ Handle<String> str(isolate->factory()->InternalizeUtf8String("42"));
+ smi_test->set(1, *str);
+ test_cases->set(0, *smi_test);
+
+ Handle<FixedArray> number_test = isolate->factory()->NewFixedArray(2);
+ Handle<HeapNumber> num(isolate->factory()->NewHeapNumber(3.14));
+ number_test->set(0, *num);
+ str = isolate->factory()->InternalizeUtf8String("3.14");
+ number_test->set(1, *str);
+ test_cases->set(1, *number_test);
+
+ Handle<FixedArray> string_test = isolate->factory()->NewFixedArray(2);
+ str = isolate->factory()->InternalizeUtf8String("test");
+ string_test->set(0, *str);
+ string_test->set(1, *str);
+ test_cases->set(2, *string_test);
+
+ Handle<FixedArray> oddball_test = isolate->factory()->NewFixedArray(2);
+ oddball_test->set(0, isolate->heap()->undefined_value());
+ str = isolate->factory()->InternalizeUtf8String("undefined");
+ oddball_test->set(1, *str);
+ test_cases->set(3, *oddball_test);
+
+ Handle<FixedArray> tostring_test = isolate->factory()->NewFixedArray(2);
+ Handle<FixedArray> js_array_storage = isolate->factory()->NewFixedArray(2);
+ js_array_storage->set(0, Smi::FromInt(1));
+ js_array_storage->set(1, Smi::FromInt(2));
+ Handle<JSArray> js_array = isolate->factory()->NewJSArray(2);
+ JSArray::SetContent(js_array, js_array_storage);
+ tostring_test->set(0, *js_array);
+ str = isolate->factory()->InternalizeUtf8String("1,2");
+ tostring_test->set(1, *str);
+ test_cases->set(4, *tostring_test);
+
+ for (int i = 0; i < 5; ++i) {
+ Handle<FixedArray> test = handle(FixedArray::cast(test_cases->get(i)));
+ Handle<Object> obj = handle(test->get(0), isolate);
+ Handle<String> expected = handle(String::cast(test->get(1)));
+ Handle<Object> result = ft.Call(obj).ToHandleChecked();
+ CHECK(result->IsString());
+ CHECK(String::Equals(Handle<String>::cast(result), expected));
+ }
+}
+
+TEST(FlattenString) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ const int kNumParams = 1;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+ m.Return(m.FlattenString(m.Parameter(0)));
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<FixedArray> test_cases(isolate->factory()->NewFixedArray(4));
+ Handle<String> expected(
+ isolate->factory()->InternalizeUtf8String("hello, world!"));
+ test_cases->set(0, *expected);
+
+ Handle<String> string(
+ isolate->factory()->InternalizeUtf8String("filler hello, world! filler"));
+ Handle<String> sub_string(
+ isolate->factory()->NewProperSubString(string, 7, 20));
+ test_cases->set(1, *sub_string);
+
+ Handle<String> hello(isolate->factory()->InternalizeUtf8String("hello,"));
+ Handle<String> world(isolate->factory()->InternalizeUtf8String(" world!"));
+ Handle<String> cons_str(
+ isolate->factory()->NewConsString(hello, world).ToHandleChecked());
+ test_cases->set(2, *cons_str);
+
+ Handle<String> empty(isolate->factory()->InternalizeUtf8String(""));
+ Handle<String> fake_cons_str(
+ isolate->factory()->NewConsString(expected, empty).ToHandleChecked());
+ test_cases->set(3, *fake_cons_str);
+
+ for (int i = 0; i < 4; ++i) {
+ Handle<String> test = handle(String::cast(test_cases->get(i)));
+ Handle<Object> result = ft.Call(test).ToHandleChecked();
+ CHECK(result->IsString());
+ CHECK(Handle<String>::cast(result)->IsFlat());
+ CHECK(String::Equals(Handle<String>::cast(result), expected));
+ }
+}
+
TEST(TryToName) {
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
@@ -173,7 +270,7 @@ TEST(TryToName) {
{
// TryToName(<zero smi>) => if_keyisindex: smi value.
- Handle<Object> key(Smi::FromInt(0), isolate);
+ Handle<Object> key(Smi::kZero, isolate);
ft.CheckTrue(key, expect_index, key);
}
@@ -251,6 +348,37 @@ TEST(TryToName) {
namespace {
template <typename Dictionary>
+void TestEntryToIndex() {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 1;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+ {
+ Node* entry = m.SmiUntag(m.Parameter(0));
+ Node* result = m.EntryToIndex<Dictionary>(entry);
+ m.Return(m.SmiTag(result));
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ // Test a wide range of entries but staying linear in the first 100 entries.
+ for (int entry = 0; entry < Dictionary::kMaxCapacity;
+ entry = entry * 1.01 + 1) {
+ Handle<Object> result =
+ ft.Call(handle(Smi::FromInt(entry), isolate)).ToHandleChecked();
+ CHECK_EQ(Dictionary::EntryToIndex(entry), Smi::cast(*result)->value());
+ }
+}
+
+TEST(NameDictionaryEntryToIndex) { TestEntryToIndex<NameDictionary>(); }
+TEST(GlobalDictionaryEntryToIndex) { TestEntryToIndex<GlobalDictionary>(); }
+
+} // namespace
+
+namespace {
+
+template <typename Dictionary>
void TestNameDictionaryLookup() {
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
@@ -944,7 +1072,7 @@ TEST(TryLookupElement) {
FunctionTester ft(code, kNumParams);
Factory* factory = isolate->factory();
- Handle<Object> smi0(Smi::FromInt(0), isolate);
+ Handle<Object> smi0(Smi::kZero, isolate);
Handle<Object> smi1(Smi::FromInt(1), isolate);
Handle<Object> smi7(Smi::FromInt(7), isolate);
Handle<Object> smi13(Smi::FromInt(13), isolate);
@@ -1189,9 +1317,9 @@ void TestStubCacheOffsetCalculation(StubCache::Table table) {
factory->sloppy_arguments_elements_map(),
};
- for (int name_index = 0; name_index < arraysize(names); name_index++) {
+ for (size_t name_index = 0; name_index < arraysize(names); name_index++) {
Handle<Name> name = names[name_index];
- for (int map_index = 0; map_index < arraysize(maps); map_index++) {
+ for (size_t map_index = 0; map_index < arraysize(maps); map_index++) {
Handle<Map> map = maps[map_index];
int expected_result;
@@ -1257,12 +1385,12 @@ TEST(TryProbeStubCache) {
m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
&if_miss);
m.Bind(&if_handler);
- m.BranchIfWordEqual(expected_handler, var_handler.value(), &passed,
- &failed);
+ m.Branch(m.WordEqual(expected_handler, var_handler.value()), &passed,
+ &failed);
m.Bind(&if_miss);
- m.BranchIfWordEqual(expected_handler, m.IntPtrConstant(0), &passed,
- &failed);
+ m.Branch(m.WordEqual(expected_handler, m.IntPtrConstant(0)), &passed,
+ &failed);
m.Bind(&passed);
m.Return(m.BooleanConstant(true));
@@ -1346,14 +1474,14 @@ TEST(TryProbeStubCache) {
int index = rand_gen.NextInt();
Handle<Name> name = names[index % names.size()];
Handle<JSObject> receiver = receivers[index % receivers.size()];
- Code* handler = stub_cache.Get(*name, receiver->map());
+ Object* handler = stub_cache.Get(*name, receiver->map());
if (handler == nullptr) {
queried_non_existing = true;
} else {
queried_existing = true;
}
- Handle<Code> expected_handler(handler, isolate);
+ Handle<Object> expected_handler(handler, isolate);
ft.CheckTrue(receiver, name, expected_handler);
}
@@ -1362,14 +1490,14 @@ TEST(TryProbeStubCache) {
int index2 = rand_gen.NextInt();
Handle<Name> name = names[index1 % names.size()];
Handle<JSObject> receiver = receivers[index2 % receivers.size()];
- Code* handler = stub_cache.Get(*name, receiver->map());
+ Object* handler = stub_cache.Get(*name, receiver->map());
if (handler == nullptr) {
queried_non_existing = true;
} else {
queried_existing = true;
}
- Handle<Code> expected_handler(handler, isolate);
+ Handle<Object> expected_handler(handler, isolate);
ft.CheckTrue(receiver, name, expected_handler);
}
// Ensure we performed both kind of queries.
@@ -1382,7 +1510,8 @@ TEST(GotoIfException) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 1;
- CodeStubAssemblerTester m(isolate, kNumParams);
+ // Emulate TFJ builtin
+ CodeStubAssemblerTester m(isolate, kNumParams, Code::BUILTIN);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
Node* to_string_tag =
@@ -1401,9 +1530,6 @@ TEST(GotoIfException) {
Handle<Code> code = m.GenerateCode();
CHECK(!code.is_null());
- // Emulate TFJ builtin
- code->set_flags(Code::ComputeFlags(Code::BUILTIN));
-
FunctionTester ft(code, kNumParams);
Handle<Object> result = ft.Call().ToHandleChecked();
@@ -1423,7 +1549,8 @@ TEST(GotoIfExceptionMultiple) {
Isolate* isolate(CcTest::InitIsolateOnce());
const int kNumParams = 4; // receiver, first, second, third
- CodeStubAssemblerTester m(isolate, kNumParams);
+ // Emulate TFJ builtin
+ CodeStubAssemblerTester m(isolate, kNumParams, Code::BUILTIN);
Node* context = m.HeapConstant(Handle<Context>(isolate->native_context()));
Node* first_value = m.Parameter(0);
@@ -1468,9 +1595,6 @@ TEST(GotoIfExceptionMultiple) {
Handle<Code> code = m.GenerateCode();
CHECK(!code.is_null());
- // Emulate TFJ builtin
- code->set_flags(Code::ComputeFlags(Code::BUILTIN));
-
FunctionTester ft(code, kNumParams);
Handle<Object> result;
@@ -1509,5 +1633,350 @@ TEST(GotoIfExceptionMultiple) {
CHECK(constructor->SameValue(*isolate->type_error_function()));
}
+TEST(AllocateJSObjectFromMap) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ Factory* factory = isolate->factory();
+
+ const int kNumParams = 3;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ {
+ Node* map = m.Parameter(0);
+ Node* properties = m.Parameter(1);
+ Node* elements = m.Parameter(2);
+
+ Node* result = m.AllocateJSObjectFromMap(map, properties, elements);
+
+ m.Return(result);
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Map> maps[] = {
+ handle(isolate->object_function()->initial_map(), isolate),
+ handle(isolate->array_function()->initial_map(), isolate),
+ };
+
+#define VERIFY(result, map_value, properties_value, elements_value) \
+ CHECK_EQ(result->map(), map_value); \
+ CHECK_EQ(result->properties(), properties_value); \
+ CHECK_EQ(result->elements(), elements_value);
+
+ {
+ Handle<Object> empty_fixed_array = factory->empty_fixed_array();
+ for (size_t i = 0; i < arraysize(maps); i++) {
+ Handle<Map> map = maps[i];
+ Handle<JSObject> result = Handle<JSObject>::cast(
+ ft.Call(map, empty_fixed_array, empty_fixed_array).ToHandleChecked());
+ VERIFY(result, *map, *empty_fixed_array, *empty_fixed_array);
+ CHECK(result->HasFastProperties());
+#ifdef VERIFY_HEAP
+ isolate->heap()->Verify();
+#endif
+ }
+ }
+
+ {
+ // TODO(cbruni): handle in-object properties
+ Handle<JSObject> object = Handle<JSObject>::cast(
+ v8::Utils::OpenHandle(*CompileRun("var object = {a:1,b:2, 1:1, 2:2}; "
+ "object")));
+ JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+ "Normalize");
+ Handle<JSObject> result = Handle<JSObject>::cast(
+ ft.Call(handle(object->map()), handle(object->properties()),
+ handle(object->elements()))
+ .ToHandleChecked());
+ VERIFY(result, object->map(), object->properties(), object->elements());
+ CHECK(!result->HasFastProperties());
+#ifdef VERIFY_HEAP
+ isolate->heap()->Verify();
+#endif
+ }
+#undef VERIFY
+}
+
+TEST(AllocateNameDictionary) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 1;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ {
+ Node* capacity = m.Parameter(0);
+ Node* result = m.AllocateNameDictionary(m.SmiUntag(capacity));
+ m.Return(result);
+ }
+
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ {
+ for (int i = 0; i < 256; i = i * 1.1 + 1) {
+ Handle<Object> result =
+ ft.Call(handle(Smi::FromInt(i), isolate)).ToHandleChecked();
+ Handle<NameDictionary> dict = NameDictionary::New(isolate, i);
+ // Both dictionaries should be memory equal.
+ int size =
+ FixedArrayBase::kHeaderSize + (dict->length() - 1) * kPointerSize;
+ CHECK_EQ(0, memcmp(*dict, *result, size));
+ }
+ }
+}
+
+TEST(PopAndReturnConstant) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ const int kNumProgramaticParams = 2;
+ CodeStubAssemblerTester m(isolate, kNumParams - kNumProgramaticParams);
+
+ // Call a function that return |kNumProgramaticParams| parameters in addition
+ // to those specified by the static descriptor. |kNumProgramaticParams| is
+ // specified as a constant.
+ m.PopAndReturn(m.Int32Constant(kNumProgramaticParams),
+ m.SmiConstant(Smi::FromInt(1234)));
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ FunctionTester ft(code, kNumParams);
+ Handle<Object> result;
+ for (int test_count = 0; test_count < 100; ++test_count) {
+ result = ft.Call(isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(1234), isolate),
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value())
+ .ToHandleChecked();
+ CHECK_EQ(1234, Handle<Smi>::cast(result)->value());
+ }
+}
+
+TEST(PopAndReturnVariable) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ const int kNumProgramaticParams = 2;
+ CodeStubAssemblerTester m(isolate, kNumParams - kNumProgramaticParams);
+
+ // Call a function that return |kNumProgramaticParams| parameters in addition
+ // to those specified by the static descriptor. |kNumProgramaticParams| is
+ // passed in as a parameter to the function so that it can't be recongized as
+ // a constant.
+ m.PopAndReturn(m.SmiUntag(m.Parameter(1)), m.SmiConstant(Smi::FromInt(1234)));
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ FunctionTester ft(code, kNumParams);
+ Handle<Object> result;
+ for (int test_count = 0; test_count < 100; ++test_count) {
+ result = ft.Call(isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(1234), isolate),
+ isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(kNumProgramaticParams), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(1234, Handle<Smi>::cast(result)->value());
+ }
+}
+
+TEST(OneToTwoByteStringCopy) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeStubAssemblerTester m(isolate, 2);
+
+ m.CopyStringCharacters(
+ m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
+ m.SmiConstant(Smi::FromInt(0)), m.SmiConstant(Smi::FromInt(5)),
+ String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
+ m.Return(m.SmiConstant(Smi::FromInt(0)));
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
+ uc16 array[] = {1000, 1001, 1002, 1003, 1004};
+ Vector<const uc16> str(array);
+ Handle<String> string2 =
+ isolate->factory()->NewStringFromTwoByte(str).ToHandleChecked();
+ FunctionTester ft(code, 2);
+ ft.Call(string1, string2);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[0],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[0]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[1],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[1]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[2],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[2]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[3],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[3]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[4],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[4]);
+}
+
+TEST(OneToOneByteStringCopy) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeStubAssemblerTester m(isolate, 2);
+
+ m.CopyStringCharacters(
+ m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
+ m.SmiConstant(Smi::FromInt(0)), m.SmiConstant(Smi::FromInt(5)),
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
+ m.Return(m.SmiConstant(Smi::FromInt(0)));
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
+ uint8_t array[] = {100, 101, 102, 103, 104};
+ Vector<const uint8_t> str(array);
+ Handle<String> string2 =
+ isolate->factory()->NewStringFromOneByte(str).ToHandleChecked();
+ FunctionTester ft(code, 2);
+ ft.Call(string1, string2);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[0],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[0]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[1],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[1]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[2],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[2]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[3],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[3]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[4],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[4]);
+}
+
+TEST(OneToOneByteStringCopyNonZeroStart) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeStubAssemblerTester m(isolate, 2);
+
+ m.CopyStringCharacters(
+ m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
+ m.SmiConstant(Smi::FromInt(3)), m.SmiConstant(Smi::FromInt(2)),
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
+ m.Return(m.SmiConstant(Smi::FromInt(0)));
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ Handle<String> string1 = isolate->factory()->InternalizeUtf8String("abcde");
+ uint8_t array[] = {100, 101, 102, 103, 104};
+ Vector<const uint8_t> str(array);
+ Handle<String> string2 =
+ isolate->factory()->NewStringFromOneByte(str).ToHandleChecked();
+ FunctionTester ft(code, 2);
+ ft.Call(string1, string2);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[0],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[3]);
+ CHECK_EQ(Handle<SeqOneByteString>::cast(string1)->GetChars()[1],
+ Handle<SeqOneByteString>::cast(string2)->GetChars()[4]);
+ CHECK_EQ(100, Handle<SeqOneByteString>::cast(string2)->GetChars()[0]);
+ CHECK_EQ(101, Handle<SeqOneByteString>::cast(string2)->GetChars()[1]);
+ CHECK_EQ(102, Handle<SeqOneByteString>::cast(string2)->GetChars()[2]);
+}
+
+TEST(TwoToTwoByteStringCopy) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ CodeStubAssemblerTester m(isolate, 2);
+
+ m.CopyStringCharacters(
+ m.Parameter(0), m.Parameter(1), m.SmiConstant(Smi::FromInt(0)),
+ m.SmiConstant(Smi::FromInt(0)), m.SmiConstant(Smi::FromInt(5)),
+ String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
+ m.Return(m.SmiConstant(Smi::FromInt(0)));
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ uc16 array1[] = {2000, 2001, 2002, 2003, 2004};
+ Vector<const uc16> str1(array1);
+ Handle<String> string1 =
+ isolate->factory()->NewStringFromTwoByte(str1).ToHandleChecked();
+ uc16 array2[] = {1000, 1001, 1002, 1003, 1004};
+ Vector<const uc16> str2(array2);
+ Handle<String> string2 =
+ isolate->factory()->NewStringFromTwoByte(str2).ToHandleChecked();
+ FunctionTester ft(code, 2);
+ ft.Call(string1, string2);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[0],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[0]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[1],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[1]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[2],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[2]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[3],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[3]);
+ CHECK_EQ(Handle<SeqTwoByteString>::cast(string1)->GetChars()[4],
+ Handle<SeqTwoByteString>::cast(string2)->GetChars()[4]);
+}
+
+TEST(Arguments) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ CodeStubArguments arguments(&m, m.IntPtrConstant(3));
+
+ CSA_ASSERT(
+ &m, m.WordEqual(arguments.AtIndex(0), m.SmiConstant(Smi::FromInt(12))));
+ CSA_ASSERT(
+ &m, m.WordEqual(arguments.AtIndex(1), m.SmiConstant(Smi::FromInt(13))));
+ CSA_ASSERT(
+ &m, m.WordEqual(arguments.AtIndex(2), m.SmiConstant(Smi::FromInt(14))));
+
+ m.Return(arguments.GetReceiver());
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ FunctionTester ft(code, kNumParams);
+ Handle<Object> result = ft.Call(isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(*isolate->factory()->undefined_value(), *result);
+}
+
+TEST(ArgumentsForEach) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 4;
+ CodeStubAssemblerTester m(isolate, kNumParams);
+
+ CodeStubArguments arguments(&m, m.IntPtrConstant(3));
+
+ CodeStubAssemblerTester::Variable sum(&m,
+ MachineType::PointerRepresentation());
+ CodeStubAssemblerTester::VariableList list({&sum}, m.zone());
+
+ sum.Bind(m.IntPtrConstant(0));
+
+ arguments.ForEach(list, [&m, &sum](CodeStubAssembler* assembler, Node* arg) {
+ sum.Bind(assembler->IntPtrAdd(sum.value(), arg));
+ });
+
+ m.Return(sum.value());
+
+ Handle<Code> code = m.GenerateCode();
+ CHECK(!code.is_null());
+
+ FunctionTester ft(code, kNumParams);
+ Handle<Object> result = ft.Call(isolate->factory()->undefined_value(),
+ Handle<Smi>(Smi::FromInt(12), isolate),
+ Handle<Smi>(Smi::FromInt(13), isolate),
+ Handle<Smi>(Smi::FromInt(14), isolate))
+ .ToHandleChecked();
+ CHECK_EQ(Smi::FromInt(12 + 13 + 14), *result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index bce3fb2394..ef7506b58f 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -322,9 +322,10 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
// of the full code.
CHECK(f->IsOptimized());
// If the baseline code is bytecode, then it will not have deoptimization
- // support. has_deoptimization_support() check is only required if the
+ // support. The has_deoptimization_support() check is only required if the
// baseline code is from fullcodegen.
- CHECK(f->shared()->has_deoptimization_support() || i::FLAG_ignition);
+ CHECK(f->shared()->has_deoptimization_support() || i::FLAG_ignition ||
+ i::FLAG_turbo);
object = f->feedback_vector()->Get(slot_for_a);
CHECK(object->IsWeakCell() &&
WeakCell::cast(object)->value()->IsJSFunction());
@@ -332,8 +333,7 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
TEST(FeedbackVectorUnaffectedByScopeChanges) {
- if (i::FLAG_always_opt || !i::FLAG_lazy ||
- (FLAG_ignition && FLAG_ignition_eager)) {
+ if (i::FLAG_always_opt || !i::FLAG_lazy) {
return;
}
CcTest::InitializeVM();
@@ -406,150 +406,6 @@ TEST(OptimizedCodeSharing1) {
}
}
-// Test that optimized code for different closures is actually shared.
-TEST(OptimizedCodeSharing2) {
- if (FLAG_stress_compaction) return;
- FLAG_allow_natives_syntax = true;
- FLAG_native_context_specialization = false;
- FLAG_turbo_cache_shared_code = true;
- const char* flag = "--turbo-filter=*";
- FlagList::SetFlagsFromString(flag, StrLength(flag));
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Script> script = v8_compile(
- "function MakeClosure() {"
- " return function() { return x; };"
- "}");
- Handle<Code> reference_code;
- {
- LocalContext env;
- env->Global()
- ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), 23))
- .FromJust();
- script->GetUnboundScript()
- ->BindToCurrentContext()
- ->Run(env.local())
- .ToLocalChecked();
- CompileRun(
- "var closure0 = MakeClosure();"
- "%DebugPrint(closure0());"
- "%OptimizeFunctionOnNextCall(closure0);"
- "%DebugPrint(closure0());");
- Handle<JSFunction> fun0 = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(env.local(), v8_str("closure0"))
- .ToLocalChecked())));
- CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- reference_code = handle(fun0->code());
- }
- for (int i = 0; i < 3; i++) {
- LocalContext env;
- env->Global()
- ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
- .FromJust();
- script->GetUnboundScript()
- ->BindToCurrentContext()
- ->Run(env.local())
- .ToLocalChecked();
- CompileRun(
- "var closure0 = MakeClosure();"
- "%DebugPrint(closure0());"
- "%OptimizeFunctionOnNextCall(closure0);"
- "%DebugPrint(closure0());"
- "var closure1 = MakeClosure(); closure1();"
- "var closure2 = MakeClosure(); closure2();");
- Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(env.local(), v8_str("closure1"))
- .ToLocalChecked())));
- Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(env.local(), v8_str("closure2"))
- .ToLocalChecked())));
- CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- CHECK_EQ(*reference_code, fun1->code());
- CHECK_EQ(*reference_code, fun2->code());
- }
-}
-
-// Test that optimized code for different closures is actually shared.
-TEST(OptimizedCodeSharing3) {
- if (FLAG_stress_compaction) return;
- FLAG_allow_natives_syntax = true;
- FLAG_native_context_specialization = false;
- FLAG_turbo_cache_shared_code = true;
- const char* flag = "--turbo-filter=*";
- FlagList::SetFlagsFromString(flag, StrLength(flag));
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- v8::Local<v8::Script> script = v8_compile(
- "function MakeClosure() {"
- " return function() { return x; };"
- "}");
- Handle<Code> reference_code;
- {
- LocalContext env;
- env->Global()
- ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), 23))
- .FromJust();
- script->GetUnboundScript()
- ->BindToCurrentContext()
- ->Run(env.local())
- .ToLocalChecked();
- CompileRun(
- "var closure0 = MakeClosure();"
- "%DebugPrint(closure0());"
- "%OptimizeFunctionOnNextCall(closure0);"
- "%DebugPrint(closure0());");
- Handle<JSFunction> fun0 = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(env.local(), v8_str("closure0"))
- .ToLocalChecked())));
- CHECK(fun0->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- reference_code = handle(fun0->code());
- // Evict only the context-dependent entry from the optimized code map. This
- // leaves it in a state where only the context-independent entry exists.
- fun0->shared()->TrimOptimizedCodeMap(SharedFunctionInfo::kEntryLength);
- }
- for (int i = 0; i < 3; i++) {
- LocalContext env;
- env->Global()
- ->Set(env.local(), v8_str("x"), v8::Integer::New(CcTest::isolate(), i))
- .FromJust();
- script->GetUnboundScript()
- ->BindToCurrentContext()
- ->Run(env.local())
- .ToLocalChecked();
- CompileRun(
- "var closure0 = MakeClosure();"
- "%DebugPrint(closure0());"
- "%OptimizeFunctionOnNextCall(closure0);"
- "%DebugPrint(closure0());"
- "var closure1 = MakeClosure(); closure1();"
- "var closure2 = MakeClosure(); closure2();");
- Handle<JSFunction> fun1 = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(env.local(), v8_str("closure1"))
- .ToLocalChecked())));
- Handle<JSFunction> fun2 = Handle<JSFunction>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(env.local(), v8_str("closure2"))
- .ToLocalChecked())));
- CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- CHECK_EQ(*reference_code, fun1->code());
- CHECK_EQ(*reference_code, fun2->code());
- }
-}
-
-
TEST(CompileFunctionInContext) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -763,53 +619,6 @@ TEST(SplitConstantsInFullCompiler) {
}
#endif
-static void IsBaselineCompiled(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- Handle<Object> object = v8::Utils::OpenHandle(*args[0]);
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- bool is_baseline = function->shared()->code()->kind() == Code::FUNCTION;
- return args.GetReturnValue().Set(is_baseline);
-}
-
-static void InstallIsBaselineCompiledHelper(v8::Isolate* isolate) {
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8::Local<v8::FunctionTemplate> t =
- v8::FunctionTemplate::New(isolate, IsBaselineCompiled);
- CHECK(context->Global()
- ->Set(context, v8_str("IsBaselineCompiled"),
- t->GetFunction(context).ToLocalChecked())
- .FromJust());
-}
-
-TEST(IgnitionBaselineOnReturn) {
- // TODO(4280): Remove this entire test once --ignition-preserve-bytecode is
- // the default and the flag is removed. This test doesn't provide benefit any
- // longer once {InterpreterActivationsFinder} is gone.
- if (FLAG_ignition_preserve_bytecode) return;
- FLAG_allow_natives_syntax = true;
- FLAG_always_opt = false;
- CcTest::InitializeVM();
- FLAG_ignition = true;
- Isolate* isolate = CcTest::i_isolate();
- isolate->interpreter()->Initialize();
- v8::HandleScope scope(CcTest::isolate());
- InstallIsBaselineCompiledHelper(CcTest::isolate());
-
- CompileRun(
- "var is_baseline_in_function, is_baseline_after_return;\n"
- "var return_val;\n"
- "function f() {\n"
- " %CompileBaseline(f);\n"
- " is_baseline_in_function = IsBaselineCompiled(f);\n"
- " return 1234;\n"
- "};\n"
- "return_val = f();\n"
- "is_baseline_after_return = IsBaselineCompiled(f);\n");
- CHECK_EQ(false, GetGlobalProperty("is_baseline_in_function")->BooleanValue());
- CHECK_EQ(true, GetGlobalProperty("is_baseline_after_return")->BooleanValue());
- CHECK_EQ(1234.0, GetGlobalProperty("return_val")->Number());
-}
-
TEST(IgnitionEntryTrampolineSelfHealing) {
FLAG_allow_natives_syntax = true;
FLAG_always_opt = false;
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index 87dc99c3f0..2fee58a46d 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -425,7 +425,7 @@ TEST(NoHandlesForTryNumberToSize) {
SealHandleScope no_handles(isolate);
Smi* smi = Smi::FromInt(1);
CHECK(TryNumberToSize(smi, &result));
- CHECK_EQ(result, 1);
+ CHECK_EQ(result, 1u);
}
result = 0;
{
@@ -434,7 +434,7 @@ TEST(NoHandlesForTryNumberToSize) {
{
SealHandleScope no_handles(isolate);
CHECK(TryNumberToSize(*heap_number1, &result));
- CHECK_EQ(result, 2);
+ CHECK_EQ(result, 2u);
}
Handle<HeapNumber> heap_number2 = isolate->factory()->NewHeapNumber(
static_cast<double>(std::numeric_limits<size_t>::max()) + 10000.0);
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 2f92f54d37..a69dd4cad4 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -38,6 +38,9 @@
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
+#include "include/libplatform/v8-tracing.h"
+#include "src/tracing/trace-event.h"
+
using i::CodeEntry;
using i::CpuProfile;
using i::CpuProfiler;
@@ -63,15 +66,21 @@ static size_t offset(const char* src, const char* substring) {
return static_cast<size_t>(it - src);
}
+template <typename A, typename B>
+static int dist(A a, B b) {
+ return abs(static_cast<int>(a) - static_cast<int>(b));
+}
+
static const char* reason(const i::DeoptimizeReason reason) {
return i::DeoptimizeReasonToString(reason);
}
TEST(StartStop) {
- CpuProfilesCollection profiles(CcTest::i_isolate());
- ProfileGenerator generator(&profiles);
+ i::Isolate* isolate = CcTest::i_isolate();
+ CpuProfilesCollection profiles(isolate);
+ ProfileGenerator generator(isolate, &profiles);
std::unique_ptr<ProfilerEventsProcessor> processor(
- new ProfilerEventsProcessor(CcTest::i_isolate(), &generator,
+ new ProfilerEventsProcessor(isolate, &generator,
v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
processor->StopSynchronously();
@@ -154,10 +163,9 @@ TEST(CodeEvents) {
i::AbstractCode* args4_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
- ProfileGenerator* generator = new ProfileGenerator(profiles);
- ProfilerEventsProcessor* processor =
- new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
- v8::base::TimeDelta::FromMicroseconds(100));
+ ProfileGenerator* generator = new ProfileGenerator(isolate, profiles);
+ ProfilerEventsProcessor* processor = new ProfilerEventsProcessor(
+ isolate, generator, v8::base::TimeDelta::FromMicroseconds(100));
CpuProfiler profiler(isolate, profiles, generator, processor);
profiles->StartProfiling("", false);
processor->Start();
@@ -223,7 +231,7 @@ TEST(TickEvents) {
i::AbstractCode* frame3_code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
- ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfileGenerator* generator = new ProfileGenerator(isolate, profiles);
ProfilerEventsProcessor* processor =
new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100));
@@ -296,7 +304,7 @@ TEST(Issue1398) {
i::AbstractCode* code = CreateCode(&env);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
- ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfileGenerator* generator = new ProfileGenerator(isolate, profiles);
ProfilerEventsProcessor* processor =
new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100));
@@ -1027,7 +1035,6 @@ static void TickLines(bool optimize) {
CcTest::InitializeVM();
LocalContext env;
i::FLAG_allow_natives_syntax = true;
- i::FLAG_turbo_source_positions = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope scope(isolate);
@@ -1070,7 +1077,7 @@ static void TickLines(bool optimize) {
CHECK(code_address);
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
- ProfileGenerator* generator = new ProfileGenerator(profiles);
+ ProfileGenerator* generator = new ProfileGenerator(isolate, profiles);
ProfilerEventsProcessor* processor =
new ProfilerEventsProcessor(CcTest::i_isolate(), generator,
v8::base::TimeDelta::FromMicroseconds(100));
@@ -1903,10 +1910,9 @@ TEST(SourceLocation) {
.ToLocalChecked();
}
-
static const char* inlined_source =
- "function opt_function(left, right) { var k = left / 10; var r = 10 / "
- "right; return k + r; }\n";
+ "function opt_function(left, right) { var k = left*right; return k + 1; "
+ "}\n";
// 0.........1.........2.........3.........4....*....5.........6......*..7
@@ -1933,7 +1939,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
"\n"
"test(10, 10);\n"
"\n"
- "test(undefined, 10);\n"
+ "test(undefined, 1e9);\n"
"\n"
"stopProfiling();\n"
"\n";
@@ -1968,10 +1974,11 @@ TEST(DeoptAtFirstLevelInlinedSource) {
CHECK_EQ(1U, deopt_infos.size());
const v8::CpuProfileDeoptInfo& info = deopt_infos[0];
- CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber), info.deopt_reason);
+ CHECK(reason(i::DeoptimizeReason::kNotASmi) == info.deopt_reason ||
+ reason(i::DeoptimizeReason::kNotAHeapNumber) == info.deopt_reason);
CHECK_EQ(2U, info.stack.size());
CHECK_EQ(inlined_script_id, info.stack[0].script_id);
- CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
+ CHECK_LE(dist(offset(inlined_source, "*right"), info.stack[0].position), 1);
CHECK_EQ(script_id, info.stack[1].script_id);
CHECK_EQ(offset(source, "opt_function(left,"), info.stack[1].position);
@@ -1993,7 +2000,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
// 0.........1.........2.........3.........4.........5.........6.........7
const char* source =
"function test2(left, right) { return opt_function(left, right); }\n"
- "function test1(left, right) { return test2(left, right); }\n"
+ "function test1(left, right) { return test2(left, right); } \n"
"\n"
"startProfiling();\n"
"\n"
@@ -2003,7 +2010,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
"\n"
"test1(10, 10);\n"
"\n"
- "test1(undefined, 10);\n"
+ "test1(undefined, 1e9);\n"
"\n"
"stopProfiling();\n"
"\n";
@@ -2041,10 +2048,11 @@ TEST(DeoptAtSecondLevelInlinedSource) {
CHECK_EQ(1U, deopt_infos.size());
const v8::CpuProfileDeoptInfo info = deopt_infos[0];
- CHECK_EQ(reason(i::DeoptimizeReason::kNotAHeapNumber), info.deopt_reason);
+ CHECK(reason(i::DeoptimizeReason::kNotASmi) == info.deopt_reason ||
+ reason(i::DeoptimizeReason::kNotAHeapNumber) == info.deopt_reason);
CHECK_EQ(3U, info.stack.size());
CHECK_EQ(inlined_script_id, info.stack[0].script_id);
- CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
+ CHECK_LE(dist(offset(inlined_source, "*right"), info.stack[0].position), 1);
CHECK_EQ(script_id, info.stack[1].script_id);
CHECK_EQ(offset(source, "opt_function(left,"), info.stack[1].position);
CHECK_EQ(offset(source, "test2(left, right);"), info.stack[2].position);
@@ -2098,3 +2106,79 @@ TEST(DeoptUntrackedFunction) {
iprofiler->DeleteProfile(iprofile);
}
+
+using v8::platform::tracing::TraceBuffer;
+using v8::platform::tracing::TraceConfig;
+using v8::platform::tracing::TraceObject;
+
+namespace {
+
+class CpuProfileEventChecker : public v8::platform::tracing::TraceWriter {
+ public:
+ void AppendTraceEvent(TraceObject* trace_event) override {
+ if (trace_event->name() != std::string("Profile") &&
+ trace_event->name() != std::string("ProfileChunk"))
+ return;
+ CHECK(!profile_id_ || trace_event->id() == profile_id_);
+ CHECK_EQ(1, trace_event->num_args());
+ CHECK_EQ(TRACE_VALUE_TYPE_CONVERTABLE, trace_event->arg_types()[0]);
+ profile_id_ = trace_event->id();
+ v8::ConvertableToTraceFormat* arg =
+ trace_event->arg_convertables()[0].get();
+ arg->AppendAsTraceFormat(&result_json_);
+ }
+ void Flush() override {}
+
+ std::string result_json() const { return result_json_; }
+
+ private:
+ std::string result_json_;
+ uint64_t profile_id_ = 0;
+};
+
+} // namespace
+
+TEST(TracingCpuProfiler) {
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ v8::Platform* default_platform = v8::platform::CreateDefaultPlatform();
+ i::V8::SetPlatformForTesting(default_platform);
+
+ v8::platform::tracing::TracingController tracing_controller;
+ v8::platform::SetTracingController(default_platform, &tracing_controller);
+
+ CpuProfileEventChecker* event_checker = new CpuProfileEventChecker();
+ TraceBuffer* ring_buffer =
+ TraceBuffer::CreateTraceBufferRingBuffer(1, event_checker);
+ tracing_controller.Initialize(ring_buffer);
+ TraceConfig* trace_config = new TraceConfig();
+ trace_config->AddIncludedCategory(
+ TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
+
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ {
+ tracing_controller.StartTracing(trace_config);
+ auto profiler = v8::TracingCpuProfiler::Create(env->GetIsolate());
+ CompileRun("function foo() { } foo();");
+ tracing_controller.StopTracing();
+ CompileRun("function bar() { } bar();");
+ }
+
+ const char* profile_checker =
+ "function checkProfile(profile) {\n"
+ " if (typeof profile['startTime'] !== 'number') return 'startTime';\n"
+ " return '';\n"
+ "}\n"
+ "checkProfile(";
+ std::string profile_json = event_checker->result_json();
+ CHECK_LT(0u, profile_json.length());
+ printf("Profile JSON: %s\n", profile_json.c_str());
+ std::string code = profile_checker + profile_json + ")";
+ v8::Local<v8::Value> result =
+ CompileRunChecked(CcTest::isolate(), code.c_str());
+ v8::String::Utf8Value value(result);
+ printf("Check result: %*s\n", value.length(), *value);
+ CHECK_EQ(0, value.length());
+
+ i::V8::SetPlatformForTesting(old_platform);
+}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 9d63e7b3a6..3b43d9d11f 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -33,13 +33,13 @@
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/platform.h"
#include "src/compilation-cache.h"
+#include "src/debug/debug-interface.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
-
using ::v8::base::Mutex;
using ::v8::base::LockGuard;
using ::v8::base::ConditionVariable;
@@ -319,6 +319,11 @@ static void ChangeBreakOnExceptionFromJS(v8::Isolate* isolate, bool caught,
}
}
+// Change break on exception using the native API call.
+static void ChangeBreakOnExceptionFromAPI(
+ v8::Isolate* isolate, v8::DebugInterface::ExceptionBreakState state) {
+ v8::DebugInterface::ChangeBreakOnException(isolate, state);
+}
// Prepare to step to next break location.
static void PrepareStep(StepAction step_action) {
@@ -3986,6 +3991,48 @@ TEST(BreakOnException) {
edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
DebugEventCounterCheck(4, 3, 2);
+ // No break on exception using native API
+ DebugEventCounterClear();
+ MessageCallbackCountClear();
+ ChangeBreakOnExceptionFromAPI(env->GetIsolate(),
+ v8::DebugInterface::NoBreakOnException);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ DebugEventCounterCheck(0, 0, 0);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ DebugEventCounterCheck(0, 0, 1);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ DebugEventCounterCheck(0, 0, 2);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ DebugEventCounterCheck(0, 0, 2);
+
+ // Break on uncaught exception using native API
+ DebugEventCounterClear();
+ MessageCallbackCountClear();
+ ChangeBreakOnExceptionFromAPI(env->GetIsolate(),
+ v8::DebugInterface::BreakOnUncaughtException);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ DebugEventCounterCheck(0, 0, 0);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ DebugEventCounterCheck(1, 1, 1);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ DebugEventCounterCheck(2, 2, 2);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ DebugEventCounterCheck(3, 3, 2);
+
+ // Break on exception and uncaught exception using native API
+ DebugEventCounterClear();
+ MessageCallbackCountClear();
+ ChangeBreakOnExceptionFromAPI(env->GetIsolate(),
+ v8::DebugInterface::BreakOnAnyException);
+ caught->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ DebugEventCounterCheck(1, 0, 0);
+ CHECK(notCaught->Call(context, env->Global(), 0, NULL).IsEmpty());
+ DebugEventCounterCheck(2, 1, 1);
+ CHECK(notCaughtFinally->Call(context, env->Global(), 0, NULL).IsEmpty());
+ DebugEventCounterCheck(3, 2, 2);
+ edgeCaseFinally->Call(context, env->Global(), 0, NULL).ToLocalChecked();
+ DebugEventCounterCheck(4, 3, 2);
+
v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
CheckDebuggerUnloaded(env->GetIsolate());
env->GetIsolate()->RemoveMessageListeners(MessageCallbackCount);
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index c1184fa2b4..d7344a142f 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -212,7 +212,7 @@ TEST(HashTableRehash) {
for (int i = 0; i < capacity - 1; i++) {
t->insert(i, i * i, i);
}
- t->Rehash(handle(Smi::FromInt(0), isolate));
+ t->Rehash(handle(Smi::kZero, isolate));
for (int i = 0; i < capacity - 1; i++) {
CHECK_EQ(i, t->lookup(i * i));
}
@@ -225,7 +225,7 @@ TEST(HashTableRehash) {
for (int i = 0; i < capacity / 2; i++) {
t->insert(i, i * i, i);
}
- t->Rehash(handle(Smi::FromInt(0), isolate));
+ t->Rehash(handle(Smi::kZero, isolate));
for (int i = 0; i < capacity / 2; i++) {
CHECK_EQ(i, t->lookup(i * i));
}
@@ -304,7 +304,7 @@ TEST(SetRequiresCopyOnCapacityChange) {
dict->SetRequiresCopyOnCapacityChange();
Handle<Name> key = isolate->factory()->InternalizeString(
v8::Utils::OpenHandle(*v8_str("key")));
- Handle<Object> value = handle(Smi::FromInt(0), isolate);
+ Handle<Object> value = handle(Smi::kZero, isolate);
Handle<NameDictionary> new_dict =
NameDictionary::Add(dict, key, value, PropertyDetails::Empty());
CHECK_NE(*dict, *new_dict);
diff --git a/deps/v8/test/cctest/test-extra.js b/deps/v8/test/cctest/test-extra.js
index b3752d97b2..0cc4df4cc4 100644
--- a/deps/v8/test/cctest/test-extra.js
+++ b/deps/v8/test/cctest/test-extra.js
@@ -65,11 +65,16 @@
return (arg1 === arg2 && arg2 === 'x') ? 3 : -1;
}, null, new v8.InternalPackedArray('x', 'x')));
+ const rejectedButHandledPromise = v8.createPromise();
+ v8.rejectPromise(rejectedButHandledPromise, 4);
+ v8.markPromiseAsHandled(rejectedButHandledPromise);
+
return {
privateSymbol: v8.createPrivateSymbol('sym'),
fulfilledPromise, // should be fulfilled with 1
fulfilledPromise2, // should be fulfilled with 2
- rejectedPromise // should be rejected with 3
+ rejectedPromise, // should be rejected with 3
+ rejectedButHandledPromise // should be rejected but have a handler
};
};
})
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index af9c6feef0..1f9ddc6f55 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -36,7 +36,7 @@ TEST(VectorStructure) {
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
// Empty vectors are the empty fixed array.
StaticFeedbackVectorSpec empty;
@@ -103,7 +103,7 @@ TEST(VectorICMetadata) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
FeedbackVectorSpec spec(&zone);
// Set metadata.
@@ -158,7 +158,7 @@ TEST(VectorSlotClearing) {
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
// We only test clearing FeedbackVectorSlots, not FeedbackVectorSlots.
// The reason is that FeedbackVectorSlots need a full code environment
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 771d3f116d..df6a06bfc7 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -473,7 +473,7 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
map, 0, kData, NONE, Representation::None(), none_type, FORCE_FIELD);
CHECK_EQ(*new_map, *new_map2);
- Handle<Object> value(Smi::FromInt(0), isolate);
+ Handle<Object> value(Smi::kZero, isolate);
Handle<Map> prepared_map = Map::PrepareForDataProperty(new_map, 0, value);
// None to Smi generalization is trivial, map does not change.
CHECK_EQ(*new_map, *prepared_map);
@@ -589,7 +589,7 @@ static void TestGeneralizeRepresentation(
CHECK(map->is_stable());
CHECK(expectations.Check(*map));
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
if (is_detached_map) {
detach_point_map = Map::ReconfigureProperty(
@@ -607,7 +607,7 @@ static void TestGeneralizeRepresentation(
CompilationDependencies dependencies(isolate, &zone);
CHECK(!dependencies.HasAborted());
- dependencies.AssumeFieldType(field_owner);
+ dependencies.AssumeFieldOwner(field_owner);
Handle<Map> new_map =
Map::ReconfigureProperty(map, property_index, kData, NONE,
@@ -981,11 +981,11 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
CompilationDependencies dependencies(isolate, &zone);
CHECK(!dependencies.HasAborted());
- dependencies.AssumeFieldType(field_owner);
+ dependencies.AssumeFieldOwner(field_owner);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1066,11 +1066,11 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
CompilationDependencies dependencies(isolate, &zone);
CHECK(!dependencies.HasAborted());
- dependencies.AssumeFieldType(field_owner);
+ dependencies.AssumeFieldOwner(field_owner);
// Reconfigure attributes of property |kSplitProp| of |map2| to NONE, which
// should generalize representations in |map1|.
@@ -1597,11 +1597,11 @@ static void TestReconfigureElementsKind_GeneralizeRepresentation(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
CompilationDependencies dependencies(isolate, &zone);
CHECK(!dependencies.HasAborted());
- dependencies.AssumeFieldType(field_owner);
+ dependencies.AssumeFieldOwner(field_owner);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -1690,11 +1690,11 @@ static void TestReconfigureElementsKind_GeneralizeRepresentationTrivial(
CHECK(map2->is_stable());
CHECK(expectations2.Check(*map2));
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
Handle<Map> field_owner(map->FindFieldOwner(kDiffProp), isolate);
CompilationDependencies dependencies(isolate, &zone);
CHECK(!dependencies.HasAborted());
- dependencies.AssumeFieldType(field_owner);
+ dependencies.AssumeFieldOwner(field_owner);
// Reconfigure elements kinds of |map2|, which should generalize
// representations in |map|.
@@ -2332,7 +2332,7 @@ TEST(TransitionDataFieldToDataField) {
Isolate* isolate = CcTest::i_isolate();
Handle<FieldType> any_type = FieldType::Any(isolate);
- Handle<Object> value1 = handle(Smi::FromInt(0), isolate);
+ Handle<Object> value1 = handle(Smi::kZero, isolate);
TransitionToDataFieldOperator transition_op1(Representation::Smi(), any_type,
value1);
@@ -2416,7 +2416,7 @@ TEST(FieldTypeConvertSimple) {
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
CHECK_EQ(FieldType::Any()->Convert(&zone), AstType::NonInternal());
CHECK_EQ(FieldType::None()->Convert(&zone), AstType::None());
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 4b16c031d9..8c43bbfa54 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -141,6 +141,19 @@ TEST(LocalVar) {
CheckFunctionName(script, "return 2", "fun2");
}
+TEST(ObjectProperty) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ v8::Local<v8::Script> script =
+ Compile(CcTest::isolate(),
+ "var obj = {\n"
+ " fun1: function() { return 1; },\n"
+ " fun2: class { constructor() { return 2; } }\n"
+ "}");
+ CheckFunctionName(script, "return 1", "obj.fun1");
+ CheckFunctionName(script, "return 2", "obj.fun2");
+}
TEST(InConstructor) {
CcTest::InitializeVM();
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index d777432f2f..256b74c616 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -467,8 +467,8 @@ TEST(PhatomHandlesWithoutCallbacks) {
g2.SetWeak();
}
- CHECK_EQ(0, isolate->NumberOfPhantomHandleResetsSinceLastCall());
+ CHECK_EQ(0u, isolate->NumberOfPhantomHandleResetsSinceLastCall());
CcTest::CollectAllAvailableGarbage();
- CHECK_EQ(2, isolate->NumberOfPhantomHandleResetsSinceLastCall());
- CHECK_EQ(0, isolate->NumberOfPhantomHandleResetsSinceLastCall());
+ CHECK_EQ(2u, isolate->NumberOfPhantomHandleResetsSinceLastCall());
+ CHECK_EQ(0u, isolate->NumberOfPhantomHandleResetsSinceLastCall());
}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 17893b3b48..9721477b28 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -360,7 +360,7 @@ TEST(HeapSnapshotCodeObjects) {
}
}
CHECK(compiled_references_x);
- if (i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager)) {
+ if (i::FLAG_lazy) {
CHECK(!lazy_references_x);
}
}
@@ -2443,7 +2443,7 @@ TEST(CheckCodeNames) {
CHECK(node);
const char* builtin_path1[] = {"::(GC roots)", "::(Builtins)",
- "::(KeyedLoadIC_Megamorphic builtin)"};
+ "::(KeyedLoadIC_Slow builtin)"};
node = GetNodeByPath(snapshot, builtin_path1, arraysize(builtin_path1));
CHECK(node);
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 6cc6c70214..7525676b84 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -96,7 +96,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
StringCompareInput input(s1, s2);
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
DiffChunkStruct* first_chunk;
ListDiffOutputWriter writer(&first_chunk, &zone);
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index a4bd2ed0dd..09257bcda2 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -81,9 +81,8 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
frame_iterator.Advance();
CHECK(frame_iterator.frame()->is_construct());
frame_iterator.Advance();
- if (i::FLAG_ignition) {
+ if (frame_iterator.frame()->type() == i::StackFrame::STUB) {
// Skip over bytecode handler frame.
- CHECK(frame_iterator.frame()->type() == i::StackFrame::STUB);
frame_iterator.Advance();
}
i::StackFrame* calling_frame = frame_iterator.frame();
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 24ab60e972..06efc58cfa 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -42,98 +42,6 @@ typedef void* (*F)(int x, int y, int p2, int p3, int p4);
#define __ masm->
-
-static byte to_non_zero(int n) {
- return static_cast<unsigned>(n) % 255 + 1;
-}
-
-
-static bool all_zeroes(const byte* beg, const byte* end) {
- CHECK(beg);
- CHECK(beg <= end);
- while (beg < end) {
- if (*beg++ != 0)
- return false;
- }
- return true;
-}
-
-
-TEST(CopyBytes) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
-
- const int data_size = 1 * KB;
- size_t act_size;
-
- // Allocate two blocks to copy data between.
- byte* src_buffer =
- static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
- CHECK(src_buffer);
- CHECK(act_size >= static_cast<size_t>(data_size));
- byte* dest_buffer =
- static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
- CHECK(dest_buffer);
- CHECK(act_size >= static_cast<size_t>(data_size));
-
- // Storage for R0 and R1.
- byte* r0_;
- byte* r1_;
-
- MacroAssembler assembler(isolate, NULL, 0,
- v8::internal::CodeObjectRequired::kYes);
- MacroAssembler* masm = &assembler;
-
- // Code to be generated: The stuff in CopyBytes followed by a store of R0 and
- // R1, respectively.
- __ CopyBytes(r0, r1, r2, r3);
- __ mov(r2, Operand(reinterpret_cast<int>(&r0_)));
- __ mov(r3, Operand(reinterpret_cast<int>(&r1_)));
- __ str(r0, MemOperand(r2));
- __ str(r1, MemOperand(r3));
- __ bx(lr);
-
- CodeDesc desc;
- masm->GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-
- F f = FUNCTION_CAST<F>(code->entry());
-
- // Initialise source data with non-zero bytes.
- for (int i = 0; i < data_size; i++) {
- src_buffer[i] = to_non_zero(i);
- }
-
- const int fuzz = 11;
-
- for (int size = 0; size < 600; size++) {
- for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
- for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
- memset(dest_buffer, 0, data_size);
- CHECK(dest + size < dest_buffer + data_size);
- (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int>(src),
- reinterpret_cast<int>(dest), size, 0, 0);
- // R0 and R1 should point at the first byte after the copied data.
- CHECK_EQ(src + size, r0_);
- CHECK_EQ(dest + size, r1_);
- // Check that we haven't written outside the target area.
- CHECK(all_zeroes(dest_buffer, dest));
- CHECK(all_zeroes(dest + size, dest_buffer + data_size));
- // Check the target area.
- CHECK_EQ(0, memcmp(src, dest, size));
- }
- }
- }
-
- // Check that the source data hasn't been clobbered.
- for (int i = 0; i < data_size; i++) {
- CHECK(src_buffer[i] == to_non_zero(i));
- }
-}
-
-
typedef int (*F5)(void*, void*, void*, void*, void*);
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 9561db691e..2f3edee7da 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -44,22 +44,6 @@ typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
#define __ masm->
-
-static byte to_non_zero(int n) {
- return static_cast<unsigned>(n) % 255 + 1;
-}
-
-
-static bool all_zeroes(const byte* beg, const byte* end) {
- CHECK(beg);
- CHECK(beg <= end);
- while (beg < end) {
- if (*beg++ != 0)
- return false;
- }
- return true;
-}
-
TEST(BYTESWAP) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -126,81 +110,6 @@ TEST(BYTESWAP) {
CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r5);
}
-TEST(CopyBytes) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
-
- const int data_size = 1 * KB;
- size_t act_size;
-
- // Allocate two blocks to copy data between.
- byte* src_buffer =
- static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
- CHECK(src_buffer);
- CHECK(act_size >= static_cast<size_t>(data_size));
- byte* dest_buffer =
- static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
- CHECK(dest_buffer);
- CHECK(act_size >= static_cast<size_t>(data_size));
-
- // Storage for a0 and a1.
- byte* a0_;
- byte* a1_;
-
- MacroAssembler assembler(isolate, NULL, 0,
- v8::internal::CodeObjectRequired::kYes);
- MacroAssembler* masm = &assembler;
-
- // Code to be generated: The stuff in CopyBytes followed by a store of a0 and
- // a1, respectively.
- __ CopyBytes(a0, a1, a2, a3);
- __ li(a2, Operand(reinterpret_cast<int>(&a0_)));
- __ li(a3, Operand(reinterpret_cast<int>(&a1_)));
- __ sw(a0, MemOperand(a2));
- __ jr(ra);
- __ sw(a1, MemOperand(a3));
-
- CodeDesc desc;
- masm->GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-
- ::F f = FUNCTION_CAST< ::F>(code->entry());
-
- // Initialise source data with non-zero bytes.
- for (int i = 0; i < data_size; i++) {
- src_buffer[i] = to_non_zero(i);
- }
-
- const int fuzz = 11;
-
- for (int size = 0; size < 600; size++) {
- for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
- for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
- memset(dest_buffer, 0, data_size);
- CHECK(dest + size < dest_buffer + data_size);
- (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int>(src),
- reinterpret_cast<int>(dest), size, 0, 0);
- // a0 and a1 should point at the first byte after the copied data.
- CHECK_EQ(src + size, a0_);
- CHECK_EQ(dest + size, a1_);
- // Check that we haven't written outside the target area.
- CHECK(all_zeroes(dest_buffer, dest));
- CHECK(all_zeroes(dest + size, dest_buffer + data_size));
- // Check the target area.
- CHECK_EQ(0, memcmp(src, dest, size));
- }
- }
- }
-
- // Check that the source data hasn't been clobbered.
- for (int i = 0; i < data_size; i++) {
- CHECK(src_buffer[i] == to_non_zero(i));
- }
-}
-
-
static void TestNaN(const char *code) {
// NaN value is different on MIPS and x86 architectures, and TEST(NaNx)
// tests checks the case where a x86 NaN value is serialized into the
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 1dc260ff01..04811f6af5 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -45,22 +45,6 @@ typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
#define __ masm->
-
-static byte to_non_zero(int n) {
- return static_cast<unsigned>(n) % 255 + 1;
-}
-
-
-static bool all_zeroes(const byte* beg, const byte* end) {
- CHECK(beg);
- CHECK(beg <= end);
- while (beg < end) {
- if (*beg++ != 0)
- return false;
- }
- return true;
-}
-
TEST(BYTESWAP) {
DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
CcTest::InitializeVM();
@@ -145,81 +129,6 @@ TEST(BYTESWAP) {
CHECK_EQ(static_cast<int64_t>(0xC3151AC800000000), t.r7);
}
-TEST(CopyBytes) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
-
- const int data_size = 1 * KB;
- size_t act_size;
-
- // Allocate two blocks to copy data between.
- byte* src_buffer =
- static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
- CHECK(src_buffer);
- CHECK(act_size >= static_cast<size_t>(data_size));
- byte* dest_buffer =
- static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
- CHECK(dest_buffer);
- CHECK(act_size >= static_cast<size_t>(data_size));
-
- // Storage for a0 and a1.
- byte* a0_;
- byte* a1_;
-
- MacroAssembler assembler(isolate, NULL, 0,
- v8::internal::CodeObjectRequired::kYes);
- MacroAssembler* masm = &assembler;
-
- // Code to be generated: The stuff in CopyBytes followed by a store of a0 and
- // a1, respectively.
- __ CopyBytes(a0, a1, a2, a3);
- __ li(a2, Operand(reinterpret_cast<int64_t>(&a0_)));
- __ li(a3, Operand(reinterpret_cast<int64_t>(&a1_)));
- __ sd(a0, MemOperand(a2));
- __ jr(ra);
- __ sd(a1, MemOperand(a3));
-
- CodeDesc desc;
- masm->GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-
- ::F f = FUNCTION_CAST< ::F>(code->entry());
-
- // Initialise source data with non-zero bytes.
- for (int i = 0; i < data_size; i++) {
- src_buffer[i] = to_non_zero(i);
- }
-
- const int fuzz = 11;
-
- for (int size = 0; size < 600; size++) {
- for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
- for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
- memset(dest_buffer, 0, data_size);
- CHECK(dest + size < dest_buffer + data_size);
- (void)CALL_GENERATED_CODE(isolate, f, reinterpret_cast<int64_t>(src),
- reinterpret_cast<int64_t>(dest), size, 0, 0);
- // a0 and a1 should point at the first byte after the copied data.
- CHECK_EQ(src + size, a0_);
- CHECK_EQ(dest + size, a1_);
- // Check that we haven't written outside the target area.
- CHECK(all_zeroes(dest_buffer, dest));
- CHECK(all_zeroes(dest + size, dest_buffer + data_size));
- // Check the target area.
- CHECK_EQ(0, memcmp(src, dest, size));
- }
- }
- }
-
- // Check that the source data hasn't been clobbered.
- for (int i = 0; i < data_size; i++) {
- CHECK(src_buffer[i] == to_non_zero(i));
- }
-}
-
-
TEST(LoadConstants) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index cfdb75d1e8..1f6c247288 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -176,7 +176,7 @@ TEST(SmiMove) {
EntryCode(masm);
Label exit;
- TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
+ TestMoveSmi(masm, &exit, 1, Smi::kZero);
TestMoveSmi(masm, &exit, 2, Smi::FromInt(127));
TestMoveSmi(masm, &exit, 3, Smi::FromInt(128));
TestMoveSmi(masm, &exit, 4, Smi::FromInt(255));
@@ -315,7 +315,7 @@ TEST(Integer32ToSmi) {
__ movq(rax, Immediate(1)); // Test number.
__ movl(rcx, Immediate(0));
__ Integer32ToSmi(rcx, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
__ cmpq(rcx, rdx);
__ j(not_equal, &exit);
@@ -352,7 +352,7 @@ TEST(Integer32ToSmi) {
__ movq(rax, Immediate(6)); // Test number.
__ movl(rcx, Immediate(0));
__ Integer32ToSmi(r8, rcx);
- __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+ __ Set(rdx, reinterpret_cast<intptr_t>(Smi::kZero));
__ cmpq(r8, rdx);
__ j(not_equal, &exit);
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index c33a5a124b..e086a407f9 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -21,16 +21,14 @@ using v8::Value;
MaybeLocal<Module> AlwaysEmptyResolveCallback(Local<Context> context,
Local<String> specifier,
- Local<Module> referrer,
- Local<Value> data) {
+ Local<Module> referrer) {
return MaybeLocal<Module>();
}
static int g_count = 0;
MaybeLocal<Module> FailOnSecondCallResolveCallback(Local<Context> context,
Local<String> specifier,
- Local<Module> referrer,
- Local<Value> data) {
+ Local<Module> referrer) {
if (g_count++ > 0) return MaybeLocal<Module>();
Local<String> source_text = v8_str("");
ScriptOrigin origin(v8_str("module.js"));
@@ -67,8 +65,7 @@ TEST(ModuleInstantiationFailures) {
}
static MaybeLocal<Module> CompileSpecifierAsModuleResolveCallback(
- Local<Context> context, Local<String> specifier, Local<Module> referrer,
- Local<Value> data) {
+ Local<Context> context, Local<String> specifier, Local<Module> referrer) {
ScriptOrigin origin(v8_str("module.js"));
ScriptCompiler::Source source(specifier, origin);
return ScriptCompiler::CompileModule(CcTest::isolate(), &source)
@@ -93,19 +90,4 @@ TEST(ModuleEvaluation) {
ExpectInt32("Object.expando", 10);
}
-TEST(EmbedderData) {
- Isolate* isolate = CcTest::isolate();
- HandleScope scope(isolate);
- LocalContext env;
-
- Local<String> source_text = v8_str("");
- ScriptOrigin origin(v8_str("file.js"));
- ScriptCompiler::Source source(source_text, origin);
- Local<Module> module =
- ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
- CHECK(module->GetEmbedderData()->IsUndefined());
- module->SetEmbedderData(v8_num(42));
- CHECK_EQ(42, Local<v8::Int32>::Cast(module->GetEmbedderData())->Value());
-}
-
} // anonymous namespace
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 921cebcad6..5a5a734236 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -168,36 +168,38 @@ TEST(ScanHTMLEndComments) {
for (int i = 0; tests[i]; i++) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream.get());
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- stack_limit);
+ i::PendingCompilationErrorHandler pending_error_handler;
+ i::PreParser preparser(
+ &zone, &scanner, &ast_value_factory, &pending_error_handler,
+ CcTest::i_isolate()->counters()->runtime_call_stats(), stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(!log.HasError());
+ CHECK(!pending_error_handler.has_pending_error());
}
for (int i = 0; fail_tests[i]; i++) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
- i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream.get());
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- stack_limit);
+ i::PendingCompilationErrorHandler pending_error_handler;
+ i::PreParser preparser(
+ &zone, &scanner, &ast_value_factory, &pending_error_handler,
+ CcTest::i_isolate()->counters()->runtime_call_stats(), stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(log.HasError());
+ CHECK(pending_error_handler.has_pending_error());
}
}
@@ -218,7 +220,7 @@ class ScriptResource : public v8::String::ExternalOneByteStringResource {
TEST(UsingCachedData) {
// Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+ if (!i::FLAG_lazy) return;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handles(isolate);
@@ -271,7 +273,7 @@ TEST(UsingCachedData) {
TEST(PreparseFunctionDataIsUsed) {
// Producing cached parser data while parsing eagerly is not supported.
- if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+ if (!i::FLAG_lazy) return;
// This tests that we actually do use the function data generated by the
// preparser.
@@ -287,14 +289,30 @@ TEST(PreparseFunctionDataIsUsed) {
i::GetCurrentStackPosition() - 128 * 1024);
const char* good_code[] = {
- "function this_is_lazy() { var a; } function foo() { return 25; } foo();",
- "var this_is_lazy = () => { var a; }; var foo = () => 25; foo();",
+ "function z() { var a; } function f() { return 25; } f();",
+ "var z = function () { var a; }; function f() { return 25; } f();",
+ "function *z() { var a; } function f() { return 25; } f();",
+ "var z = function *() { var a; }; function f() { return 25; } f();",
+ "function z(p1, p2) { var a; } function f() { return 25; } f();",
+ "var z = function (p1, p2) { var a; }; function f() { return 25; } f();",
+ "function *z(p1, p2) { var a; } function f() { return 25; } f();",
+ "var z = function *(p1, p2) { var a; }; function f() { return 25; } f();",
+ "var z = () => { var a; }; function f() { return 25; } f();",
+ "var z = (p1, p2) => { var a; }; function f() { return 25; } f();",
};
// Insert a syntax error inside the lazy function.
const char* bad_code[] = {
- "function this_is_lazy() { if ( } function foo() { return 25; } foo();",
- "var this_is_lazy = () => { if ( }; var foo = () => 25; foo();",
+ "function z() { if ( } function f() { return 25; } f();",
+ "var z = function () { if ( }; function f() { return 25; } f();",
+ "function *z() { if ( } function f() { return 25; } f();",
+ "var z = function *() { if ( }; function f() { return 25; } f();",
+ "function z(p1, p2) { if ( } function f() { return 25; } f();",
+ "var z = function (p1, p2) { if ( }; function f() { return 25; } f();",
+ "function *z(p1, p2) { if ( } function f() { return 25; } f();",
+ "var z = function *(p1, p2) { if ( }; function f() { return 25; } f();",
+ "var z = () => { if ( }; function f() { return 25; } f();",
+ "var z = (p1, p2) => { if ( }; function f() { return 25; } f();",
};
for (unsigned i = 0; i < arraysize(good_code); i++) {
@@ -342,20 +360,21 @@ TEST(StandAlonePreParser) {
uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream.get());
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- stack_limit);
+ i::PendingCompilationErrorHandler pending_error_handler;
+ i::PreParser preparser(
+ &zone, &scanner, &ast_value_factory, &pending_error_handler,
+ CcTest::i_isolate()->counters()->runtime_call_stats(), stack_limit);
preparser.set_allow_lazy(true);
preparser.set_allow_natives(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(!log.HasError());
+ CHECK(!pending_error_handler.has_pending_error());
}
}
@@ -363,6 +382,7 @@ TEST(StandAlonePreParser) {
TEST(StandAlonePreParserNoNatives) {
v8::V8::Initialize();
+ i::Isolate* isolate = CcTest::i_isolate();
CcTest::i_isolate()->stack_guard()->SetStackLimit(
i::GetCurrentStackPosition() - 128 * 1024);
@@ -372,23 +392,24 @@ TEST(StandAlonePreParserNoNatives) {
NULL
};
- uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
+ uintptr_t stack_limit = isolate->stack_guard()->real_climit();
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
- i::CompleteParserRecorder log;
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(isolate->unicode_cache());
scanner.Initialize(stream.get());
// Preparser defaults to disallowing natives syntax.
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- stack_limit);
+ i::PendingCompilationErrorHandler pending_error_handler;
+ i::PreParser preparser(
+ &zone, &scanner, &ast_value_factory, &pending_error_handler,
+ isolate->counters()->runtime_call_stats(), stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(log.HasError());
+ CHECK(pending_error_handler.has_pending_error());
}
}
@@ -444,97 +465,92 @@ TEST(RegressChromium62639) {
// failed in debug mode, and sometimes crashed in release mode.
auto stream = i::ScannerStream::ForTesting(program);
- i::CompleteParserRecorder log;
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
scanner.Initialize(stream.get());
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
+ i::PendingCompilationErrorHandler pending_error_handler;
+ i::PreParser preparser(&zone, &scanner, &ast_value_factory,
+ &pending_error_handler,
+ isolate->counters()->runtime_call_stats(),
CcTest::i_isolate()->stack_guard()->real_climit());
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
// Even in the case of a syntax error, kPreParseSuccess is returned.
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- CHECK(log.HasError());
+ CHECK(pending_error_handler.has_pending_error());
}
TEST(Regress928) {
- v8::V8::Initialize();
- i::Isolate* isolate = CcTest::i_isolate();
-
- // Preparsing didn't consider the catch clause of a try statement
- // as with-content, which made it assume that a function inside
- // the block could be lazily compiled, and an extra, unexpected,
- // entry was added to the data.
- isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
- 128 * 1024);
+ // Test only applies when lazy parsing.
+ if (!i::FLAG_lazy) return;
+ i::FLAG_min_preparse_length = 0;
+ // Tests that the first non-toplevel function is not included in the preparse
+ // data.
const char* program =
"try { } catch (e) { var foo = function () { /* first */ } }"
"var bar = function () { /* second */ }";
- v8::HandleScope handles(CcTest::isolate());
- auto stream = i::ScannerStream::ForTesting(program);
- i::CompleteParserRecorder log;
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get());
- i::Zone zone(CcTest::i_isolate()->allocator());
- i::AstValueFactory ast_value_factory(&zone,
- CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- CcTest::i_isolate()->stack_guard()->real_climit());
- preparser.set_allow_lazy(true);
- i::PreParser::PreParseResult result = preparser.PreParseProgram();
- CHECK_EQ(i::PreParser::kPreParseSuccess, result);
- i::ScriptData* sd = log.GetScriptData();
- i::ParseData* pd = i::ParseData::FromCachedData(sd);
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handles(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+ v8::ScriptCompiler::Source script_source(v8_str(program));
+ v8::ScriptCompiler::Compile(context, &script_source,
+ v8::ScriptCompiler::kProduceParserCache)
+ .ToLocalChecked();
+
+ const v8::ScriptCompiler::CachedData* cached_data =
+ script_source.GetCachedData();
+ i::ScriptData script_data(cached_data->data, cached_data->length);
+ std::unique_ptr<i::ParseData> pd(i::ParseData::FromCachedData(&script_data));
pd->Initialize();
int first_function =
static_cast<int>(strstr(program, "function") - program);
- int first_lbrace = first_function + i::StrLength("function () ");
- CHECK_EQ('{', program[first_lbrace]);
- i::FunctionEntry entry1 = pd->GetFunctionEntry(first_lbrace);
+ int first_lparen = first_function + i::StrLength("function ");
+ CHECK_EQ('(', program[first_lparen]);
+ i::FunctionEntry entry1 = pd->GetFunctionEntry(first_lparen);
CHECK(!entry1.is_valid());
int second_function =
- static_cast<int>(strstr(program + first_lbrace, "function") - program);
- int second_lbrace =
- second_function + i::StrLength("function () ");
- CHECK_EQ('{', program[second_lbrace]);
- i::FunctionEntry entry2 = pd->GetFunctionEntry(second_lbrace);
+ static_cast<int>(strstr(program + first_lparen, "function") - program);
+ int second_lparen = second_function + i::StrLength("function ");
+ CHECK_EQ('(', program[second_lparen]);
+ i::FunctionEntry entry2 = pd->GetFunctionEntry(second_lparen);
CHECK(entry2.is_valid());
CHECK_EQ('}', program[entry2.end_pos() - 1]);
- delete sd;
- delete pd;
}
TEST(PreParseOverflow) {
v8::V8::Initialize();
+ i::Isolate* isolate = CcTest::i_isolate();
- CcTest::i_isolate()->stack_guard()->SetStackLimit(
- i::GetCurrentStackPosition() - 128 * 1024);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
size_t kProgramSize = 1024 * 1024;
std::unique_ptr<char[]> program(i::NewArray<char>(kProgramSize + 1));
memset(program.get(), '(', kProgramSize);
program[kProgramSize] = '\0';
- uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
+ uintptr_t stack_limit = isolate->stack_guard()->real_climit();
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
- i::CompleteParserRecorder log;
- i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
+ i::Scanner scanner(isolate->unicode_cache());
scanner.Initialize(stream.get());
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- stack_limit);
+ i::PendingCompilationErrorHandler pending_error_handler;
+ i::PreParser preparser(
+ &zone, &scanner, &ast_value_factory, &pending_error_handler,
+ isolate->counters()->runtime_call_stats(), stack_limit);
preparser.set_allow_lazy(true);
i::PreParser::PreParseResult result = preparser.PreParseProgram();
CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
@@ -630,7 +646,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
CHECK(scanner.ScanRegExpPattern());
scanner.Next(); // Current token is now the regexp literal.
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(&zone,
CcTest::i_isolate()->heap()->HashSeed());
const i::AstRawString* current_symbol =
@@ -807,10 +823,9 @@ TEST(ScopeUsesArgumentsSuperThis) {
factory->NewStringFromUtf8(i::CStrVector(program.start()))
.ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- info.set_global();
CHECK(parser.Parse(&info));
CHECK(i::Rewriter::Rewrite(&info));
i::DeclarationScope::Analyze(&info, i::AnalyzeMode::kRegular);
@@ -866,16 +881,14 @@ static void CheckParsesToNumber(const char* source, bool with_dot) {
i::ParseInfo info(handles.main_zone(), script);
i::Parser parser(&info);
- info.set_global();
- info.set_lazy(false);
info.set_allow_lazy_parsing(false);
info.set_toplevel(true);
CHECK(i::Compiler::ParseAndAnalyze(&info));
- CHECK(info.scope()->declarations()->length() == 1);
- i::FunctionLiteral* fun =
- info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
+ CHECK_EQ(1, info.scope()->declarations()->LengthForTest());
+ i::Declaration* decl = info.scope()->declarations()->AtForTest(0);
+ i::FunctionLiteral* fun = decl->AsFunctionDeclaration()->fun();
CHECK(fun->body()->length() == 1);
CHECK(fun->body()->at(0)->IsReturnStatement());
i::ReturnStatement* ret = fun->body()->at(0)->AsReturnStatement();
@@ -1164,11 +1177,10 @@ TEST(ScopePositions) {
i::CStrVector(program.start())).ToHandleChecked();
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
parser.set_allow_lazy(true);
- info.set_global();
info.set_language_mode(source_data[i].language_mode);
parser.Parse(&info);
CHECK(info.literal() != NULL);
@@ -1214,7 +1226,7 @@ TEST(DiscardFunctionBody) {
i::Handle<i::String> source_code =
factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source_code);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
info.set_allow_lazy_parsing();
i::Parser parser(&info);
@@ -1228,8 +1240,11 @@ TEST(DiscardFunctionBody) {
AsCall()->expression()->AsFunctionLiteral();
i::Scope* inner_scope = inner->scope();
i::FunctionLiteral* fun = nullptr;
- if (inner_scope->declarations()->length() > 0) {
- fun = inner_scope->declarations()->at(0)->AsFunctionDeclaration()->fun();
+ if (!inner_scope->declarations()->is_empty()) {
+ fun = inner_scope->declarations()
+ ->AtForTest(0)
+ ->AsFunctionDeclaration()
+ ->fun();
} else {
// TODO(conradw): This path won't be hit until the other test cases can be
// uncommented.
@@ -1264,33 +1279,10 @@ const char* ReadString(unsigned* start) {
}
-i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
- i::Isolate* isolate = CcTest::i_isolate();
- int message = data[i::PreparseDataConstants::kMessageTemplatePos];
- int arg_count = data[i::PreparseDataConstants::kMessageArgCountPos];
- i::Handle<i::Object> arg_object;
- if (arg_count == 1) {
- // Position after text found by skipping past length field and
- // length field content words.
- const char* arg =
- ReadString(&data[i::PreparseDataConstants::kMessageArgPos]);
- arg_object = v8::Utils::OpenHandle(*v8_str(arg));
- i::DeleteArray(arg);
- } else {
- CHECK_EQ(0, arg_count);
- arg_object = isolate->factory()->undefined_value();
- }
-
- data.Dispose();
- return i::MessageTemplate::FormatMessage(isolate, message, arg_object);
-}
-
enum ParserFlag {
kAllowLazy,
kAllowNatives,
kAllowHarmonyFunctionSent,
- kAllowHarmonyRestrictiveDeclarations,
- kAllowHarmonyForIn,
kAllowHarmonyAsyncAwait,
kAllowHarmonyRestrictiveGenerators,
kAllowHarmonyTrailingCommas,
@@ -1310,9 +1302,6 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
parser->set_allow_natives(flags.Contains(kAllowNatives));
parser->set_allow_harmony_function_sent(
flags.Contains(kAllowHarmonyFunctionSent));
- parser->set_allow_harmony_restrictive_declarations(
- flags.Contains(kAllowHarmonyRestrictiveDeclarations));
- parser->set_allow_harmony_for_in(flags.Contains(kAllowHarmonyForIn));
parser->set_allow_harmony_async_await(
flags.Contains(kAllowHarmonyAsyncAwait));
parser->set_allow_harmony_restrictive_generators(
@@ -1337,37 +1326,33 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
int parser_materialized_literals = -2;
// Preparse the data.
- i::CompleteParserRecorder log;
+ i::PendingCompilationErrorHandler pending_error_handler;
if (test_preparser) {
i::Scanner scanner(isolate->unicode_cache());
std::unique_ptr<i::Utf16CharacterStream> stream(
i::ScannerStream::For(source));
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->heap()->HashSeed());
- i::PreParser preparser(&zone, &scanner, &ast_value_factory, &log,
- stack_limit);
+ i::PreParser preparser(
+ &zone, &scanner, &ast_value_factory, &pending_error_handler,
+ isolate->counters()->runtime_call_stats(), stack_limit);
SetParserFlags(&preparser, flags);
scanner.Initialize(stream.get());
i::PreParser::PreParseResult result =
preparser.PreParseProgram(&preparser_materialized_literals, is_module);
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
- bool preparse_error = log.HasError();
// Parse the data
i::FunctionLiteral* function;
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
SetParserFlags(&parser, flags);
- if (is_module) {
- info.set_module();
- } else {
- info.set_global();
- }
+ if (is_module) info.set_module();
parser.Parse(&info);
function = info.literal();
if (function) {
@@ -1397,7 +1382,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
CHECK(false);
}
- if (test_preparser && !preparse_error) {
+ if (test_preparser && !pending_error_handler.has_pending_error()) {
v8::base::OS::Print(
"Parser failed on:\n"
"\t%s\n"
@@ -1410,7 +1395,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
// Check that preparser and parser produce the same error.
if (test_preparser) {
i::Handle<i::String> preparser_message =
- FormatMessage(log.ErrorMessageData());
+ pending_error_handler.FormatMessage(CcTest::i_isolate());
if (!i::String::Equals(message_string, preparser_message)) {
v8::base::OS::Print(
"Expected parser and preparser to produce the same error on:\n"
@@ -1423,7 +1408,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
CHECK(false);
}
}
- } else if (test_preparser && preparse_error) {
+ } else if (test_preparser && pending_error_handler.has_pending_error()) {
v8::base::OS::Print(
"Preparser failed on:\n"
"\t%s\n"
@@ -1431,7 +1416,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
"\t%s\n"
"However, the parser succeeded",
source->ToCString().get(),
- FormatMessage(log.ErrorMessageData())->ToCString().get());
+ pending_error_handler.FormatMessage(CcTest::i_isolate())
+ ->ToCString()
+ .get());
CHECK(false);
} else if (result == kError) {
v8::base::OS::Print(
@@ -2493,7 +2480,7 @@ TEST(DontRegressPreParserDataSizes) {
i::Handle<i::String> source =
factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::ScriptData* sd = NULL;
info.set_cached_data(&sd);
@@ -3154,7 +3141,7 @@ TEST(FuncNameInferrerEscaped) {
TEST(RegressionLazyFunctionWithErrorWithArg) {
// Test only applies when lazy parsing.
- if (!i::FLAG_lazy || (i::FLAG_ignition && i::FLAG_ignition_eager)) return;
+ if (!i::FLAG_lazy) return;
// The bug occurred when a lazy function had an error which requires a
// parameter (such as "unknown label" here). The error message was processed
@@ -3199,7 +3186,7 @@ TEST(SerializationOfMaybeAssignmentFlag) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3249,7 +3236,7 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
source->PrintOn(stdout);
printf("\n");
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
v8::Local<v8::Value> v = CompileRun(src);
i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
@@ -3280,7 +3267,7 @@ TEST(InnerAssignment) {
const char* prefix = "function f() {";
const char* midfix = " function g() {";
- const char* suffix = "}}";
+ const char* suffix = "}}; f";
struct { const char* source; bool assigned; bool strict; } outers[] = {
// Actual assignments.
{ "var x; var x = 5;", true, false },
@@ -3360,10 +3347,6 @@ TEST(InnerAssignment) {
{ "(function(x) { eval(''); })", true, false },
};
- // Used to trigger lazy parsing of the outer function.
- int comment_len = 2048;
- i::ScopedVector<char> comment(comment_len + 1);
- i::SNPrintF(comment, "/*%0*d*/", comment_len - 4, 0);
int prefix_len = Utf8LengthHelper(prefix);
int midfix_len = Utf8LengthHelper(midfix);
int suffix_len = Utf8LengthHelper(suffix);
@@ -3376,33 +3359,44 @@ TEST(InnerAssignment) {
const char* inner = inners[j].source;
int inner_len = Utf8LengthHelper(inner);
- const char* comment_chars = lazy ? comment.start() : "";
- int len = prefix_len + (lazy ? comment_len : 0) + outer_len +
- midfix_len + inner_len + suffix_len;
+ int len = prefix_len + outer_len + midfix_len + inner_len + suffix_len;
i::ScopedVector<char> program(len + 1);
- i::SNPrintF(program, "%s%s%s%s%s%s", comment_chars, prefix, outer,
- midfix, inner, suffix);
- i::Handle<i::String> source =
- factory->InternalizeUtf8String(program.start());
- source->PrintOn(stdout);
- printf("\n");
-
- i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
- i::ParseInfo info(&zone, script);
- i::Parser parser(&info);
- CHECK(parser.Parse(&info));
- CHECK(i::Compiler::Analyze(&info));
- CHECK(info.literal() != NULL);
-
- i::Scope* scope = info.literal()->scope();
- i::Scope* inner_scope = scope->inner_scope();
- DCHECK_NOT_NULL(inner_scope);
- DCHECK_NULL(inner_scope->sibling());
+ i::SNPrintF(program, "%s%s%s%s%s", prefix, outer, midfix, inner,
+ suffix);
+
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
+ std::unique_ptr<i::ParseInfo> info;
+ if (lazy) {
+ printf("%s\n", program.start());
+ v8::Local<v8::Value> v = CompileRun(program.start());
+ i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+ i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+ i::Handle<i::SharedFunctionInfo> shared = i::handle(f->shared());
+ info = std::unique_ptr<i::ParseInfo>(new i::ParseInfo(&zone, shared));
+ } else {
+ i::Handle<i::String> source =
+ factory->InternalizeUtf8String(program.start());
+ source->PrintOn(stdout);
+ printf("\n");
+ i::Handle<i::Script> script = factory->NewScript(source);
+ info = std::unique_ptr<i::ParseInfo>(new i::ParseInfo(&zone, script));
+ }
+ i::Parser parser(info.get());
+ CHECK(parser.Parse(info.get()));
+ CHECK(i::Compiler::Analyze(info.get()));
+ CHECK(info->literal() != NULL);
+
+ i::Scope* scope = info->literal()->scope();
+ if (!lazy) {
+ scope = scope->inner_scope();
+ }
+ DCHECK_NOT_NULL(scope);
+ DCHECK_NULL(scope->sibling());
+ DCHECK(scope->is_function_scope());
const i::AstRawString* var_name =
- info.ast_value_factory()->GetOneByteString("x");
- i::Variable* var = inner_scope->Lookup(var_name);
+ info->ast_value_factory()->GetOneByteString("x");
+ i::Variable* var = scope->Lookup(var_name);
bool expected = outers[i].assigned || inners[j].assigned;
CHECK(var != NULL);
CHECK(var->is_used() || !expected);
@@ -5650,7 +5644,7 @@ TEST(BasicImportExportParsing) {
// Show that parsing as a module works
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
@@ -5676,10 +5670,9 @@ TEST(BasicImportExportParsing) {
// And that parsing a script does not.
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- info.set_global();
CHECK(!parser.Parse(&info));
isolate->clear_pending_exception();
}
@@ -5769,7 +5762,7 @@ TEST(ImportExportParsingErrors) {
factory->NewStringFromAsciiChecked(kErrorSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
@@ -5807,7 +5800,7 @@ TEST(ModuleTopLevelFunctionDecl) {
factory->NewStringFromAsciiChecked(kErrorSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
@@ -6006,7 +5999,7 @@ TEST(ModuleParsingInternals) {
"export {foob};";
i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
i::Handle<i::Script> script = factory->NewScript(source);
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
info.set_module();
@@ -6019,92 +6012,100 @@ TEST(ModuleParsingInternals) {
CHECK_NULL(outer_scope->outer_scope());
CHECK(module_scope->is_module_scope());
const i::ModuleDescriptor::Entry* entry;
- i::ZoneList<i::Declaration*>* declarations = module_scope->declarations();
- CHECK_EQ(13, declarations->length());
+ i::Declaration::List* declarations = module_scope->declarations();
+ CHECK_EQ(13, declarations->LengthForTest());
- CHECK(declarations->at(0)->proxy()->raw_name()->IsOneByteEqualTo("x"));
- CHECK(declarations->at(0)->proxy()->var()->mode() == i::LET);
- CHECK(declarations->at(0)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(0)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(0)->proxy()->raw_name()->IsOneByteEqualTo("x"));
+ CHECK(declarations->AtForTest(0)->proxy()->var()->mode() == i::LET);
+ CHECK(declarations->AtForTest(0)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(0)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(1)->proxy()->raw_name()->IsOneByteEqualTo("z"));
- CHECK(declarations->at(1)->proxy()->var()->mode() == i::CONST);
- CHECK(declarations->at(1)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(1)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(1)->proxy()->raw_name()->IsOneByteEqualTo("z"));
+ CHECK(declarations->AtForTest(1)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->AtForTest(1)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(1)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(2)->proxy()->raw_name()->IsOneByteEqualTo("n"));
- CHECK(declarations->at(2)->proxy()->var()->mode() == i::CONST);
- CHECK(declarations->at(2)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(2)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(2)->proxy()->raw_name()->IsOneByteEqualTo("n"));
+ CHECK(declarations->AtForTest(2)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->AtForTest(2)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(2)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(3)->proxy()->raw_name()->IsOneByteEqualTo("foo"));
- CHECK(declarations->at(3)->proxy()->var()->mode() == i::VAR);
- CHECK(!declarations->at(3)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(3)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(3)->proxy()->raw_name()->IsOneByteEqualTo("foo"));
+ CHECK(declarations->AtForTest(3)->proxy()->var()->mode() == i::VAR);
+ CHECK(!declarations->AtForTest(3)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(3)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(4)->proxy()->raw_name()->IsOneByteEqualTo("goo"));
- CHECK(declarations->at(4)->proxy()->var()->mode() == i::LET);
- CHECK(!declarations->at(4)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(4)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(4)->proxy()->raw_name()->IsOneByteEqualTo("goo"));
+ CHECK(declarations->AtForTest(4)->proxy()->var()->mode() == i::LET);
+ CHECK(!declarations->AtForTest(4)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(4)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(5)->proxy()->raw_name()->IsOneByteEqualTo("hoo"));
- CHECK(declarations->at(5)->proxy()->var()->mode() == i::LET);
- CHECK(declarations->at(5)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(5)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(5)->proxy()->raw_name()->IsOneByteEqualTo("hoo"));
+ CHECK(declarations->AtForTest(5)->proxy()->var()->mode() == i::LET);
+ CHECK(declarations->AtForTest(5)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(5)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(6)->proxy()->raw_name()->IsOneByteEqualTo("joo"));
- CHECK(declarations->at(6)->proxy()->var()->mode() == i::CONST);
- CHECK(declarations->at(6)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(6)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(6)->proxy()->raw_name()->IsOneByteEqualTo("joo"));
+ CHECK(declarations->AtForTest(6)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->AtForTest(6)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(6)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->at(7)->proxy()->raw_name()->IsOneByteEqualTo("*default*"));
- CHECK(declarations->at(7)->proxy()->var()->mode() == i::CONST);
- CHECK(declarations->at(7)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(7)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(7)->proxy()->raw_name()->IsOneByteEqualTo(
+ "*default*"));
+ CHECK(declarations->AtForTest(7)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->AtForTest(7)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(7)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(
- declarations->at(8)->proxy()->raw_name()->IsOneByteEqualTo("nonexport"));
- CHECK(declarations->at(8)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(8)->proxy()->var()->location() !=
+ CHECK(declarations->AtForTest(8)->proxy()->raw_name()->IsOneByteEqualTo(
+ "nonexport"));
+ CHECK(declarations->AtForTest(8)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(8)->proxy()->var()->location() !=
i::VariableLocation::MODULE);
- CHECK(declarations->at(9)->proxy()->raw_name()->IsOneByteEqualTo("mm"));
- CHECK(declarations->at(9)->proxy()->var()->mode() == i::CONST);
- CHECK(declarations->at(9)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(9)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(9)->proxy()->raw_name()->IsOneByteEqualTo("mm"));
+ CHECK(declarations->AtForTest(9)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->AtForTest(9)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(9)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(10)->proxy()->raw_name()->IsOneByteEqualTo("aa"));
- CHECK(declarations->at(10)->proxy()->var()->mode() == i::CONST);
- CHECK(declarations->at(10)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(10)->proxy()->var()->location() ==
+ CHECK(
+ declarations->AtForTest(10)->proxy()->raw_name()->IsOneByteEqualTo("aa"));
+ CHECK(declarations->AtForTest(10)->proxy()->var()->mode() == i::CONST);
+ CHECK(declarations->AtForTest(10)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(10)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
- CHECK(declarations->at(11)->proxy()->raw_name()->IsOneByteEqualTo("loo"));
- CHECK(declarations->at(11)->proxy()->var()->mode() == i::CONST);
- CHECK(!declarations->at(11)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(11)->proxy()->var()->location() !=
+ CHECK(declarations->AtForTest(11)->proxy()->raw_name()->IsOneByteEqualTo(
+ "loo"));
+ CHECK(declarations->AtForTest(11)->proxy()->var()->mode() == i::CONST);
+ CHECK(!declarations->AtForTest(11)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(11)->proxy()->var()->location() !=
i::VariableLocation::MODULE);
- CHECK(declarations->at(12)->proxy()->raw_name()->IsOneByteEqualTo("foob"));
- CHECK(declarations->at(12)->proxy()->var()->mode() == i::CONST);
- CHECK(!declarations->at(12)->proxy()->var()->binding_needs_init());
- CHECK(declarations->at(12)->proxy()->var()->location() ==
+ CHECK(declarations->AtForTest(12)->proxy()->raw_name()->IsOneByteEqualTo(
+ "foob"));
+ CHECK(declarations->AtForTest(12)->proxy()->var()->mode() == i::CONST);
+ CHECK(!declarations->AtForTest(12)->proxy()->var()->binding_needs_init());
+ CHECK(declarations->AtForTest(12)->proxy()->var()->location() ==
i::VariableLocation::MODULE);
i::ModuleDescriptor* descriptor = module_scope->module();
CHECK_NOT_NULL(descriptor);
- CHECK_EQ(5, descriptor->module_requests().size());
+ CHECK_EQ(5u, descriptor->module_requests().size());
for (const auto& elem : descriptor->module_requests()) {
if (elem.first->IsOneByteEqualTo("m.js"))
CHECK_EQ(elem.second, 0);
@@ -6126,34 +6127,34 @@ TEST(ModuleParsingInternals) {
CheckEntry(descriptor->special_exports().at(2), "bb", nullptr, "aa",
0); // !!!
- CHECK_EQ(8, descriptor->regular_exports().size());
+ CHECK_EQ(8u, descriptor->regular_exports().size());
entry = descriptor->regular_exports()
- .find(declarations->at(3)->proxy()->raw_name())
+ .find(declarations->AtForTest(3)->proxy()->raw_name())
->second;
CheckEntry(entry, "foo", "foo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->at(4)->proxy()->raw_name())
+ .find(declarations->AtForTest(4)->proxy()->raw_name())
->second;
CheckEntry(entry, "goo", "goo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->at(5)->proxy()->raw_name())
+ .find(declarations->AtForTest(5)->proxy()->raw_name())
->second;
CheckEntry(entry, "hoo", "hoo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->at(6)->proxy()->raw_name())
+ .find(declarations->AtForTest(6)->proxy()->raw_name())
->second;
CheckEntry(entry, "joo", "joo", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->at(7)->proxy()->raw_name())
+ .find(declarations->AtForTest(7)->proxy()->raw_name())
->second;
CheckEntry(entry, "default", "*default*", nullptr, -1);
entry = descriptor->regular_exports()
- .find(declarations->at(12)->proxy()->raw_name())
+ .find(declarations->AtForTest(12)->proxy()->raw_name())
->second;
CheckEntry(entry, "foob", "foob", nullptr, -1);
// TODO(neis): The next lines are terrible. Find a better way.
- auto name_x = declarations->at(0)->proxy()->raw_name();
- CHECK_EQ(2, descriptor->regular_exports().count(name_x));
+ auto name_x = declarations->AtForTest(0)->proxy()->raw_name();
+ CHECK_EQ(2u, descriptor->regular_exports().count(name_x));
auto it = descriptor->regular_exports().equal_range(name_x).first;
entry = it->second;
if (entry->export_name->IsOneByteEqualTo("y")) {
@@ -6171,18 +6172,22 @@ TEST(ModuleParsingInternals) {
CheckEntry(descriptor->namespace_imports().at(1), nullptr, "foob", nullptr,
4);
- CHECK_EQ(4, descriptor->regular_imports().size());
- entry = descriptor->regular_imports().find(
- declarations->at(1)->proxy()->raw_name())->second;
+ CHECK_EQ(4u, descriptor->regular_imports().size());
+ entry = descriptor->regular_imports()
+ .find(declarations->AtForTest(1)->proxy()->raw_name())
+ ->second;
CheckEntry(entry, nullptr, "z", "q", 0);
- entry = descriptor->regular_imports().find(
- declarations->at(2)->proxy()->raw_name())->second;
+ entry = descriptor->regular_imports()
+ .find(declarations->AtForTest(2)->proxy()->raw_name())
+ ->second;
CheckEntry(entry, nullptr, "n", "default", 1);
- entry = descriptor->regular_imports().find(
- declarations->at(9)->proxy()->raw_name())->second;
+ entry = descriptor->regular_imports()
+ .find(declarations->AtForTest(9)->proxy()->raw_name())
+ ->second;
CheckEntry(entry, nullptr, "mm", "m", 0);
- entry = descriptor->regular_imports().find(
- declarations->at(10)->proxy()->raw_name())->second;
+ entry = descriptor->regular_imports()
+ .find(declarations->AtForTest(10)->proxy()->raw_name())
+ ->second;
CheckEntry(entry, nullptr, "aa", "aa", 0);
}
@@ -6255,10 +6260,9 @@ void TestLanguageMode(const char* source,
i::Handle<i::Script> script =
factory->NewScript(factory->NewStringFromAsciiChecked(source));
- i::Zone zone(CcTest::i_isolate()->allocator());
+ i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::ParseInfo info(&zone, script);
i::Parser parser(&info);
- info.set_global();
parser.Parse(&info);
CHECK(info.literal() != NULL);
CHECK_EQ(expected_language_mode, info.literal()->language_mode());
@@ -7632,11 +7636,6 @@ TEST(FunctionDeclarationError) {
// Invalid in all contexts
const char* error_data[] = {
"try function foo() {} catch (e) {}",
- NULL
- };
- // Valid in sloppy mode only, and only when the
- // --harmony-restrictive-declarations flag is off
- const char* unrestricted_data[] = {
"do function foo() {} while (0);",
"for (;false;) function foo() {}",
"for (var i = 0; i < 1; i++) function f() { };",
@@ -7668,8 +7667,7 @@ TEST(FunctionDeclarationError) {
// "{ function* f() {} function f() {} }",
NULL
};
- // Valid only in sloppy mode, with or without
- // --harmony-restrictive-declarations
+ // Valid only in sloppy mode.
const char* sloppy_data[] = {
"if (true) function foo() {}",
"if (false) {} else function f() { };",
@@ -7680,30 +7678,13 @@ TEST(FunctionDeclarationError) {
};
// clang-format on
- static const ParserFlag restrictive_flags[] = {
- kAllowHarmonyRestrictiveDeclarations};
-
// Nothing parses in strict mode without a SyntaxError
RunParserSyncTest(strict_context, error_data, kError);
- RunParserSyncTest(strict_context, error_data, kError, NULL, 0,
- restrictive_flags, arraysize(restrictive_flags));
- RunParserSyncTest(strict_context, unrestricted_data, kError);
- RunParserSyncTest(strict_context, unrestricted_data, kError, NULL, 0,
- restrictive_flags, arraysize(restrictive_flags));
RunParserSyncTest(strict_context, sloppy_data, kError);
- RunParserSyncTest(strict_context, sloppy_data, kError, NULL, 0,
- restrictive_flags, arraysize(restrictive_flags));
- // In sloppy mode, some things are successful, depending on the flag
+ // In sloppy mode, sloppy_data is successful
RunParserSyncTest(sloppy_context, error_data, kError);
- RunParserSyncTest(sloppy_context, error_data, kError, NULL, 0,
- restrictive_flags, arraysize(restrictive_flags));
- RunParserSyncTest(sloppy_context, unrestricted_data, kSuccess);
- RunParserSyncTest(sloppy_context, unrestricted_data, kError, NULL, 0,
- restrictive_flags, arraysize(restrictive_flags));
RunParserSyncTest(sloppy_context, sloppy_data, kSuccess);
- RunParserSyncTest(sloppy_context, sloppy_data, kSuccess, restrictive_flags,
- arraysize(restrictive_flags));
}
TEST(ExponentiationOperator) {
@@ -8114,22 +8095,29 @@ TEST(AsyncAwaitModuleErrors) {
TEST(RestrictiveForInErrors) {
// clang-format off
- const char* context_data[][2] = {
+ const char* strict_context_data[][2] = {
{ "'use strict'", "" },
+ { NULL, NULL }
+ };
+ const char* sloppy_context_data[][2] = {
{ "", "" },
{ NULL, NULL }
};
const char* error_data[] = {
- "for (var x = 0 in {});",
"for (const x = 0 in {});",
"for (let x = 0 in {});",
NULL
};
+ const char* sloppy_data[] = {
+ "for (var x = 0 in {});",
+ NULL
+ };
// clang-format on
- static const ParserFlag always_flags[] = {kAllowHarmonyForIn};
- RunParserSyncTest(context_data, error_data, kError, nullptr, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, error_data, kError);
+ RunParserSyncTest(strict_context_data, sloppy_data, kError);
+ RunParserSyncTest(sloppy_context_data, error_data, kError);
+ RunParserSyncTest(sloppy_context_data, sloppy_data, kSuccess);
}
TEST(NoDuplicateGeneratorsInBlock) {
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 272dec39b4..b7aba6e9da 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -344,11 +344,12 @@ class TestSetup {
TEST(RecordTickSample) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::i_isolate());
- CpuProfiler profiler(CcTest::i_isolate());
+ i::Isolate* isolate = CcTest::i_isolate();
+ CpuProfilesCollection profiles(isolate);
+ CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", false);
- ProfileGenerator generator(&profiles);
+ ProfileGenerator generator(isolate, &profiles);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
@@ -416,11 +417,12 @@ static void CheckNodeIds(ProfileNode* node, unsigned* expectedId) {
TEST(SampleIds) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::i_isolate());
- CpuProfiler profiler(CcTest::i_isolate());
+ i::Isolate* isolate = CcTest::i_isolate();
+ CpuProfilesCollection profiles(isolate);
+ CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", true);
- ProfileGenerator generator(&profiles);
+ ProfileGenerator generator(isolate, &profiles);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
CodeEntry* entry2 = new CodeEntry(i::Logger::FUNCTION_TAG, "bbb");
CodeEntry* entry3 = new CodeEntry(i::Logger::FUNCTION_TAG, "ccc");
@@ -473,11 +475,12 @@ TEST(SampleIds) {
TEST(NoSamples) {
TestSetup test_setup;
- CpuProfilesCollection profiles(CcTest::i_isolate());
- CpuProfiler profiler(CcTest::i_isolate());
+ i::Isolate* isolate = CcTest::i_isolate();
+ CpuProfilesCollection profiles(isolate);
+ CpuProfiler profiler(isolate);
profiles.set_cpu_profiler(&profiler);
profiles.StartProfiling("", false);
- ProfileGenerator generator(&profiles);
+ ProfileGenerator generator(isolate, &profiles);
CodeEntry* entry1 = new CodeEntry(i::Logger::FUNCTION_TAG, "aaa");
generator.code_map()->AddCode(ToAddress(0x1500), entry1, 0x200);
@@ -681,7 +684,7 @@ TEST(LineNumber) {
profiler.processor()->StopSynchronously();
- bool is_lazy = i::FLAG_lazy && !(i::FLAG_ignition && i::FLAG_ignition_eager);
+ bool is_lazy = i::FLAG_lazy;
CHECK_EQ(1, GetFunctionLineNumber(profiler, env, "foo_at_the_first_line"));
CHECK_EQ(is_lazy ? 0 : 4,
GetFunctionLineNumber(profiler, env, "lazy_func_at_forth_line"));
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 504a52bc28..a9d941d998 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -98,7 +98,7 @@ using namespace v8::internal;
static bool CheckParse(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
return v8::internal::RegExpParser::ParseRegExp(
@@ -109,7 +109,7 @@ static bool CheckParse(const char* input) {
static void CheckParseEq(const char* input, const char* expected,
bool unicode = false) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
JSRegExp::Flags flags = JSRegExp::kNone;
@@ -129,7 +129,7 @@ static void CheckParseEq(const char* input, const char* expected,
static bool CheckSimple(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
@@ -147,7 +147,7 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
@@ -477,7 +477,7 @@ TEST(ParserRegression) {
static void ExpectError(const char* input, const char* expected,
bool unicode = false) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
JSRegExp::Flags flags = JSRegExp::kNone;
@@ -565,7 +565,7 @@ static bool NotWord(uc16 c) {
static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(2, &zone);
CharacterRange::AddClassEscape(c, ranges, &zone);
@@ -616,7 +616,7 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
static void Execute(const char* input, bool multiline, bool unicode,
bool is_one_byte, bool dot_output = false) {
v8::HandleScope scope(CcTest::isolate());
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
RegExpNode* node = Compile(input, multiline, unicode, is_one_byte, &zone);
USE(node);
#ifdef DEBUG
@@ -654,7 +654,7 @@ static unsigned PseudoRandom(int i, int j) {
TEST(SplayTreeSimple) {
static const unsigned kLimit = 1000;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneSplayTree<TestConfig> tree(&zone);
bool seen[kLimit];
for (unsigned i = 0; i < kLimit; i++) seen[i] = false;
@@ -721,7 +721,7 @@ TEST(DispatchTableConstruction) {
}
}
// Enter test data into dispatch table.
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
DispatchTable table(&zone);
for (int i = 0; i < kRangeCount; i++) {
uc16* range = ranges[i];
@@ -835,7 +835,7 @@ TEST(MacroAssemblerNativeSuccess) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -873,7 +873,7 @@ TEST(MacroAssemblerNativeSimple) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -940,7 +940,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::UC16,
4);
@@ -1013,7 +1013,7 @@ TEST(MacroAssemblerNativeBacktrack) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
0);
@@ -1054,7 +1054,7 @@ TEST(MacroAssemblerNativeBackReferenceLATIN1) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -1104,7 +1104,7 @@ TEST(MacroAssemblerNativeBackReferenceUC16) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::UC16,
4);
@@ -1157,7 +1157,7 @@ TEST(MacroAssemblernativeAtStart) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
0);
@@ -1217,7 +1217,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
4);
@@ -1276,7 +1276,7 @@ TEST(MacroAssemblerNativeRegisters) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
6);
@@ -1378,7 +1378,7 @@ TEST(MacroAssemblerStackOverflow) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
0);
@@ -1417,7 +1417,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
ContextInitializer initializer;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ArchRegExpMacroAssembler m(isolate, &zone, NativeRegExpMacroAssembler::LATIN1,
2);
@@ -1465,7 +1465,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
TEST(MacroAssembler) {
byte codes[1024];
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
RegExpMacroAssemblerIrregexp m(CcTest::i_isolate(), Vector<byte>(codes, 1024),
&zone);
// ^f(o)o.
@@ -1533,7 +1533,7 @@ TEST(AddInverseToTable) {
static const int kLimit = 1000;
static const int kRangeCount = 16;
for (int t = 0; t < 10; t++) {
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(kRangeCount, &zone);
for (int i = 0; i < kRangeCount; i++) {
@@ -1554,7 +1554,7 @@ TEST(AddInverseToTable) {
CHECK_EQ(is_on, set->Get(0) == false);
}
}
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(1, &zone);
ranges->Add(CharacterRange::Range(0xFFF0, 0xFFFE), &zone);
@@ -1667,7 +1667,7 @@ TEST(UncanonicalizeEquivalence) {
static void TestRangeCaseIndependence(Isolate* isolate, CharacterRange input,
Vector<CharacterRange> expected) {
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
int count = expected.length();
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(count, &zone);
@@ -1736,7 +1736,7 @@ static bool InClass(uc32 c, ZoneList<CharacterRange>* ranges) {
TEST(UnicodeRangeSplitter) {
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
base->Add(CharacterRange::Everything(), &zone);
@@ -1780,7 +1780,7 @@ TEST(UnicodeRangeSplitter) {
TEST(CanonicalizeCharacterSets) {
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(4, &zone);
CharacterSet set(list);
@@ -1841,7 +1841,7 @@ TEST(CanonicalizeCharacterSets) {
TEST(CharacterRangeMerge) {
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
ZoneList<CharacterRange> l1(4, &zone);
ZoneList<CharacterRange> l2(4, &zone);
// Create all combinations of intersections of ranges, both singletons and
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
index b2e6ec3a3b..4fd186e565 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm.cc
@@ -50,7 +50,7 @@ TEST(WasmRelocationArmMemoryReference) {
code->Print(os);
::printf("f() = %d\n\n", ret_value);
#endif
- size_t offset = 1234;
+ int offset = 1234;
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
@@ -103,7 +103,7 @@ TEST(WasmRelocationArmMemorySizeReference) {
CSignature0<int32_t> csig;
CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef DEBUG
OFStream os(stdout);
@@ -124,7 +124,7 @@ TEST(WasmRelocationArmMemorySizeReference) {
}
ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef DEBUG
code->Print(os);
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
index 3b49f00afc..c59a01aaed 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-arm64.cc
@@ -52,7 +52,7 @@ TEST(WasmRelocationArm64MemoryReference) {
code->Print(os);
::printf("f() = %" PRIx64 "\n\n", ret_value);
#endif
- size_t offset = 1234;
+ int offset = 1234;
// Relocating reference by offset
int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
index 305d0089c3..f2241d5f92 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-ia32.cc
@@ -25,7 +25,7 @@ static int32_t DummyStaticFunction(Object* result) { return 1; }
TEST(WasmRelocationIa32MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -56,7 +56,7 @@ TEST(WasmRelocationIa32MemoryReference) {
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
- size_t offset = 1234;
+ int offset = 1234;
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
@@ -87,7 +87,7 @@ TEST(WasmRelocationIa32MemoryReference) {
TEST(WasmRelocationIa32MemorySizeReference) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -114,7 +114,7 @@ TEST(WasmRelocationIa32MemorySizeReference) {
CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
OFStream os(stdout);
@@ -138,7 +138,7 @@ TEST(WasmRelocationIa32MemorySizeReference) {
}
ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
code->Print(os);
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
index 11fa45164e..a5df703410 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x64.cc
@@ -52,7 +52,7 @@ TEST(WasmRelocationX64MemoryReference) {
byte* end = begin + code->instruction_size();
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
- size_t offset = 1234;
+ int offset = 1234;
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
@@ -107,7 +107,7 @@ TEST(WasmRelocationX64WasmMemorySizeReference) {
CSignature0<int64_t> csig;
CodeRunner<int64_t> runnable(isolate, code, &csig);
int64_t ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<uint32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
OFStream os(stdout);
@@ -130,7 +130,7 @@ TEST(WasmRelocationX64WasmMemorySizeReference) {
}
ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<uint32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
code->Print(os);
diff --git a/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc b/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
index 2156e96ecb..17b340ac9e 100644
--- a/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
+++ b/deps/v8/test/cctest/test-run-wasm-relocation-x87.cc
@@ -25,7 +25,7 @@ static int32_t DummyStaticFunction(Object* result) { return 1; }
TEST(WasmRelocationX87MemoryReference) {
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -56,7 +56,7 @@ TEST(WasmRelocationX87MemoryReference) {
disasm::Disassembler::Disassemble(stdout, begin, end);
#endif
- size_t offset = 1234;
+ int offset = 1234;
// Relocating references by offset
int mode_mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE);
@@ -87,7 +87,7 @@ TEST(WasmRelocationX87MemoryReference) {
TEST(WasmRelocationX87MemorySizeReference) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
v8::internal::byte buffer[4096];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -114,7 +114,7 @@ TEST(WasmRelocationX87MemorySizeReference) {
CodeRunner<int32_t> runnable(isolate, code, &csig);
int32_t ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
OFStream os(stdout);
@@ -138,7 +138,7 @@ TEST(WasmRelocationX87MemorySizeReference) {
}
ret_value = runnable.Call();
- CHECK_NE(ret_value, 0xdeadbeef);
+ CHECK_NE(ret_value, bit_cast<int32_t>(0xdeadbeef));
#ifdef OBJECT_PRINT
code->Print(os);
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 37992fa7e7..1b5497f772 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -50,9 +50,10 @@
using namespace v8::internal;
-void DisableTurbofan() {
- const char* flag = "--turbo-filter=\"\"";
- FlagList::SetFlagsFromString(flag, StrLength(flag));
+void DisableAlwaysOpt() {
+ // Isolates prepared for serialization do not optimize. The only exception is
+ // with the flag --always-opt.
+ FLAG_always_opt = false;
}
@@ -150,10 +151,7 @@ static void SanityCheck(v8::Isolate* v8_isolate) {
}
UNINITIALIZED_TEST(StartupSerializerOnce) {
- // The serialize-deserialize tests only work if the VM is built without
- // serialization. That doesn't matter. We don't need to be able to
- // serialize a snapshot in a VM that is booted from a snapshot.
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
Vector<const byte> blob = Serialize(isolate);
isolate = Deserialize(blob);
@@ -171,7 +169,7 @@ UNINITIALIZED_TEST(StartupSerializerOnce) {
}
UNINITIALIZED_TEST(StartupSerializerTwice) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
Vector<const byte> blob1 = Serialize(isolate);
Vector<const byte> blob2 = Serialize(isolate);
@@ -191,7 +189,7 @@ UNINITIALIZED_TEST(StartupSerializerTwice) {
}
UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
Vector<const byte> blob = Serialize(isolate);
isolate = Deserialize(blob);
@@ -215,7 +213,7 @@ UNINITIALIZED_TEST(StartupSerializerOnceRunScript) {
}
UNINITIALIZED_TEST(StartupSerializerTwiceRunScript) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::Isolate* isolate = TestIsolate::NewInitialized(true);
Vector<const byte> blob1 = Serialize(isolate);
Vector<const byte> blob2 = Serialize(isolate);
@@ -287,7 +285,7 @@ static void PartiallySerializeObject(Vector<const byte>* startup_blob_out,
isolate, v8::SnapshotCreator::FunctionCodeHandling::kClear);
startup_serializer.SerializeStrongReferences();
- PartialSerializer partial_serializer(isolate, &startup_serializer);
+ PartialSerializer partial_serializer(isolate, &startup_serializer, nullptr);
partial_serializer.Serialize(&raw_foo);
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -303,7 +301,7 @@ static void PartiallySerializeObject(Vector<const byte>* startup_blob_out,
}
UNINITIALIZED_TEST(PartialSerializerObject) {
- DisableTurbofan();
+ DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> partial_blob;
PartiallySerializeObject(&startup_blob, &partial_blob);
@@ -387,7 +385,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
startup_serializer.SerializeStrongReferences();
SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer);
+ PartialSerializer partial_serializer(isolate, &startup_serializer, nullptr);
partial_serializer.Serialize(&raw_context);
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -401,7 +399,7 @@ static void PartiallySerializeContext(Vector<const byte>* startup_blob_out,
}
UNINITIALIZED_TEST(PartialSerializerContext) {
- DisableTurbofan();
+ DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> partial_blob;
PartiallySerializeContext(&startup_blob, &partial_blob);
@@ -416,7 +414,8 @@ UNINITIALIZED_TEST(PartialSerializerContext) {
HandleScope handle_scope(isolate);
Handle<Object> root;
Handle<JSGlobalProxy> global_proxy =
- isolate->factory()->NewUninitializedJSGlobalProxy();
+ isolate->factory()->NewUninitializedJSGlobalProxy(
+ JSGlobalProxy::SizeWithInternalFields(0));
{
SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
@@ -506,7 +505,7 @@ static void PartiallySerializeCustomContext(
startup_serializer.SerializeStrongReferences();
SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer);
+ PartialSerializer partial_serializer(isolate, &startup_serializer, nullptr);
partial_serializer.Serialize(&raw_context);
startup_serializer.SerializeWeakReferencesAndDeferred();
@@ -520,7 +519,7 @@ static void PartiallySerializeCustomContext(
}
UNINITIALIZED_TEST(PartialSerializerCustomContext) {
- DisableTurbofan();
+ DisableAlwaysOpt();
Vector<const byte> startup_blob;
Vector<const byte> partial_blob;
PartiallySerializeCustomContext(&startup_blob, &partial_blob);
@@ -535,7 +534,8 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
HandleScope handle_scope(isolate);
Handle<Object> root;
Handle<JSGlobalProxy> global_proxy =
- isolate->factory()->NewUninitializedJSGlobalProxy();
+ isolate->factory()->NewUninitializedJSGlobalProxy(
+ JSGlobalProxy::SizeWithInternalFields(0));
{
SnapshotData snapshot_data(partial_blob);
Deserializer deserializer(&snapshot_data);
@@ -609,16 +609,11 @@ UNINITIALIZED_TEST(PartialSerializerCustomContext) {
v8_isolate->Dispose();
}
-TEST(CustomSnapshotDataBlob) {
- DisableTurbofan();
+TEST(CustomSnapshotDataBlob1) {
+ DisableAlwaysOpt();
const char* source1 = "function f() { return 42; }";
- const char* source2 =
- "function f() { return g() * 2; }"
- "function g() { return 43; }"
- "/./.test('a')";
v8::StartupData data1 = v8::V8::CreateSnapshotDataBlob(source1);
- v8::StartupData data2 = v8::V8::CreateSnapshotDataBlob(source2);
v8::Isolate::CreateParams params1;
params1.snapshot_blob = &data1;
@@ -637,6 +632,16 @@ TEST(CustomSnapshotDataBlob) {
CHECK(CompileRun("this.g")->IsUndefined());
}
isolate1->Dispose();
+}
+
+TEST(CustomSnapshotDataBlob2) {
+ DisableAlwaysOpt();
+ const char* source2 =
+ "function f() { return g() * 2; }"
+ "function g() { return 43; }"
+ "/./.test('a')";
+
+ v8::StartupData data2 = v8::V8::CreateSnapshotDataBlob(source2);
v8::Isolate::CreateParams params2;
params2.snapshot_blob = &data2;
@@ -657,15 +662,13 @@ TEST(CustomSnapshotDataBlob) {
isolate2->Dispose();
}
-
static void SerializationFunctionTemplate(
const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(args[0]);
}
TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
- DisableTurbofan();
-
+ DisableAlwaysOpt();
const char* source1 =
"var o = {};"
"(function() {"
@@ -710,7 +713,7 @@ TEST(CustomSnapshotDataBlobOutdatedContextWithOverflow) {
}
TEST(CustomSnapshotDataBlobWithLocker) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate0 = v8::Isolate::New(create_params);
@@ -748,7 +751,7 @@ TEST(CustomSnapshotDataBlobWithLocker) {
}
TEST(CustomSnapshotDataBlobStackOverflow) {
- DisableTurbofan();
+ DisableAlwaysOpt();
const char* source =
"var a = [0];"
"var b = a;"
@@ -794,7 +797,7 @@ bool IsCompiled(const char* name) {
}
TEST(SnapshotDataBlobWithWarmup) {
- DisableTurbofan();
+ DisableAlwaysOpt();
const char* warmup = "Math.abs(1); Math.random = 1;";
v8::StartupData cold = v8::V8::CreateSnapshotDataBlob();
@@ -815,18 +818,18 @@ TEST(SnapshotDataBlobWithWarmup) {
// Running the warmup script has effect on whether functions are
// pre-compiled, but does not pollute the context.
CHECK(IsCompiled("Math.abs"));
- CHECK(!IsCompiled("Number.parseInt"));
+ CHECK(!IsCompiled("String.raw"));
CHECK(CompileRun("Math.random")->IsFunction());
}
isolate->Dispose();
}
TEST(CustomSnapshotDataBlobWithWarmup) {
- DisableTurbofan();
+ DisableAlwaysOpt();
const char* source =
"function f() { return Math.abs(1); }\n"
- "function g() { return Number.parseInt(1); }\n"
- "Number.parseFloat(1);"
+ "function g() { return String.raw(1); }\n"
+ "Object.valueOf(1);"
"var a = 5";
const char* warmup = "a = f()";
@@ -850,15 +853,15 @@ TEST(CustomSnapshotDataBlobWithWarmup) {
CHECK(IsCompiled("f"));
CHECK(IsCompiled("Math.abs"));
CHECK(!IsCompiled("g"));
- CHECK(!IsCompiled("Number.parseInt"));
- CHECK(!IsCompiled("Number.parseFloat"));
+ CHECK(!IsCompiled("String.raw"));
+ CHECK(!IsCompiled("Object.valueOf"));
CHECK_EQ(5, CompileRun("a")->Int32Value(context).FromJust());
}
isolate->Dispose();
}
TEST(CustomSnapshotDataBlobImmortalImmovableRoots) {
- DisableTurbofan();
+ DisableAlwaysOpt();
// Flood the startup snapshot with shared function infos. If they are
// serialized before the immortal immovable root, the root will no longer end
// up on the first page.
@@ -1070,9 +1073,9 @@ TEST(CodeSerializerLargeCodeObject) {
FLAG_always_opt = false;
Vector<const uint8_t> source =
- ConstructSource(STATIC_CHAR_VECTOR("var j=1; if (!j) {"),
+ ConstructSource(STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
STATIC_CHAR_VECTOR("for (let i of Object.prototype);"),
- STATIC_CHAR_VECTOR("} j=7; j"), 2000);
+ STATIC_CHAR_VECTOR("} j=7; j"), 1000);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
@@ -1678,7 +1681,7 @@ TEST(CodeSerializerInternalReference) {
// In ignition there are only relative jumps, so the following code
// would not have any internal references. This test is not relevant
// for ignition.
- if (FLAG_ignition) {
+ if (FLAG_ignition || FLAG_turbo) {
return;
}
// Disable experimental natives that are loaded after deserialization.
@@ -1762,7 +1765,7 @@ TEST(CodeSerializerInternalReference) {
}
TEST(CodeSerializerEagerCompilationAndPreAge) {
- if (FLAG_ignition) return;
+ if (FLAG_ignition || FLAG_turbo) return;
FLAG_lazy = true;
FLAG_serialize_toplevel = true;
@@ -1949,7 +1952,7 @@ TEST(CodeSerializerEmbeddedObject) {
}
TEST(SnapshotCreatorMultipleContexts) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::StartupData blob;
{
v8::SnapshotCreator creator;
@@ -1959,19 +1962,19 @@ TEST(SnapshotCreatorMultipleContexts) {
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
CompileRun("var f = function() { return 1; }");
- CHECK_EQ(0, creator.AddContext(context));
+ CHECK_EQ(0u, creator.AddContext(context));
}
{
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
CompileRun("var f = function() { return 2; }");
- CHECK_EQ(1, creator.AddContext(context));
+ CHECK_EQ(1u, creator.AddContext(context));
}
{
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
- CHECK_EQ(2, creator.AddContext(context));
+ CHECK_EQ(2u, creator.AddContext(context));
}
blob =
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
@@ -2010,24 +2013,27 @@ TEST(SnapshotCreatorMultipleContexts) {
delete[] blob.data;
}
-static void SerializedCallback(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
+void SerializedCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(v8_num(42));
}
-static void SerializedCallbackReplacement(
+void SerializedCallbackReplacement(
const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(v8_num(1337));
}
+static int serialized_static_field = 314;
+
intptr_t original_external_references[] = {
- reinterpret_cast<intptr_t>(SerializedCallback), 0};
+ reinterpret_cast<intptr_t>(SerializedCallback),
+ reinterpret_cast<intptr_t>(&serialized_static_field), 0};
intptr_t replaced_external_references[] = {
- reinterpret_cast<intptr_t>(SerializedCallbackReplacement), 0};
+ reinterpret_cast<intptr_t>(SerializedCallbackReplacement),
+ reinterpret_cast<intptr_t>(&serialized_static_field), 0};
TEST(SnapshotCreatorExternalReferences) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::StartupData blob;
{
v8::SnapshotCreator creator(original_external_references);
@@ -2042,7 +2048,7 @@ TEST(SnapshotCreatorExternalReferences) {
callback->GetFunction(context).ToLocalChecked();
CHECK(context->Global()->Set(context, v8_str("f"), function).FromJust());
ExpectInt32("f()", 42);
- CHECK_EQ(0, creator.AddContext(context));
+ CHECK_EQ(0u, creator.AddContext(context));
}
blob =
creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
@@ -2086,10 +2092,61 @@ TEST(SnapshotCreatorExternalReferences) {
delete[] blob.data;
}
+TEST(SnapshotCreatorUnknownExternalReferences) {
+ DisableAlwaysOpt();
+ v8::SnapshotCreator creator;
+ v8::Isolate* isolate = creator.GetIsolate();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ v8::Context::Scope context_scope(context);
+
+ v8::Local<v8::FunctionTemplate> callback =
+ v8::FunctionTemplate::New(isolate, SerializedCallback);
+ v8::Local<v8::Value> function =
+ callback->GetFunction(context).ToLocalChecked();
+ CHECK(context->Global()->Set(context, v8_str("f"), function).FromJust());
+ ExpectInt32("f()", 42);
+
+ CHECK_EQ(0u, creator.AddContext(context));
+ }
+ v8::StartupData blob =
+ creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+
+ delete[] blob.data;
+}
+
+struct InternalFieldData {
+ uint32_t data;
+};
+
+v8::StartupData SerializeInternalFields(v8::Local<v8::Object> holder,
+ int index) {
+ InternalFieldData* data = static_cast<InternalFieldData*>(
+ holder->GetAlignedPointerFromInternalField(index));
+ int size = sizeof(*data);
+ char* payload = new char[size];
+ // We simply use memcpy to serialize the content.
+ memcpy(payload, data, size);
+ return {payload, size};
+}
+
+void DeserializeInternalFields(v8::Local<v8::Object> holder, int index,
+ v8::StartupData payload) {
+ InternalFieldData* data = new InternalFieldData{0};
+ memcpy(data, payload.data, payload.raw_size);
+ holder->SetAlignedPointerInInternalField(index, data);
+}
+
TEST(SnapshotCreatorTemplates) {
- DisableTurbofan();
+ DisableAlwaysOpt();
v8::StartupData blob;
+
{
+ InternalFieldData* a1 = new InternalFieldData{11};
+ InternalFieldData* b0 = new InternalFieldData{20};
+ InternalFieldData* c0 = new InternalFieldData{30};
+
v8::SnapshotCreator creator(original_external_references);
v8::Isolate* isolate = creator.GetIsolate();
{
@@ -2102,14 +2159,42 @@ TEST(SnapshotCreatorTemplates) {
global_template->Set(v8_str("f"), callback);
v8::Local<v8::Context> context =
v8::Context::New(isolate, no_extension, global_template);
+ v8::Local<v8::ObjectTemplate> object_template =
+ v8::ObjectTemplate::New(isolate);
+ object_template->SetInternalFieldCount(3);
+
v8::Context::Scope context_scope(context);
ExpectInt32("f()", 42);
- CHECK_EQ(0, creator.AddContext(context));
- CHECK_EQ(0, creator.AddTemplate(callback));
- CHECK_EQ(1, creator.AddTemplate(global_template));
+
+ v8::Local<v8::Object> a =
+ object_template->NewInstance(context).ToLocalChecked();
+ v8::Local<v8::Object> b =
+ object_template->NewInstance(context).ToLocalChecked();
+ v8::Local<v8::Object> c =
+ object_template->NewInstance(context).ToLocalChecked();
+ v8::Local<v8::External> null_external =
+ v8::External::New(isolate, nullptr);
+ v8::Local<v8::External> field_external =
+ v8::External::New(isolate, &serialized_static_field);
+ a->SetInternalField(0, b);
+ a->SetAlignedPointerInInternalField(1, a1);
+ b->SetAlignedPointerInInternalField(0, b0);
+ b->SetInternalField(1, c);
+ c->SetAlignedPointerInInternalField(0, c0);
+ c->SetInternalField(1, null_external);
+ c->SetInternalField(2, field_external);
+ CHECK(context->Global()->Set(context, v8_str("a"), a).FromJust());
+
+ CHECK_EQ(0u, creator.AddContext(context));
+ CHECK_EQ(0u, creator.AddTemplate(callback));
+ CHECK_EQ(1u, creator.AddTemplate(global_template));
}
- blob =
- creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+ blob = creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear,
+ SerializeInternalFields);
+
+ delete a1;
+ delete b0;
+ delete c0;
}
{
@@ -2117,6 +2202,7 @@ TEST(SnapshotCreatorTemplates) {
params.snapshot_blob = &blob;
params.array_buffer_allocator = CcTest::array_buffer_allocator();
params.external_references = original_external_references;
+ params.deserialize_internal_fields_callback = DeserializeInternalFields;
v8::Isolate* isolate = v8::Isolate::New(params);
{
v8::Isolate::Scope isolate_scope(isolate);
@@ -2150,10 +2236,47 @@ TEST(SnapshotCreatorTemplates) {
// Check that it instantiates to the same prototype.
ExpectTrue("g.prototype === f.prototype");
+ // Retrieve internal fields.
+ v8::Local<v8::Object> a = context->Global()
+ ->Get(context, v8_str("a"))
+ .ToLocalChecked()
+ ->ToObject(context)
+ .ToLocalChecked();
+ v8::Local<v8::Object> b =
+ a->GetInternalField(0)->ToObject(context).ToLocalChecked();
+ InternalFieldData* a1 = reinterpret_cast<InternalFieldData*>(
+ a->GetAlignedPointerFromInternalField(1));
+ v8::Local<v8::Value> a2 = a->GetInternalField(2);
+
+ InternalFieldData* b0 = reinterpret_cast<InternalFieldData*>(
+ b->GetAlignedPointerFromInternalField(0));
+ v8::Local<v8::Object> c =
+ b->GetInternalField(1)->ToObject(context).ToLocalChecked();
+ v8::Local<v8::Value> b2 = b->GetInternalField(2);
+
+ InternalFieldData* c0 = reinterpret_cast<InternalFieldData*>(
+ c->GetAlignedPointerFromInternalField(0));
+ v8::Local<v8::Value> c1 = c->GetInternalField(1);
+ v8::Local<v8::Value> c2 = c->GetInternalField(2);
+
+ CHECK_EQ(11u, a1->data);
+ CHECK(a2->IsUndefined());
+ CHECK_EQ(20u, b0->data);
+ CHECK(b2->IsUndefined());
+ CHECK_EQ(30u, c0->data);
+ CHECK(c1->IsExternal());
+ CHECK_NULL(v8::Local<v8::External>::Cast(c1)->Value());
+ CHECK_EQ(static_cast<void*>(&serialized_static_field),
+ v8::Local<v8::External>::Cast(c2)->Value());
+
// Accessing out of bound returns empty MaybeHandle.
CHECK(v8::ObjectTemplate::FromSnapshot(isolate, 2).IsEmpty());
CHECK(v8::FunctionTemplate::FromSnapshot(isolate, 2).IsEmpty());
CHECK(v8::Context::FromSnapshot(isolate, 2).IsEmpty());
+
+ delete a1;
+ delete b0;
+ delete c0;
}
{
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 9793ae7f18..80c8f92ac4 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -600,6 +600,42 @@ TEST(Traverse) {
printf("18\n");
}
+TEST(ConsStringWithEmptyFirstFlatten) {
+ printf("ConsStringWithEmptyFirstFlatten\n");
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ i::Handle<i::String> initial_fst =
+ isolate->factory()->NewStringFromAsciiChecked("fst012345");
+ i::Handle<i::String> initial_snd =
+ isolate->factory()->NewStringFromAsciiChecked("snd012345");
+ i::Handle<i::String> str = isolate->factory()
+ ->NewConsString(initial_fst, initial_snd)
+ .ToHandleChecked();
+ CHECK(str->IsConsString());
+ auto cons = i::Handle<i::ConsString>::cast(str);
+
+ const int initial_length = cons->length();
+
+ // set_first / set_second does not update the length (which the heap verifier
+ // checks), so we need to ensure the length stays the same.
+
+ i::Handle<i::String> new_fst = isolate->factory()->empty_string();
+ i::Handle<i::String> new_snd =
+ isolate->factory()->NewStringFromAsciiChecked("snd012345012345678");
+ cons->set_first(*new_fst);
+ cons->set_second(*new_snd);
+ CHECK(!cons->IsFlat());
+ CHECK_EQ(initial_length, new_fst->length() + new_snd->length());
+ CHECK_EQ(initial_length, cons->length());
+
+ // Make sure Flatten doesn't alloc a new string.
+ DisallowHeapAllocation no_alloc;
+ i::Handle<i::String> flat = i::String::Flatten(cons);
+ CHECK(flat->IsFlat());
+ CHECK_EQ(initial_length, flat->length());
+}
static void VerifyCharacterStream(
String* flat_string, String* cons_string) {
diff --git a/deps/v8/test/cctest/test-traced-value.cc b/deps/v8/test/cctest/test-traced-value.cc
new file mode 100644
index 0000000000..1c3e7ac252
--- /dev/null
+++ b/deps/v8/test/cctest/test-traced-value.cc
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/tracing/traced-value.h"
+#include "test/cctest/cctest.h"
+
+using v8::tracing::TracedValue;
+
+TEST(FlatDictionary) {
+ auto value = TracedValue::Create();
+ value->SetInteger("int", 2014);
+ value->SetDouble("double", 0.0);
+ value->SetBoolean("bool", true);
+ value->SetString("string", "string");
+ std::string json = "PREFIX";
+ value->AppendAsTraceFormat(&json);
+ CHECK_EQ(
+ "PREFIX{\"int\":2014,\"double\":0,\"bool\":true,\"string\":"
+ "\"string\"}",
+ json);
+}
+
+TEST(NoDotPathExpansion) {
+ auto value = TracedValue::Create();
+ value->SetInteger("in.t", 2014);
+ value->SetDouble("doub.le", -20.25);
+ value->SetBoolean("bo.ol", true);
+ value->SetString("str.ing", "str.ing");
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ CHECK_EQ(
+ "{\"in.t\":2014,\"doub.le\":-20.25,\"bo.ol\":true,\"str.ing\":\"str."
+ "ing\"}",
+ json);
+}
+
+TEST(Hierarchy) {
+ auto value = TracedValue::Create();
+ value->SetInteger("i0", 2014);
+ value->BeginDictionary("dict1");
+ value->SetInteger("i1", 2014);
+ value->BeginDictionary("dict2");
+ value->SetBoolean("b2", false);
+ value->EndDictionary();
+ value->SetString("s1", "foo");
+ value->EndDictionary();
+ value->SetDouble("d0", 0.0);
+ value->SetDouble("d1", 10.5);
+ value->SetBoolean("b0", true);
+ value->BeginArray("a1");
+ value->AppendInteger(1);
+ value->AppendBoolean(true);
+ value->BeginDictionary();
+ value->SetInteger("i2", 3);
+ value->EndDictionary();
+ value->EndArray();
+ value->SetString("s0", "foo");
+
+ value->BeginArray("arr1");
+ value->BeginDictionary();
+ value->EndDictionary();
+ value->BeginArray();
+ value->EndArray();
+ value->BeginDictionary();
+ value->EndDictionary();
+ value->EndArray();
+
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ CHECK_EQ(
+ "{\"i0\":2014,\"dict1\":{\"i1\":2014,\"dict2\":{\"b2\":false},"
+ "\"s1\":\"foo\"},\"d0\":0,\"d1\":10.5,\"b0\":true,\"a1\":[1,true,{\"i2\":"
+ "3}],\"s0\":\"foo\",\"arr1\":[{},[],{}]}",
+ json);
+}
+
+TEST(LongStrings) {
+ std::string long_string = "supercalifragilisticexpialidocious";
+ std::string long_string2 = "0123456789012345678901234567890123456789";
+ char long_string3[4096];
+ for (size_t i = 0; i < sizeof(long_string3); ++i)
+ long_string3[i] = static_cast<char>('a' + (i % 26));
+ long_string3[sizeof(long_string3) - 1] = '\0';
+
+ auto value = TracedValue::Create();
+ value->SetString("a", "short");
+ value->SetString("b", long_string);
+ value->BeginArray("c");
+ value->AppendString(long_string2);
+ value->AppendString("");
+ value->BeginDictionary();
+ value->SetString("a", long_string3);
+ value->EndDictionary();
+ value->EndArray();
+
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ CHECK_EQ("{\"a\":\"short\",\"b\":\"" + long_string + "\",\"c\":[\"" +
+ long_string2 + "\",\"\",{\"a\":\"" + long_string3 + "\"}]}",
+ json);
+}
+
+TEST(Escaping) {
+ const char* string1 = "abc\"\'\\\\x\"y\'z\n\x09\x17";
+ std::string chars127;
+ for (int i = 1; i <= 127; ++i) {
+ chars127 += static_cast<char>(i);
+ }
+ auto value = TracedValue::Create();
+ value->SetString("a", string1);
+ value->SetString("b", chars127);
+
+ std::string json;
+ value->AppendAsTraceFormat(&json);
+ // Cannot use the expected value literal directly in CHECK_EQ
+ // as it fails to process # character on Windows.
+ const char* expected =
+ "{\"a\":\"abc\\\"\'\\\\\\\\x\\\"y\'z\\n\\t\\u0017\",\"b\":"
+ "\"\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\u0008\\t\\n\\u000B"
+ "\\u000C\\u000D\\u000E\\u000F\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\"
+ "u0016\\u0017\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F "
+ "!\\\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`"
+ "abcdefghijklmnopqrstuvwxyz{|}~\177\"}";
+ CHECK_EQ(expected, json);
+}
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index dd1b3e3703..71bd3dbf29 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -31,12 +31,6 @@ static bool IsInteger(double x) {
return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
}
-
-static bool IsInteger(i::Object* x) {
- return x->IsNumber() && IsInteger(x->Number());
-}
-
-
typedef uint32_t bitset;
struct Tests {
@@ -51,7 +45,7 @@ struct Tests {
Tests()
: isolate(CcTest::InitIsolateOnce()),
scope(isolate),
- zone(isolate->allocator()),
+ zone(isolate->allocator(), ZONE_NAME),
T(&zone, isolate, isolate->random_number_generator()) {}
bool IsBitset(Type* type) { return type->IsBitsetForTesting(); }
@@ -113,8 +107,8 @@ struct Tests {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* t = *it;
CHECK(1 ==
- this->IsBitset(t) + t->IsConstant() + t->IsRange() +
- this->IsUnion(t));
+ this->IsBitset(t) + t->IsHeapConstant() + t->IsRange() +
+ t->IsOtherNumberConstant() + this->IsUnion(t));
}
}
@@ -191,15 +185,25 @@ struct Tests {
// Constructor
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- Type* type = T.Constant(value);
- CHECK(type->IsConstant());
+ Type* type = T.NewConstant(value);
+ CHECK(type->IsHeapConstant() || type->IsOtherNumberConstant() ||
+ type->IsRange());
}
// Value attribute
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- Type* type = T.Constant(value);
- CHECK(*value == *type->AsConstant()->Value());
+ Type* type = T.NewConstant(value);
+ if (type->IsHeapConstant()) {
+ CHECK(value.address() == type->AsHeapConstant()->Value().address());
+ } else if (type->IsOtherNumberConstant()) {
+ CHECK(value->IsHeapNumber());
+ CHECK(value->Number() == type->AsOtherNumberConstant()->Value());
+ } else {
+ CHECK(type->IsRange());
+ double v = value->Number();
+ CHECK(v == type->AsRange()->Min() && v == type->AsRange()->Max());
+ }
}
// Functionality & Injectivity: Constant(V1) = Constant(V2) iff V1 = V2
@@ -207,61 +211,72 @@ struct Tests {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- Type* type1 = T.Constant(value1);
- Type* type2 = T.Constant(value2);
- CHECK(Equal(type1, type2) == (*value1 == *value2));
+ Type* type1 = T.NewConstant(value1);
+ Type* type2 = T.NewConstant(value2);
+ if (type1->IsOtherNumberConstant() && type2->IsOtherNumberConstant()) {
+ CHECK(Equal(type1, type2) ==
+ (type1->AsOtherNumberConstant()->Value() ==
+ type2->AsOtherNumberConstant()->Value()));
+ } else if (type1->IsRange() && type2->IsRange()) {
+ CHECK(Equal(type1, type2) ==
+ ((type1->AsRange()->Min() == type2->AsRange()->Min()) &&
+ (type1->AsRange()->Max() == type2->AsRange()->Max())));
+ } else {
+ CHECK(Equal(type1, type2) == (*value1 == *value2));
+ }
}
}
// Typing of numbers
Factory* fac = isolate->factory();
- CHECK(T.Constant(fac->NewNumber(0))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(1))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(0x3fffffff))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(-1))->Is(T.Negative31));
- CHECK(T.Constant(fac->NewNumber(-0x3fffffff))->Is(T.Negative31));
- CHECK(T.Constant(fac->NewNumber(-0x40000000))->Is(T.Negative31));
- CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.Unsigned31));
- CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.Unsigned30));
- CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned31));
- CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned30));
- CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.Negative32));
- CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.Negative31));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.Negative32));
- CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(0))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(1))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(0x3fffffff))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(-1))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(-0x3fffffff))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(-0x40000000))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(0x40000000))->Is(T.Unsigned31));
+ CHECK(!T.NewConstant(fac->NewNumber(0x40000000))->Is(T.Unsigned30));
+ CHECK(T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned31));
+ CHECK(!T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.Unsigned30));
+ CHECK(T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.Negative32));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.Negative31));
+ CHECK(T.NewConstant(fac->NewNumber(-0x7fffffff))->Is(T.Negative32));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x7fffffff - 1))->Is(T.Negative31));
if (SmiValuesAre31Bits()) {
- CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
- CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
- CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
- CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
+ CHECK(!T.NewConstant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
+ CHECK(!T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
} else {
CHECK(SmiValuesAre32Bits());
- CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
- }
- CHECK(T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned32));
- CHECK(!T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned31));
- CHECK(T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned32));
- CHECK(!T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned31));
- CHECK(T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.Integral32));
- CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.Integral32));
- CHECK(T.Constant(fac->NewNumber(0.1))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(0.1))->Is(T.Integral32));
- CHECK(T.Constant(fac->NewNumber(-10.1))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(-10.1))->Is(T.Integral32));
- CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(10e60))->Is(T.Integral32));
- CHECK(T.Constant(fac->NewNumber(-1.0*0.0))->Is(T.MinusZero));
- CHECK(T.Constant(fac->NewNumber(std::numeric_limits<double>::quiet_NaN()))
- ->Is(T.NaN));
- CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.Integral32));
- CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.PlainNumber));
- CHECK(!T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(-0x40000001))->Is(T.SignedSmall));
+ CHECK(T.NewConstant(fac->NewNumber(-0x7fffffff - 1))->Is(T.SignedSmall));
+ }
+ CHECK(T.NewConstant(fac->NewNumber(0x80000000u))->Is(T.Unsigned32));
+ CHECK(!T.NewConstant(fac->NewNumber(0x80000000u))->Is(T.Unsigned31));
+ CHECK(T.NewConstant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned32));
+ CHECK(!T.NewConstant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned31));
+ CHECK(T.NewConstant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(0.1))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(0.1))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(-10.1))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(-10.1))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(10e60))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(10e60))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(-1.0 * 0.0))->Is(T.MinusZero));
+ CHECK(
+ T.NewConstant(fac->NewNumber(std::numeric_limits<double>::quiet_NaN()))
+ ->Is(T.NaN));
+ CHECK(T.NewConstant(fac->NewNumber(V8_INFINITY))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(V8_INFINITY))->Is(T.Integral32));
+ CHECK(T.NewConstant(fac->NewNumber(-V8_INFINITY))->Is(T.PlainNumber));
+ CHECK(!T.NewConstant(fac->NewNumber(-V8_INFINITY))->Is(T.Integral32));
}
void Range() {
@@ -317,7 +332,7 @@ struct Tests {
// Constant(V)->Is(Of(V))
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- Type* const_type = T.Constant(value);
+ Type* const_type = T.NewConstant(value);
Type* of_type = T.Of(value);
CHECK(const_type->Is(of_type));
}
@@ -327,7 +342,7 @@ struct Tests {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
Type* type = *it;
- Type* const_type = T.Constant(value);
+ Type* const_type = T.NewConstant(value);
Type* of_type = T.Of(value);
CHECK(!of_type->Is(type) || const_type->Is(type));
}
@@ -338,7 +353,7 @@ struct Tests {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
Type* type = *it;
- Type* const_type = T.Constant(value);
+ Type* const_type = T.NewConstant(value);
Type* of_type = T.Of(value);
CHECK(!const_type->Is(type) ||
of_type->Is(type) || type->Maybe(const_type));
@@ -521,10 +536,11 @@ struct Tests {
Type* type2 = *j;
CHECK(!type1->Is(type2) || this->IsBitset(type2) ||
this->IsUnion(type2) || this->IsUnion(type1) ||
- (type1->IsConstant() && type2->IsConstant()) ||
- (type1->IsConstant() && type2->IsRange()) ||
+ (type1->IsHeapConstant() && type2->IsHeapConstant()) ||
(this->IsBitset(type1) && type2->IsRange()) ||
(type1->IsRange() && type2->IsRange()) ||
+ (type1->IsOtherNumberConstant() &&
+ type2->IsOtherNumberConstant()) ||
!type1->IsInhabited());
}
}
@@ -559,37 +575,27 @@ struct Tests {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- Type* const_type1 = T.Constant(value1);
- Type* const_type2 = T.Constant(value2);
- CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
+ Type* const_type1 = T.NewConstant(value1);
+ Type* const_type2 = T.NewConstant(value2);
+ if (const_type1->IsOtherNumberConstant() &&
+ const_type2->IsOtherNumberConstant()) {
+ CHECK(const_type1->Is(const_type2) ==
+ (const_type1->AsOtherNumberConstant()->Value() ==
+ const_type2->AsOtherNumberConstant()->Value()));
+ } else if (const_type1->IsRange() && const_type2->IsRange()) {
+ CHECK(Equal(const_type1, const_type2) ==
+ ((const_type1->AsRange()->Min() ==
+ const_type2->AsRange()->Min()) &&
+ (const_type1->AsRange()->Max() ==
+ const_type2->AsRange()->Max())));
+ } else {
+ CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
+ }
}
}
// Range-specific subtyping
- // If IsInteger(v) then Constant(v)->Is(Range(v, v)).
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- Type* type = *it;
- if (type->IsConstant() && IsInteger(*type->AsConstant()->Value())) {
- CHECK(type->Is(T.Range(type->AsConstant()->Value()->Number(),
- type->AsConstant()->Value()->Number())));
- }
- }
-
- // If Constant(x)->Is(Range(min,max)) then IsInteger(v) and min <= x <= max.
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- if (type1->IsConstant() && type2->IsRange() && type1->Is(type2)) {
- double x = type1->AsConstant()->Value()->Number();
- double min = type2->AsRange()->Min();
- double max = type2->AsRange()->Max();
- CHECK(IsInteger(x) && min <= x && x <= max);
- }
- }
- }
-
// Lub(Range(x,y))->Is(T.Union(T.Integral32, T.OtherNumber))
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Type* type = *it;
@@ -712,9 +718,22 @@ struct Tests {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- Type* const_type1 = T.Constant(value1);
- Type* const_type2 = T.Constant(value2);
- CHECK(const_type1->Maybe(const_type2) == (*value1 == *value2));
+ Type* const_type1 = T.NewConstant(value1);
+ Type* const_type2 = T.NewConstant(value2);
+ if (const_type1->IsOtherNumberConstant() &&
+ const_type2->IsOtherNumberConstant()) {
+ CHECK(const_type1->Maybe(const_type2) ==
+ (const_type1->AsOtherNumberConstant()->Value() ==
+ const_type2->AsOtherNumberConstant()->Value()));
+ } else if (const_type1->IsRange() && const_type2->IsRange()) {
+ CHECK(Equal(const_type1, const_type2) ==
+ ((const_type1->AsRange()->Min() ==
+ const_type2->AsRange()->Min()) &&
+ (const_type1->AsRange()->Max() ==
+ const_type2->AsRange()->Max())));
+ } else {
+ CHECK(const_type1->Maybe(const_type2) == (*value1 == *value2));
+ }
}
}
@@ -1054,20 +1073,6 @@ struct Tests {
CHECK(type1->Max() == range->Max());
}
}
-
- // GetRange(Union(Constant(x), Range(min,max))) == Range(min, max).
- for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- Type* type1 = *it1;
- Type* type2 = *it2;
- if (type1->IsConstant() && type2->IsRange()) {
- Type* u = T.Union(type1, type2);
-
- CHECK(type2->Min() == u->GetRange()->Min());
- CHECK(type2->Max() == u->GetRange()->Max());
- }
- }
- }
}
};
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index dde26d2676..abaa058f85 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -1141,7 +1141,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
// simulate incremental marking.
FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
- ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(ec_page);
heap::SimulateIncrementalMarking(heap);
// Disable stress compaction mode in order to let GC do scavenge.
FLAG_stress_compaction = false;
@@ -1459,7 +1459,7 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
// Heap is ready, force |ec_page| to become an evacuation candidate and
// simulate incremental marking.
- ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ heap::ForceEvacuationCandidate(ec_page);
heap::SimulateIncrementalMarking(heap);
// Check that everything is ready for triggering incremental write barrier
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 980f0b6538..21eae27ca0 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -150,7 +150,7 @@ TEST(UniqueSet_Add) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -177,7 +177,7 @@ TEST(UniqueSet_Remove) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -217,7 +217,7 @@ TEST(UniqueSet_Contains) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -248,7 +248,7 @@ TEST(UniqueSet_At) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set = new(&zone) UniqueSet<String>();
@@ -285,7 +285,7 @@ TEST(UniqueSet_Equals) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -323,7 +323,7 @@ TEST(UniqueSet_IsSubset1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -358,7 +358,7 @@ TEST(UniqueSet_IsSubset2) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -401,7 +401,7 @@ TEST(UniqueSet_IsSubsetExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
Unique<String> elements[] = {
A, B, C, D, E, F, G
@@ -424,7 +424,7 @@ TEST(UniqueSet_Intersect1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -465,7 +465,7 @@ TEST(UniqueSet_IntersectExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
Unique<String> elements[] = {
A, B, C, D, E, F, G
@@ -492,7 +492,7 @@ TEST(UniqueSet_Union1) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
UniqueSet<String>* set1 = new(&zone) UniqueSet<String>();
UniqueSet<String>* set2 = new(&zone) UniqueSet<String>();
@@ -533,7 +533,7 @@ TEST(UniqueSet_UnionExhaustive) {
MAKE_HANDLES_AND_DISALLOW_ALLOCATION;
MAKE_UNIQUES_A_B_C_D_E_F_G;
- Zone zone(CcTest::i_isolate()->allocator());
+ Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
Unique<String> elements[] = {
A, B, C, D, E, F, G
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index 16bfd737ea..90cdc8e989 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -54,27 +54,37 @@ class Types {
JS_OBJECT_TYPE, JSObject::kHeaderSize);
smi = handle(Smi::FromInt(666), isolate);
+ boxed_smi = isolate->factory()->NewHeapNumber(666);
signed32 = isolate->factory()->NewHeapNumber(0x40000000);
+ float1 = isolate->factory()->NewHeapNumber(1.53);
+ float2 = isolate->factory()->NewHeapNumber(0.53);
+ // float3 is identical to float1 in order to test that OtherNumberConstant
+ // types are equal by double value and not by handle pointer value.
+ float3 = isolate->factory()->NewHeapNumber(1.53);
object1 = isolate->factory()->NewJSObjectFromMap(object_map);
object2 = isolate->factory()->NewJSObjectFromMap(object_map);
array = isolate->factory()->NewJSArray(20);
uninitialized = isolate->factory()->uninitialized_value();
- SmiConstant = Type::Constant(smi, zone);
- Signed32Constant = Type::Constant(signed32, zone);
+ SmiConstant = Type::NewConstant(smi, zone);
+ Signed32Constant = Type::NewConstant(signed32, zone);
- ObjectConstant1 = Type::Constant(object1, zone);
- ObjectConstant2 = Type::Constant(object2, zone);
- ArrayConstant = Type::Constant(array, zone);
- UninitializedConstant = Type::Constant(uninitialized, zone);
+ ObjectConstant1 = Type::HeapConstant(object1, zone);
+ ObjectConstant2 = Type::HeapConstant(object2, zone);
+ ArrayConstant = Type::HeapConstant(array, zone);
+ UninitializedConstant = Type::HeapConstant(uninitialized, zone);
values.push_back(smi);
+ values.push_back(boxed_smi);
values.push_back(signed32);
values.push_back(object1);
values.push_back(object2);
values.push_back(array);
values.push_back(uninitialized);
+ values.push_back(float1);
+ values.push_back(float2);
+ values.push_back(float3);
for (ValueVector::iterator it = values.begin(); it != values.end(); ++it) {
- types.push_back(Type::Constant(*it, zone));
+ types.push_back(Type::NewConstant(*it, zone));
}
integers.push_back(isolate->factory()->NewNumber(-V8_INFINITY));
@@ -98,7 +108,11 @@ class Types {
Handle<i::Map> object_map;
Handle<i::Smi> smi;
+ Handle<i::HeapNumber> boxed_smi;
Handle<i::HeapNumber> signed32;
+ Handle<i::HeapNumber> float1;
+ Handle<i::HeapNumber> float2;
+ Handle<i::HeapNumber> float3;
Handle<i::JSObject> object1;
Handle<i::JSObject> object2;
Handle<i::JSArray> array;
@@ -129,8 +143,12 @@ class Types {
Type* Of(Handle<i::Object> value) { return Type::Of(value, zone_); }
- Type* Constant(Handle<i::Object> value) {
- return Type::Constant(value, zone_);
+ Type* NewConstant(Handle<i::Object> value) {
+ return Type::NewConstant(value, zone_);
+ }
+
+ Type* HeapConstant(Handle<i::HeapObject> value) {
+ return Type::HeapConstant(value, zone_);
}
Type* Range(double min, double max) { return Type::Range(min, max, zone_); }
@@ -170,7 +188,7 @@ class Types {
}
case 1: { // constant
int i = rng_->NextInt(static_cast<int>(values.size()));
- return Type::Constant(values[i], zone_);
+ return Type::NewConstant(values[i], zone_);
}
case 2: { // range
int i = rng_->NextInt(static_cast<int>(integers.size()));
diff --git a/deps/v8/test/cctest/wasm/test-managed.cc b/deps/v8/test/cctest/wasm/test-managed.cc
new file mode 100644
index 0000000000..00b1c9bb57
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-managed.cc
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/wasm/managed.h"
+
+#include "test/cctest/cctest.h"
+#include "test/common/wasm/test-signatures.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+
+class DeleteRecorder {
+ public:
+ explicit DeleteRecorder(bool* deleted) : deleted_(deleted) {
+ *deleted_ = false;
+ }
+ ~DeleteRecorder() { *deleted_ = true; }
+
+ private:
+ bool* deleted_;
+};
+
+TEST(ManagedCollect) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ bool deleted = false;
+ DeleteRecorder* d = new DeleteRecorder(&deleted);
+
+ {
+ HandleScope scope(isolate);
+ auto handle = Managed<DeleteRecorder>::New(isolate, d);
+ USE(handle);
+ }
+
+ CcTest::CollectAllAvailableGarbage();
+
+ CHECK(deleted);
+}
+
+TEST(ManagedCollectNoDelete) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ bool deleted = false;
+ DeleteRecorder* d = new DeleteRecorder(&deleted);
+
+ {
+ HandleScope scope(isolate);
+ auto handle = Managed<DeleteRecorder>::New(isolate, d, false);
+ USE(handle);
+ }
+
+ CcTest::CollectAllAvailableGarbage();
+
+ CHECK(!deleted);
+ delete d;
+}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index 3d8d484295..e9a2d2da47 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -147,6 +147,93 @@ WASM_EXEC_TEST(I64Sub) {
}
}
+WASM_EXEC_TEST(I64AddUseOnlyLowWord) {
+ REQUIRE(I64Add);
+ REQUIRE(I32ConvertI64);
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(static_cast<int32_t>(*i + *j), r.Call(*i, *j));
+ }
+ }
+}
+
+WASM_EXEC_TEST(I64SubUseOnlyLowWord) {
+ REQUIRE(I64Sub);
+ REQUIRE(I32ConvertI64);
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(static_cast<int32_t>(*i - *j), r.Call(*i, *j));
+ }
+ }
+}
+
+WASM_EXEC_TEST(I64MulUseOnlyLowWord) {
+ REQUIRE(I64Mul);
+ REQUIRE(I32ConvertI64);
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ CHECK_EQ(static_cast<int32_t>(*i * *j), r.Call(*i, *j));
+ }
+ }
+}
+
+WASM_EXEC_TEST(I64ShlUseOnlyLowWord) {
+ REQUIRE(I64Shl);
+ REQUIRE(I32ConvertI64);
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>((*i) << (*j & 0x3f));
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+WASM_EXEC_TEST(I64ShrUseOnlyLowWord) {
+ REQUIRE(I64ShrU);
+ REQUIRE(I32ConvertI64);
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ FOR_UINT64_INPUTS(i) {
+ FOR_UINT64_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3f));
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
+WASM_EXEC_TEST(I64SarUseOnlyLowWord) {
+ REQUIRE(I64ShrS);
+ REQUIRE(I32ConvertI64);
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int64(),
+ MachineType::Int64());
+ BUILD(r, WASM_I32_CONVERT_I64(
+ WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ int32_t expected = static_cast<int32_t>((*i) >> (*j & 0x3f));
+ CHECK_EQ(expected, r.Call(*i, *j));
+ }
+ }
+}
+
WASM_EXEC_TEST(I64DivS) {
REQUIRE(I64DivS);
WasmRunner<int64_t> r(execution_mode, MachineType::Int64(),
@@ -213,7 +300,7 @@ WASM_EXEC_TEST(I64DivU_Trap) {
WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
MachineType::Uint64());
BUILD(r, WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(0, r.Call(asu64(0), asu64(100)));
+ CHECK_EQ(0u, r.Call(asu64(0), asu64(100)));
CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
CHECK_TRAP64(r.Call(asu64(1001), asu64(0)));
CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
@@ -284,7 +371,7 @@ WASM_EXEC_TEST(I64RemU_Trap) {
WasmRunner<uint64_t> r(execution_mode, MachineType::Uint64(),
MachineType::Uint64());
BUILD(r, WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- CHECK_EQ(17, r.Call(asu64(217), asu64(100)));
+ CHECK_EQ(17u, r.Call(asu64(217), asu64(100)));
CHECK_TRAP64(r.Call(asu64(100), asu64(0)));
CHECK_TRAP64(r.Call(asu64(1001), asu64(0)));
CHECK_TRAP64(r.Call(std::numeric_limits<uint64_t>::max(), asu64(0)));
@@ -335,22 +422,22 @@ WASM_EXEC_TEST(I64Shl) {
}
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 0, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 32, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 20, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i << 40, r.Call(*i)); }
}
@@ -371,22 +458,22 @@ WASM_EXEC_TEST(I64ShrU) {
}
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(0)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 0, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(32)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 32, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(20)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 20, r.Call(*i)); }
}
{
- WasmRunner<int64_t> r(execution_mode, MachineType::Int64());
+ WasmRunner<uint64_t> r(execution_mode, MachineType::Int64());
BUILD(r, WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64V_1(40)));
FOR_UINT64_INPUTS(i) { CHECK_EQ(*i >> 40, r.Call(*i)); }
}
@@ -548,7 +635,7 @@ WASM_EXEC_TEST(I64UConvertI32) {
REQUIRE(I64UConvertI32);
WasmRunner<int64_t> r(execution_mode, MachineType::Uint32());
BUILD(r, WASM_I64_UCONVERT_I32(WASM_GET_LOCAL(0)));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<uint64_t>(*i), r.Call(*i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(static_cast<int64_t>(*i), r.Call(*i)); }
}
WASM_EXEC_TEST(I64Popcnt) {
@@ -1279,8 +1366,8 @@ WASM_EXEC_TEST(LoadMemI64) {
BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_I8(0)));
- module.WriteMemory<int64_t>(&memory[0], 0xaabbccdd00112233LL);
- CHECK_EQ(0xaabbccdd00112233LL, r.Call());
+ module.WriteMemory<int64_t>(&memory[0], 0x1abbccdd00112233LL);
+ CHECK_EQ(0x1abbccdd00112233LL, r.Call());
module.WriteMemory<int64_t>(&memory[0], 0x33aabbccdd001122LL);
CHECK_EQ(0x33aabbccdd001122LL, r.Call());
@@ -1300,8 +1387,8 @@ WASM_EXEC_TEST(LoadMemI64_alignment) {
BUILD(r,
WASM_LOAD_MEM_ALIGNMENT(MachineType::Int64(), WASM_I8(0), alignment));
- module.WriteMemory<int64_t>(&memory[0], 0xaabbccdd00112233LL);
- CHECK_EQ(0xaabbccdd00112233LL, r.Call());
+ module.WriteMemory<int64_t>(&memory[0], 0x1abbccdd00112233LL);
+ CHECK_EQ(0x1abbccdd00112233LL, r.Call());
module.WriteMemory<int64_t>(&memory[0], 0x33aabbccdd001122LL);
CHECK_EQ(0x33aabbccdd001122LL, r.Call());
@@ -1467,7 +1554,7 @@ static void CompileCallIndirectMany(LocalType param) {
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; num_params++) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
HandleScope scope(CcTest::InitIsolateOnce());
TestingModule module(kExecuteCompiled);
FunctionSig* sig = sigs.many(&zone, kAstStmt, param, num_params);
@@ -1483,7 +1570,7 @@ static void CompileCallIndirectMany(LocalType param) {
ADD_CODE(code, kExprGetLocal, p);
}
ADD_CODE(code, kExprI8Const, 0);
- ADD_CODE(code, kExprCallIndirect, 1);
+ ADD_CODE(code, kExprCallIndirect, 1, TABLE_ZERO);
t.Build(&code[0], &code[0] + code.size());
t.Compile();
@@ -1507,7 +1594,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
int num_params = static_cast<int>(arraysize(mixed)) - start;
for (int which = 0; which < num_params; which++) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
TestingModule module(execution_mode);
module.AddMemory(1024);
MachineType* memtypes = &mixed[start];
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 007fc7a864..bd80e28f9f 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -237,33 +237,33 @@ WASM_EXEC_TEST(StoreMemI32_oob_asm) {
TEST_BODY(kExprI32AsmjsStoreMem16) \
TEST_BODY(kExprI32AsmjsStoreMem)
-#define INT_LOAD_TEST(OP_TYPE) \
- TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
- TestingModule module(kExecuteCompiled); \
- module.origin = kAsmJsOrigin; \
- WasmRunner<int32_t> r(&module, MachineType::Uint32()); \
- BUILD(r, WASM_UNOP(OP_TYPE, WASM_GET_LOCAL(0))); \
- CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0], \
- RelocInfo::WASM_MEMORY_REFERENCE)); \
- CHECK_NE( \
- 0, GetMatchingRelocInfoCount(module.instance->function_code[0], \
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
+#define INT_LOAD_TEST(OP_TYPE) \
+ TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
+ TestingModule module(kExecuteCompiled); \
+ module.origin = kAsmJsOrigin; \
+ WasmRunner<int32_t> r(&module, MachineType::Uint32()); \
+ BUILD(r, WASM_UNOP(OP_TYPE, WASM_GET_LOCAL(0))); \
+ CHECK_EQ(1u, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_REFERENCE)); \
+ CHECK_NE( \
+ 0u, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
}
FOREACH_INT_CHECKED_LOAD_OP(INT_LOAD_TEST)
-#define INT_STORE_TEST(OP_TYPE) \
- TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
- TestingModule module(kExecuteCompiled); \
- module.origin = kAsmJsOrigin; \
- WasmRunner<int32_t> r(&module, MachineType::Uint32(), \
- MachineType::Uint32()); \
- BUILD(r, WASM_BINOP(OP_TYPE, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))); \
- CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0], \
- RelocInfo::WASM_MEMORY_REFERENCE)); \
- CHECK_NE( \
- 0, GetMatchingRelocInfoCount(module.instance->function_code[0], \
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
+#define INT_STORE_TEST(OP_TYPE) \
+ TEST(RunWasm_AsmCheckedRelocInfo##OP_TYPE) { \
+ TestingModule module(kExecuteCompiled); \
+ module.origin = kAsmJsOrigin; \
+ WasmRunner<int32_t> r(&module, MachineType::Uint32(), \
+ MachineType::Uint32()); \
+ BUILD(r, WASM_BINOP(OP_TYPE, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))); \
+ CHECK_EQ(1u, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_REFERENCE)); \
+ CHECK_NE( \
+ 0u, GetMatchingRelocInfoCount(module.instance->function_code[0], \
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE)); \
}
FOREACH_INT_CHECKED_STORE_OP(INT_STORE_TEST)
@@ -274,10 +274,11 @@ TEST(RunWasm_AsmCheckedLoadFloat32RelocInfo) {
WasmRunner<float> r(&module, MachineType::Uint32());
BUILD(r, WASM_UNOP(kExprF32AsmjsLoadMem, WASM_GET_LOCAL(0)));
- CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ CHECK_EQ(1u, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0u,
+ GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
}
TEST(RunWasm_AsmCheckedStoreFloat32RelocInfo) {
@@ -287,10 +288,11 @@ TEST(RunWasm_AsmCheckedStoreFloat32RelocInfo) {
BUILD(r, WASM_BINOP(kExprF32AsmjsStoreMem, WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1)));
- CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ CHECK_EQ(1u, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0u,
+ GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
}
TEST(RunWasm_AsmCheckedLoadFloat64RelocInfo) {
@@ -299,10 +301,11 @@ TEST(RunWasm_AsmCheckedLoadFloat64RelocInfo) {
WasmRunner<double> r(&module, MachineType::Uint32());
BUILD(r, WASM_UNOP(kExprF64AsmjsLoadMem, WASM_GET_LOCAL(0)));
- CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ CHECK_EQ(1u, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0u,
+ GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
}
TEST(RunWasm_AsmCheckedStoreFloat64RelocInfo) {
@@ -312,8 +315,9 @@ TEST(RunWasm_AsmCheckedStoreFloat64RelocInfo) {
BUILD(r, WASM_BINOP(kExprF64AsmjsStoreMem, WASM_GET_LOCAL(0),
WASM_GET_LOCAL(1)));
- CHECK_EQ(1, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_REFERENCE));
- CHECK_NE(0, GetMatchingRelocInfoCount(module.instance->function_code[0],
- RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
+ CHECK_EQ(1u, GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_REFERENCE));
+ CHECK_NE(0u,
+ GetMatchingRelocInfoCount(module.instance->function_code[0],
+ RelocInfo::WASM_MEMORY_SIZE_REFERENCE));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 0489d016d7..47d97f4e48 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -202,7 +202,8 @@ TEST(Breakpoint_I32Add) {
thread->Run(); // run to next breakpoint
// Check the thread stopped at the right pc.
CHECK_EQ(WasmInterpreter::PAUSED, thread->state());
- CHECK_EQ(kLocalsDeclSize + offsets[i], thread->GetBreakpointPc());
+ CHECK_EQ(static_cast<size_t>(kLocalsDeclSize + offsets[i]),
+ thread->GetBreakpointPc());
}
thread->Run(); // run to completion
@@ -280,7 +281,8 @@ TEST(Breakpoint_I32And_disable) {
thread->Run(); // run to next breakpoint
// Check the thread stopped at the right pc.
CHECK_EQ(WasmInterpreter::PAUSED, thread->state());
- CHECK_EQ(kLocalsDeclSize + offsets[0], thread->GetBreakpointPc());
+ CHECK_EQ(static_cast<size_t>(kLocalsDeclSize + offsets[0]),
+ thread->GetBreakpointPc());
}
thread->Run(); // run to completion
@@ -333,6 +335,72 @@ TEST(GrowMemoryInvalidSize) {
}
}
+TEST(TestPossibleNondeterminism) {
+ {
+ // F32Div may produced NaN
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<float> r(&module, MachineType::Float32(),
+ MachineType::Float32());
+ BUILD(r, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ r.Call(1048575.5f, 2.5f);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(0.0f, 0.0f);
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ // F32Sqrt may produced NaN
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<float> r(&module, MachineType::Float32());
+ BUILD(r, WASM_F32_SQRT(WASM_GET_LOCAL(0)));
+ r.Call(16.0f);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(-1048575.5f);
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ // F32Mul may produced NaN
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<float> r(&module, MachineType::Float32(),
+ MachineType::Float32());
+ BUILD(r, WASM_F32_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ r.Call(1048575.5f, 2.5f);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<float>::infinity(), 0.0f);
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ // F64Div may produced NaN
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<double> r(&module, MachineType::Float64(),
+ MachineType::Float64());
+ BUILD(r, WASM_F64_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ r.Call(1048575.5, 2.5);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(0.0, 0.0);
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ // F64Sqrt may produced NaN
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<double> r(&module, MachineType::Float64());
+ BUILD(r, WASM_F64_SQRT(WASM_GET_LOCAL(0)));
+ r.Call(1048575.5);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(-1048575.5);
+ CHECK(r.possible_nondeterminism());
+ }
+ {
+ // F64Mul may produced NaN
+ TestingModule module(kExecuteInterpreted);
+ WasmRunner<double> r(&module, MachineType::Float64(),
+ MachineType::Float64());
+ BUILD(r, WASM_F64_MUL(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ r.Call(1048575.5, 2.5);
+ CHECK(!r.possible_nondeterminism());
+ r.Call(std::numeric_limits<double>::infinity(), 0.0);
+ CHECK(r.possible_nondeterminism());
+ }
+}
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index c0307e0511..4a74128f50 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -429,7 +429,7 @@ void RunJSSelectAlignTest(int num_args, int num_params) {
HandleScope scope(isolate);
TestingModule module;
uint32_t js_index = AddJSSelector(&module, &sig, which);
- CHECK_EQ(0, js_index);
+ CHECK_EQ(0u, js_index);
WasmFunctionCompiler t(&sig, &module);
t.Build(&code[0], &code[end]);
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index b358208bc3..94054bd388 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -5,10 +5,13 @@
#include <stdlib.h>
#include <string.h>
+#include "src/snapshot/code-serializer.h"
+#include "src/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
#include "test/cctest/cctest.h"
@@ -21,6 +24,16 @@ using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
namespace {
+void Cleanup(Isolate* isolate = nullptr) {
+ // By sending a low memory notifications, we will try hard to collect all
+ // garbage and will therefore also invoke all weak callbacks of actually
+ // unreachable persistent handles.
+ if (!isolate) {
+ isolate = CcTest::InitIsolateOnce();
+ }
+ reinterpret_cast<v8::Isolate*>(isolate)->LowMemoryNotification();
+}
+
void TestModule(Zone* zone, WasmModuleBuilder* builder,
int32_t expected_result) {
ZoneBuffer buffer(zone);
@@ -48,369 +61,856 @@ void TestModuleException(Zone* zone, WasmModuleBuilder* builder) {
isolate->clear_pending_exception();
}
-void ExportAs(WasmFunctionBuilder* f, const char* name) {
- f->SetExported();
- f->SetName(name, static_cast<int>(strlen(name)));
-}
-
-void ExportAsMain(WasmFunctionBuilder* f) {
- static const char kMainName[] = "main";
- ExportAs(f, kMainName);
-}
+void ExportAsMain(WasmFunctionBuilder* f) { f->ExportAs(CStrVector("main")); }
} // namespace
TEST(Run_WasmModule_Return114) {
- static const int32_t kReturnValue = 114;
- TestSignatures sigs;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code[] = {WASM_I8(kReturnValue)};
- f->EmitCode(code, sizeof(code));
- TestModule(&zone, builder, kReturnValue);
+ {
+ static const int32_t kReturnValue = 114;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_I8(kReturnValue)};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, kReturnValue);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_CallAdd) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
- TestSignatures sigs;
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
-
- WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_ii());
- uint16_t param1 = 0;
- uint16_t param2 = 1;
- byte code1[] = {WASM_I32_ADD(WASM_GET_LOCAL(param1), WASM_GET_LOCAL(param2))};
- f1->EmitCode(code1, sizeof(code1));
-
- WasmFunctionBuilder* f2 = builder->AddFunction(sigs.i_v());
-
- ExportAsMain(f2);
- byte code2[] = {
- WASM_CALL_FUNCTION(f1->func_index(), WASM_I8(77), WASM_I8(22))};
- f2->EmitCode(code2, sizeof(code2));
- TestModule(&zone, builder, 99);
+ {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+
+ WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_ii());
+ uint16_t param1 = 0;
+ uint16_t param2 = 1;
+ byte code1[] = {
+ WASM_I32_ADD(WASM_GET_LOCAL(param1), WASM_GET_LOCAL(param2))};
+ f1->EmitCode(code1, sizeof(code1));
+
+ WasmFunctionBuilder* f2 = builder->AddFunction(sigs.i_v());
+
+ ExportAsMain(f2);
+ byte code2[] = {
+ WASM_CALL_FUNCTION(f1->func_index(), WASM_I8(77), WASM_I8(22))};
+ f2->EmitCode(code2, sizeof(code2));
+ TestModule(&zone, builder, 99);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_ReadLoadedDataSegment) {
- static const byte kDataSegmentDest0 = 12;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
- TestSignatures sigs;
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
-
- ExportAsMain(f);
- byte code[] = {
- WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kDataSegmentDest0))};
- f->EmitCode(code, sizeof(code));
- byte data[] = {0xaa, 0xbb, 0xcc, 0xdd};
- builder->AddDataSegment(data, sizeof(data), kDataSegmentDest0);
- TestModule(&zone, builder, 0xddccbbaa);
+ {
+ static const byte kDataSegmentDest0 = 12;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kDataSegmentDest0))};
+ f->EmitCode(code, sizeof(code));
+ byte data[] = {0xaa, 0xbb, 0xcc, 0xdd};
+ builder->AddDataSegment(data, sizeof(data), kDataSegmentDest0);
+ TestModule(&zone, builder, 0xddccbbaa);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_CheckMemoryIsZero) {
- static const int kCheckSize = 16 * 1024;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
- TestSignatures sigs;
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
-
- uint16_t localIndex = f->AddLocal(kAstI32);
- ExportAsMain(f);
- byte code[] = {WASM_BLOCK_I(
- WASM_WHILE(
- WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32V_3(kCheckSize)),
- WASM_IF_ELSE(
- WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(localIndex)),
- WASM_BRV(3, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
- WASM_I8(11))};
- f->EmitCode(code, sizeof(code));
- TestModule(&zone, builder, 11);
+ {
+ static const int kCheckSize = 16 * 1024;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+
+ uint16_t localIndex = f->AddLocal(kAstI32);
+ ExportAsMain(f);
+ byte code[] = {WASM_BLOCK_I(
+ WASM_WHILE(
+ WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I32V_3(kCheckSize)),
+ WASM_IF_ELSE(
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(localIndex)),
+ WASM_BRV(3, WASM_I8(-1)), WASM_INC_LOCAL_BY(localIndex, 4))),
+ WASM_I8(11))};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, 11);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_CallMain_recursive) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
- TestSignatures sigs;
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
-
- uint16_t localIndex = f->AddLocal(kAstI32);
- ExportAsMain(f);
- byte code[] = {
- WASM_SET_LOCAL(localIndex,
- WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
- WASM_IF_ELSE_I(WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I8(5)),
- WASM_SEQ(WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
- WASM_INC_LOCAL(localIndex)),
- WASM_CALL_FUNCTION0(0)),
- WASM_I8(55))};
- f->EmitCode(code, sizeof(code));
- TestModule(&zone, builder, 55);
+ {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+
+ uint16_t localIndex = f->AddLocal(kAstI32);
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_SET_LOCAL(localIndex,
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)),
+ WASM_IF_ELSE_I(WASM_I32_LTS(WASM_GET_LOCAL(localIndex), WASM_I8(5)),
+ WASM_SEQ(WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_INC_LOCAL(localIndex)),
+ WASM_CALL_FUNCTION0(0)),
+ WASM_I8(55))};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, 55);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_Global) {
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
- TestSignatures sigs;
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- uint32_t global1 = builder->AddGlobal(kAstI32, 0);
- uint32_t global2 = builder->AddGlobal(kAstI32, 0);
- WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
- byte code1[] = {
- WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
- f1->EmitCode(code1, sizeof(code1));
- WasmFunctionBuilder* f2 = builder->AddFunction(sigs.i_v());
- ExportAsMain(f2);
- byte code2[] = {WASM_SET_GLOBAL(global1, WASM_I32V_1(56)),
- WASM_SET_GLOBAL(global2, WASM_I32V_1(41)),
- WASM_RETURN1(WASM_CALL_FUNCTION0(f1->func_index()))};
- f2->EmitCode(code2, sizeof(code2));
- TestModule(&zone, builder, 97);
-}
-
-TEST(Run_WasmModule_Serialization) {
- static const char* kFunctionName = "increment";
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- TestSignatures sigs;
-
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
- byte code[] = {WASM_GET_LOCAL(0), kExprI32Const, 1, kExprI32Add};
- f->EmitCode(code, sizeof(code));
- ExportAs(f, kFunctionName);
-
- ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
-
- Isolate* isolate = CcTest::InitIsolateOnce();
- ErrorThrower thrower(isolate, "");
- v8::WasmCompiledModule::SerializedModule data;
{
- HandleScope scope(isolate);
- testing::SetupIsolateForWasmModule(isolate);
-
- ModuleResult decoding_result = DecodeWasmModule(
- isolate, &zone, buffer.begin(), buffer.end(), false, kWasmOrigin);
- std::unique_ptr<const WasmModule> module(decoding_result.val);
- CHECK(!decoding_result.failed());
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint32_t global1 = builder->AddGlobal(kAstI32, 0);
+ uint32_t global2 = builder->AddGlobal(kAstI32, 0);
+ WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
+ byte code1[] = {
+ WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
+ f1->EmitCode(code1, sizeof(code1));
+ WasmFunctionBuilder* f2 = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f2);
+ byte code2[] = {WASM_SET_GLOBAL(global1, WASM_I32V_1(56)),
+ WASM_SET_GLOBAL(global2, WASM_I32V_1(41)),
+ WASM_RETURN1(WASM_CALL_FUNCTION0(f1->func_index()))};
+ f2->EmitCode(code2, sizeof(code2));
+ TestModule(&zone, builder, 97);
+ }
+ Cleanup();
+}
- MaybeHandle<FixedArray> compiled_module =
- module->CompileFunctions(isolate, &thrower);
- CHECK(!compiled_module.is_null());
- Handle<JSObject> module_obj = CreateCompiledModuleObject(
- isolate, compiled_module.ToHandleChecked(), ModuleOrigin::kWasmOrigin);
- v8::Local<v8::Object> v8_module_obj = v8::Utils::ToLocal(module_obj);
- CHECK(v8_module_obj->IsWebAssemblyCompiledModule());
+// Approximate gtest TEST_F style, in case we adopt gtest.
+class WasmSerializationTest {
+ public:
+ WasmSerializationTest() : zone_(&allocator_, ZONE_NAME) {
+ // Don't call here if we move to gtest.
+ SetUp();
+ }
- v8::Local<v8::WasmCompiledModule> v8_compiled_module =
- v8_module_obj.As<v8::WasmCompiledModule>();
- data = v8_compiled_module->Serialize();
+ void ClearSerializedData() {
+ serialized_bytes_.first = nullptr;
+ serialized_bytes_.second = 0;
}
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator =
- CcTest::InitIsolateOnce()->array_buffer_allocator();
+ void InvalidateVersion() {
+ uint32_t* buffer = reinterpret_cast<uint32_t*>(
+ const_cast<uint8_t*>(serialized_bytes_.first));
+ buffer[SerializedCodeData::kVersionHashOffset] = Version::Hash() + 1;
+ }
- v8::Isolate* v8_isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(v8_isolate);
- v8::HandleScope new_scope(v8_isolate);
- v8::Local<v8::Context> new_ctx = v8::Context::New(v8_isolate);
- new_ctx->Enter();
- isolate = reinterpret_cast<Isolate*>(v8_isolate);
- testing::SetupIsolateForWasmModule(isolate);
+ void InvalidateWireBytes() {
+ memset(const_cast<uint8_t*>(wire_bytes_.first), '\0',
+ wire_bytes_.second / 2);
+ }
+ v8::MaybeLocal<v8::WasmCompiledModule> Deserialize() {
+ ErrorThrower thrower(current_isolate(), "");
v8::MaybeLocal<v8::WasmCompiledModule> deserialized =
- v8::WasmCompiledModule::Deserialize(v8_isolate, data);
- v8::Local<v8::WasmCompiledModule> compiled_module;
- CHECK(deserialized.ToLocal(&compiled_module));
+ v8::WasmCompiledModule::DeserializeOrCompile(
+ current_isolate_v8(), serialized_bytes(), wire_bytes());
+ return deserialized;
+ }
+
+ void DeserializeAndRun() {
+ ErrorThrower thrower(current_isolate(), "");
+ v8::Local<v8::WasmCompiledModule> deserialized_module;
+ CHECK(Deserialize().ToLocal(&deserialized_module));
Handle<JSObject> module_object =
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*compiled_module));
+ Handle<JSObject>::cast(v8::Utils::OpenHandle(*deserialized_module));
+ {
+ DisallowHeapAllocation assume_no_gc;
+ Handle<WasmCompiledModule> compiled_part(
+ WasmCompiledModule::cast(module_object->GetInternalField(0)),
+ current_isolate());
+ CHECK_EQ(memcmp(compiled_part->module_bytes()->GetCharsAddress(),
+ wire_bytes().first, wire_bytes().second),
+ 0);
+ }
Handle<JSObject> instance =
- WasmModule::Instantiate(isolate, &thrower, module_object,
+ WasmModule::Instantiate(current_isolate(), &thrower, module_object,
Handle<JSReceiver>::null(),
Handle<JSArrayBuffer>::null())
.ToHandleChecked();
- Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(41), isolate)};
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt(41), current_isolate())};
int32_t result = testing::CallWasmFunctionForTesting(
- isolate, instance, &thrower, kFunctionName, 1, params,
+ current_isolate(), instance, &thrower, kFunctionName, 1, params,
ModuleOrigin::kWasmOrigin);
CHECK(result == 42);
- new_ctx->Exit();
}
+
+ Isolate* current_isolate() {
+ return reinterpret_cast<Isolate*>(current_isolate_v8_);
+ }
+
+ ~WasmSerializationTest() {
+ // Don't call from here if we move to gtest
+ TearDown();
+ }
+
+ private:
+ static const char* kFunctionName;
+
+ Zone* zone() { return &zone_; }
+ const v8::WasmCompiledModule::CallerOwnedBuffer& wire_bytes() const {
+ return wire_bytes_;
+ }
+
+ const v8::WasmCompiledModule::CallerOwnedBuffer& serialized_bytes() const {
+ return serialized_bytes_;
+ }
+
+ v8::Isolate* current_isolate_v8() { return current_isolate_v8_; }
+
+ void SetUp() {
+ WasmModuleBuilder* builder = new (zone()) WasmModuleBuilder(zone());
+ TestSignatures sigs;
+
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ byte code[] = {WASM_GET_LOCAL(0), kExprI32Const, 1, kExprI32Add};
+ f->EmitCode(code, sizeof(code));
+ f->ExportAs(CStrVector(kFunctionName));
+
+ ZoneBuffer buffer(&zone_);
+ builder->WriteTo(buffer);
+
+ Isolate* serialization_isolate = CcTest::InitIsolateOnce();
+ ErrorThrower thrower(serialization_isolate, "");
+ uint8_t* bytes = nullptr;
+ size_t bytes_size = 0;
+ {
+ HandleScope scope(serialization_isolate);
+ testing::SetupIsolateForWasmModule(serialization_isolate);
+
+ ModuleResult decoding_result =
+ DecodeWasmModule(serialization_isolate, buffer.begin(), buffer.end(),
+ false, kWasmOrigin);
+ CHECK(!decoding_result.failed());
+
+ Handle<WasmModuleWrapper> module_wrapper = WasmModuleWrapper::New(
+ serialization_isolate, const_cast<WasmModule*>(decoding_result.val));
+
+ MaybeHandle<WasmCompiledModule> compiled_module =
+ decoding_result.val->CompileFunctions(serialization_isolate,
+ module_wrapper, &thrower);
+ CHECK(!compiled_module.is_null());
+ Handle<JSObject> module_obj = WasmModuleObject::New(
+ serialization_isolate, compiled_module.ToHandleChecked());
+ v8::Local<v8::Object> v8_module_obj = v8::Utils::ToLocal(module_obj);
+ CHECK(v8_module_obj->IsWebAssemblyCompiledModule());
+
+ v8::Local<v8::WasmCompiledModule> v8_compiled_module =
+ v8_module_obj.As<v8::WasmCompiledModule>();
+ v8::Local<v8::String> uncompiled_bytes =
+ v8_compiled_module->GetWasmWireBytes();
+ bytes_size = static_cast<size_t>(uncompiled_bytes->Length());
+ bytes = zone()->NewArray<uint8_t>(bytes_size);
+ uncompiled_bytes->WriteOneByte(bytes, 0, uncompiled_bytes->Length(),
+ v8::String::NO_NULL_TERMINATION);
+ // keep alive data_ until the end
+ data_ = v8_compiled_module->Serialize();
+ }
+
+ wire_bytes_ = {const_cast<const uint8_t*>(bytes), bytes_size};
+
+ serialized_bytes_ = {data_.first.get(), data_.second};
+
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator =
+ serialization_isolate->array_buffer_allocator();
+
+ current_isolate_v8_ = v8::Isolate::New(create_params);
+ v8::HandleScope new_scope(current_isolate_v8());
+ v8::Local<v8::Context> deserialization_context =
+ v8::Context::New(current_isolate_v8());
+ deserialization_context->Enter();
+ testing::SetupIsolateForWasmModule(current_isolate());
+ }
+
+ void TearDown() {
+ current_isolate_v8()->Dispose();
+ current_isolate_v8_ = nullptr;
+ }
+
+ v8::internal::AccountingAllocator allocator_;
+ Zone zone_;
+ v8::WasmCompiledModule::SerializedModule data_;
+ v8::WasmCompiledModule::CallerOwnedBuffer wire_bytes_;
+ v8::WasmCompiledModule::CallerOwnedBuffer serialized_bytes_;
+ v8::Isolate* current_isolate_v8_;
+};
+
+const char* WasmSerializationTest::kFunctionName = "increment";
+
+TEST(DeserializeValidModule) {
+ WasmSerializationTest test;
+ {
+ HandleScope scope(test.current_isolate());
+ test.DeserializeAndRun();
+ }
+ Cleanup(test.current_isolate());
+ Cleanup();
+}
+
+TEST(DeserializeMismatchingVersion) {
+ WasmSerializationTest test;
+ {
+ HandleScope scope(test.current_isolate());
+ test.InvalidateVersion();
+ test.DeserializeAndRun();
+ }
+ Cleanup(test.current_isolate());
+ Cleanup();
+}
+
+TEST(DeserializeNoSerializedData) {
+ WasmSerializationTest test;
+ {
+ HandleScope scope(test.current_isolate());
+ test.ClearSerializedData();
+ test.DeserializeAndRun();
+ }
+ Cleanup(test.current_isolate());
+ Cleanup();
+}
+
+TEST(DeserializeWireBytesAndSerializedDataInvalid) {
+ WasmSerializationTest test;
+ {
+ HandleScope scope(test.current_isolate());
+ test.InvalidateVersion();
+ test.InvalidateWireBytes();
+ test.Deserialize();
+ }
+ Cleanup(test.current_isolate());
+ Cleanup();
}
TEST(MemorySize) {
- // Initial memory size is 16, see wasm-module-builder.cc
- static const int kExpectedValue = 16;
- TestSignatures sigs;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code[] = {WASM_MEMORY_SIZE};
- f->EmitCode(code, sizeof(code));
- TestModule(&zone, builder, kExpectedValue);
+ {
+ // Initial memory size is 16, see wasm-module-builder.cc
+ static const int kExpectedValue = 16;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_MEMORY_SIZE};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, kExpectedValue);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_MemSize_GrowMem) {
- // Initial memory size = 16 + GrowMemory(10)
- static const int kExpectedValue = 26;
- TestSignatures sigs;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code[] = {WASM_GROW_MEMORY(WASM_I8(10)), WASM_DROP, WASM_MEMORY_SIZE};
- f->EmitCode(code, sizeof(code));
- TestModule(&zone, builder, kExpectedValue);
+ {
+ // Initial memory size = 16 + GrowMemory(10)
+ static const int kExpectedValue = 26;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_I8(10)), WASM_DROP, WASM_MEMORY_SIZE};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, kExpectedValue);
+ }
+ Cleanup();
+}
+
+TEST(GrowMemoryZero) {
+ {
+ // Initial memory size is 16, see wasm-module-builder.cc
+ static const int kExpectedValue = 16;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_I32V(0))};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, kExpectedValue);
+ }
+ Cleanup();
+}
+
+class InterruptThread : public v8::base::Thread {
+ public:
+ explicit InterruptThread(Isolate* isolate, int32_t* memory)
+ : Thread(Options("TestInterruptLoop")),
+ isolate_(isolate),
+ memory_(memory) {}
+
+ static void OnInterrupt(v8::Isolate* isolate, void* data) {
+ int32_t* m = reinterpret_cast<int32_t*>(data);
+ // Set the interrupt location to 0 to break the loop in {TestInterruptLoop}.
+ int32_t* ptr = &m[interrupt_location_];
+ WriteLittleEndianValue<int32_t>(ptr, interrupt_value_);
+ }
+
+ virtual void Run() {
+ // Wait for the main thread to write the signal value.
+ int32_t val = 0;
+ do {
+ val = memory_[0];
+ val = ReadLittleEndianValue<int32_t>(&val);
+ } while (val != signal_value_);
+ isolate_->RequestInterrupt(&OnInterrupt, const_cast<int32_t*>(memory_));
+ }
+
+ Isolate* isolate_;
+ volatile int32_t* memory_;
+ static const int32_t interrupt_location_ = 10;
+ static const int32_t interrupt_value_ = 154;
+ static const int32_t signal_value_ = 1221;
+};
+
+TEST(TestInterruptLoop) {
+ {
+ // Do not dump the module of this test because it contains an infinite loop.
+ if (FLAG_dump_wasm_module) return;
+
+ // This test tests that WebAssembly loops can be interrupted, i.e. that if
+ // an
+ // InterruptCallback is registered by {Isolate::RequestInterrupt}, then the
+ // InterruptCallback is eventually called even if a loop in WebAssembly code
+ // is executed.
+ // Test setup:
+ // The main thread executes a WebAssembly function with a loop. In the loop
+ // {signal_value_} is written to memory to signal a helper thread that the
+ // main thread reached the loop in the WebAssembly program. When the helper
+ // thread reads {signal_value_} from memory, it registers the
+ // InterruptCallback. Upon exeution, the InterruptCallback write into the
+ // WebAssemblyMemory to end the loop in the WebAssembly program.
+ TestSignatures sigs;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {
+ WASM_LOOP(
+ WASM_IFB(WASM_NOT(WASM_LOAD_MEM(
+ MachineType::Int32(),
+ WASM_I32V(InterruptThread::interrupt_location_ * 4))),
+ WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
+ WASM_I32V(InterruptThread::signal_value_)),
+ WASM_BR(1))),
+ WASM_I32V(121)};
+ f->EmitCode(code, sizeof(code));
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "Test");
+ const Handle<JSObject> instance =
+ testing::CompileInstantiateWasmModuleForTesting(
+ isolate, &thrower, buffer.begin(), buffer.end(),
+ ModuleOrigin::kWasmOrigin);
+ CHECK(!instance.is_null());
+
+ MaybeHandle<JSArrayBuffer> maybe_memory =
+ GetInstanceMemory(isolate, instance);
+ Handle<JSArrayBuffer> memory = maybe_memory.ToHandleChecked();
+ int32_t* memory_array = reinterpret_cast<int32_t*>(memory->backing_store());
+
+ InterruptThread thread(isolate, memory_array);
+ thread.Start();
+ testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr,
+ ModuleOrigin::kWasmOrigin);
+ int32_t val = memory_array[InterruptThread::interrupt_location_];
+ CHECK_EQ(InterruptThread::interrupt_value_,
+ ReadLittleEndianValue<int32_t>(&val));
+ }
+ Cleanup();
}
TEST(Run_WasmModule_GrowMemoryInIf) {
- TestSignatures sigs;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_GROW_MEMORY(WASM_I32V(1)),
- WASM_I32V(12))};
- f->EmitCode(code, sizeof(code));
- TestModule(&zone, builder, 12);
+ {
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_IF_ELSE_I(WASM_I32V(0), WASM_GROW_MEMORY(WASM_I32V(1)),
+ WASM_I32V(12))};
+ f->EmitCode(code, sizeof(code));
+ TestModule(&zone, builder, 12);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_GrowMemOobOffset) {
- static const int kPageSize = 0x10000;
- // Initial memory size = 16 + GrowMemory(10)
- static const int index = kPageSize * 17 + 4;
- int value = 0xaced;
- TestSignatures sigs;
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
- ExportAsMain(f);
- byte code[] = {
- WASM_GROW_MEMORY(WASM_I8(1)),
- WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value))};
- f->EmitCode(code, sizeof(code));
- TestModuleException(&zone, builder);
+ {
+ static const int kPageSize = 0x10000;
+ // Initial memory size = 16 + GrowMemory(10)
+ static const int index = kPageSize * 17 + 4;
+ int value = 0xaced;
+ TestSignatures sigs;
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_I8(1)),
+ WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
+ WASM_I32V(value))};
+ f->EmitCode(code, sizeof(code));
+ TestModuleException(&zone, builder);
+ }
+ Cleanup();
}
TEST(Run_WasmModule_GrowMemOobFixedIndex) {
- static const int kPageSize = 0x10000;
- // Initial memory size = 16 + GrowMemory(10)
- static const int index = kPageSize * 26 + 4;
- int value = 0xaced;
- TestSignatures sigs;
- Isolate* isolate = CcTest::InitIsolateOnce();
- Zone zone(isolate->allocator());
+ {
+ static const int kPageSize = 0x10000;
+ // Initial memory size = 16 + GrowMemory(10)
+ static const int index = kPageSize * 26 + 4;
+ int value = 0xaced;
+ TestSignatures sigs;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Zone zone(isolate->allocator(), ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_GET_LOCAL(0)), WASM_DROP,
+ WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index),
+ WASM_I32V(value)),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index))};
+ f->EmitCode(code, sizeof(code));
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
- ExportAsMain(f);
- byte code[] = {
- WASM_GROW_MEMORY(WASM_GET_LOCAL(0)), WASM_DROP,
- WASM_STORE_MEM(MachineType::Int32(), WASM_I32V(index), WASM_I32V(value)),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_I32V(index))};
- f->EmitCode(code, sizeof(code));
+ HandleScope scope(isolate);
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ testing::SetupIsolateForWasmModule(isolate);
- HandleScope scope(isolate);
- ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
- testing::SetupIsolateForWasmModule(isolate);
+ ErrorThrower thrower(isolate, "Test");
+ Handle<JSObject> instance = testing::CompileInstantiateWasmModuleForTesting(
+ isolate, &thrower, buffer.begin(), buffer.end(),
+ ModuleOrigin::kWasmOrigin);
+ CHECK(!instance.is_null());
+
+ // Initial memory size is 16 pages, should trap till index > MemSize on
+ // consecutive GrowMem calls
+ for (uint32_t i = 1; i < 5; i++) {
+ Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(i), isolate)};
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ testing::RunWasmModuleForTesting(isolate, instance, 1, params,
+ ModuleOrigin::kWasmOrigin);
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+ }
+
+ Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(1), isolate)};
+ int32_t result = testing::RunWasmModuleForTesting(
+ isolate, instance, 1, params, ModuleOrigin::kWasmOrigin);
+ CHECK(result == 0xaced);
+ }
+ Cleanup();
+}
- Handle<JSObject> instance = testing::CompileInstantiateWasmModuleForTesting(
- isolate, &zone, buffer.begin(), buffer.end(), ModuleOrigin::kWasmOrigin);
- CHECK(!instance.is_null());
+TEST(Run_WasmModule_GrowMemOobVariableIndex) {
+ {
+ static const int kPageSize = 0x10000;
+ int value = 0xaced;
+ TestSignatures sigs;
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
+ ExportAsMain(f);
+ byte code[] = {WASM_GROW_MEMORY(WASM_I8(1)), WASM_DROP,
+ WASM_STORE_MEM(MachineType::Int32(), WASM_GET_LOCAL(0),
+ WASM_I32V(value)),
+ WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0))};
+ f->EmitCode(code, sizeof(code));
+
+ HandleScope scope(isolate);
+ ZoneBuffer buffer(&zone);
+ builder->WriteTo(buffer);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ ErrorThrower thrower(isolate, "Test");
+ Handle<JSObject> instance = testing::CompileInstantiateWasmModuleForTesting(
+ isolate, &thrower, buffer.begin(), buffer.end(),
+ ModuleOrigin::kWasmOrigin);
+
+ CHECK(!instance.is_null());
+
+ // Initial memory size is 16 pages, should trap till index > MemSize on
+ // consecutive GrowMem calls
+ for (int i = 1; i < 5; i++) {
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt((16 + i) * kPageSize - 3), isolate)};
+ v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ testing::RunWasmModuleForTesting(isolate, instance, 1, params,
+ ModuleOrigin::kWasmOrigin);
+ CHECK(try_catch.HasCaught());
+ isolate->clear_pending_exception();
+ }
+
+ for (int i = 1; i < 5; i++) {
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
+ int32_t result = testing::RunWasmModuleForTesting(
+ isolate, instance, 1, params, ModuleOrigin::kWasmOrigin);
+ CHECK(result == 0xaced);
+ }
- // Initial memory size is 16 pages, should trap till index > MemSize on
- // consecutive GrowMem calls
- for (uint32_t i = 1; i < 5; i++) {
- Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(i), isolate)};
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ Handle<Object> params[1] = {
+ Handle<Object>(Smi::FromInt(25 * kPageSize), isolate)};
testing::RunWasmModuleForTesting(isolate, instance, 1, params,
ModuleOrigin::kWasmOrigin);
CHECK(try_catch.HasCaught());
isolate->clear_pending_exception();
}
+ Cleanup();
+}
- Handle<Object> params[1] = {Handle<Object>(Smi::FromInt(1), isolate)};
- int32_t result = testing::RunWasmModuleForTesting(
- isolate, instance, 1, params, ModuleOrigin::kWasmOrigin);
- CHECK(result == 0xaced);
+TEST(Run_WasmModule_Global_init) {
+ {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint32_t global1 =
+ builder->AddGlobal(kAstI32, false, false, WasmInitExpr(777777));
+ uint32_t global2 =
+ builder->AddGlobal(kAstI32, false, false, WasmInitExpr(222222));
+ WasmFunctionBuilder* f1 = builder->AddFunction(sigs.i_v());
+ byte code[] = {
+ WASM_I32_ADD(WASM_GET_GLOBAL(global1), WASM_GET_GLOBAL(global2))};
+ f1->EmitCode(code, sizeof(code));
+ ExportAsMain(f1);
+ TestModule(&zone, builder, 999999);
+ }
+ Cleanup();
}
-TEST(Run_WasmModule_GrowMemOobVariableIndex) {
- static const int kPageSize = 0x10000;
- int value = 0xaced;
- TestSignatures sigs;
- Isolate* isolate = CcTest::InitIsolateOnce();
- v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
-
- WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
- WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
- ExportAsMain(f);
- byte code[] = {
- WASM_GROW_MEMORY(WASM_I8(1)), WASM_DROP,
- WASM_STORE_MEM(MachineType::Int32(), WASM_GET_LOCAL(0), WASM_I32V(value)),
- WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0))};
- f->EmitCode(code, sizeof(code));
+template <typename CType>
+static void RunWasmModuleGlobalInitTest(LocalType type, CType expected) {
+ {
+ v8::internal::AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ TestSignatures sigs;
+
+ LocalType types[] = {type};
+ FunctionSig sig(1, 0, types);
+
+ for (int padding = 0; padding < 5; padding++) {
+ // Test with a simple initializer
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+
+ for (int i = 0; i < padding; i++) { // pad global before
+ builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 20000));
+ }
+ uint32_t global =
+ builder->AddGlobal(type, false, false, WasmInitExpr(expected));
+ for (int i = 0; i < padding; i++) { // pad global after
+ builder->AddGlobal(kAstI32, false, false, WasmInitExpr(i + 30000));
+ }
+
+ WasmFunctionBuilder* f1 = builder->AddFunction(&sig);
+ byte code[] = {WASM_GET_GLOBAL(global)};
+ f1->EmitCode(code, sizeof(code));
+ ExportAsMain(f1);
+ TestModule(&zone, builder, expected);
+ }
+ }
+ Cleanup();
+}
- HandleScope scope(isolate);
- ZoneBuffer buffer(&zone);
- builder->WriteTo(buffer);
- testing::SetupIsolateForWasmModule(isolate);
+TEST(Run_WasmModule_Global_i32) {
+ RunWasmModuleGlobalInitTest<int32_t>(kAstI32, -983489);
+ RunWasmModuleGlobalInitTest<int32_t>(kAstI32, 11223344);
+}
- Handle<JSObject> instance = testing::CompileInstantiateWasmModuleForTesting(
- isolate, &zone, buffer.begin(), buffer.end(), ModuleOrigin::kWasmOrigin);
+TEST(Run_WasmModule_Global_f32) {
+ RunWasmModuleGlobalInitTest<float>(kAstF32, -983.9f);
+ RunWasmModuleGlobalInitTest<float>(kAstF32, 1122.99f);
+}
- CHECK(!instance.is_null());
+TEST(Run_WasmModule_Global_f64) {
+ RunWasmModuleGlobalInitTest<double>(kAstF64, -833.9);
+ RunWasmModuleGlobalInitTest<double>(kAstF64, 86374.25);
+}
- // Initial memory size is 16 pages, should trap till index > MemSize on
- // consecutive GrowMem calls
- for (int i = 1; i < 5; i++) {
- Handle<Object> params[1] = {
- Handle<Object>(Smi::FromInt((16 + i) * kPageSize - 3), isolate)};
- v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- testing::RunWasmModuleForTesting(isolate, instance, 1, params,
- ModuleOrigin::kWasmOrigin);
- CHECK(try_catch.HasCaught());
- isolate->clear_pending_exception();
+TEST(InitDataAtTheUpperLimit) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
+
+ const byte data[] = {
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kResizableMaximumFlag, // --
+ 1, // initial size
+ 2, // maximum size
+ kDataSectionCode, // --
+ U32V_1(9), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_3(0xffff), // destination offset
+ kExprEnd,
+ U32V_1(1), // source size
+ 'c' // data bytes
+ };
+
+ testing::CompileInstantiateWasmModuleForTesting(isolate, &thrower, data,
+ data + arraysize(data),
+ ModuleOrigin::kWasmOrigin);
+ if (thrower.error()) {
+ thrower.Reify()->Print();
+ CHECK(false);
+ }
}
+ Cleanup();
+}
- for (int i = 1; i < 5; i++) {
- Handle<Object> params[1] = {
- Handle<Object>(Smi::FromInt((20 + i) * kPageSize - 4), isolate)};
- int32_t result = testing::RunWasmModuleForTesting(
- isolate, instance, 1, params, ModuleOrigin::kWasmOrigin);
- CHECK(result == 0xaced);
+TEST(EmptyMemoryNonEmptyDataSegment) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
+
+ const byte data[] = {
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kResizableMaximumFlag, // --
+ 0, // initial size
+ 0, // maximum size
+ kDataSectionCode, // --
+ U32V_1(7), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_1(8), // destination offset
+ kExprEnd,
+ U32V_1(1), // source size
+ 'c' // data bytes
+ };
+
+ testing::CompileInstantiateWasmModuleForTesting(isolate, &thrower, data,
+ data + arraysize(data),
+ ModuleOrigin::kWasmOrigin);
+ // It should not be possible to instantiate this module.
+ CHECK(thrower.error());
}
+ Cleanup();
+}
- v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- Handle<Object> params[1] = {
- Handle<Object>(Smi::FromInt(25 * kPageSize), isolate)};
- testing::RunWasmModuleForTesting(isolate, instance, 1, params,
- ModuleOrigin::kWasmOrigin);
- CHECK(try_catch.HasCaught());
- isolate->clear_pending_exception();
+TEST(EmptyMemoryEmptyDataSegment) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
+
+ const byte data[] = {
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kResizableMaximumFlag, // --
+ 0, // initial size
+ 0, // maximum size
+ kDataSectionCode, // --
+ U32V_1(6), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_1(24), // destination offset
+ kExprEnd,
+ U32V_1(0), // source size
+ };
+
+ testing::CompileInstantiateWasmModuleForTesting(isolate, &thrower, data,
+ data + arraysize(data),
+ ModuleOrigin::kWasmOrigin);
+ // It should be possible to instantiate this module.
+ CHECK(!thrower.error());
+ }
+ Cleanup();
+}
+
+TEST(MemoryWithOOBEmptyDataSegment) {
+ {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ testing::SetupIsolateForWasmModule(isolate);
+
+ ErrorThrower thrower(isolate, "Run_WasmModule_InitDataAtTheUpperLimit");
+
+ const byte data[] = {
+ WASM_MODULE_HEADER, // --
+ kMemorySectionCode, // --
+ U32V_1(4), // section size
+ ENTRY_COUNT(1), // --
+ kResizableMaximumFlag, // --
+ 1, // initial size
+ 1, // maximum size
+ kDataSectionCode, // --
+ U32V_1(9), // section size
+ ENTRY_COUNT(1), // --
+ 0, // linear memory index
+ WASM_I32V_4(0x2468ace), // destination offset
+ kExprEnd,
+ U32V_1(0), // source size
+ };
+
+ testing::CompileInstantiateWasmModuleForTesting(isolate, &thrower, data,
+ data + arraysize(data),
+ ModuleOrigin::kWasmOrigin);
+ // It should be possible to instantiate this module.
+ CHECK(!thrower.error());
+ }
+ Cleanup();
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
index e3a28f611b..614e9a4ba5 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -31,7 +31,7 @@ using namespace v8::internal::compiler;
/* global = global + p0 */ \
BUILD(r, WASM_SET_GLOBAL(1, ADD(WASM_GET_GLOBAL(0), WASM_GET_LOCAL(0))), \
WASM_GET_GLOBAL(0)); \
- CHECK_EQ(1, module.instance->function_code.size()); \
+ CHECK_EQ(1u, module.instance->function_code.size()); \
\
int filter = 1 << RelocInfo::WASM_GLOBAL_REFERENCE; \
\
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd-lowering.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd-lowering.cc
new file mode 100644
index 0000000000..69e770b9b8
--- /dev/null
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd-lowering.cc
@@ -0,0 +1,96 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+using namespace v8::internal::wasm;
+
+WASM_EXEC_TEST(Simd_I32x4_Splat) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r,
+ WASM_SIMD_I32x4_EXTRACT_LANE(0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(5))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(5, r.Call()); }
+}
+
+WASM_EXEC_TEST(Simd_I32x4_Add) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r, WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_I32x4_ADD(WASM_SIMD_I32x4_SPLAT(WASM_I32V(5)),
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(6)))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(11, r.Call()); }
+}
+
+WASM_EXEC_TEST(Simd_F32x4_Splat) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r,
+ WASM_IF_ELSE(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(9.5))),
+ WASM_F32(9.5)),
+ WASM_RETURN1(WASM_I32V(1)), WASM_RETURN1(WASM_I32V(0))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call()); }
+}
+
+WASM_EXEC_TEST(Simd_I32x4_Extract_With_F32x4) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r,
+ WASM_IF_ELSE(WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_F32x4_SPLAT(WASM_F32(30.5))),
+ WASM_I32_REINTERPRET_F32(WASM_F32(30.5))),
+ WASM_RETURN1(WASM_I32V(1)), WASM_RETURN1(WASM_I32V(0))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call()); }
+}
+
+WASM_EXEC_TEST(Simd_F32x4_Extract_With_I32x4) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r,
+ WASM_IF_ELSE(WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_I32x4_SPLAT(WASM_I32V(15))),
+ WASM_F32_REINTERPRET_I32(WASM_I32V(15))),
+ WASM_RETURN1(WASM_I32V(1)), WASM_RETURN1(WASM_I32V(0))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call()); }
+}
+
+WASM_EXEC_TEST(Simd_F32x4_Add_With_I32x4) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r,
+ WASM_IF_ELSE(
+ WASM_F32_EQ(WASM_SIMD_F32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_F32x4_ADD(
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(32)),
+ WASM_SIMD_I32x4_SPLAT(WASM_I32V(19)))),
+ WASM_F32_ADD(WASM_F32_REINTERPRET_I32(WASM_I32V(32)),
+ WASM_F32_REINTERPRET_I32(WASM_I32V(19)))),
+ WASM_RETURN1(WASM_I32V(1)), WASM_RETURN1(WASM_I32V(0))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call()); }
+}
+
+WASM_EXEC_TEST(Simd_I32x4_Add_With_F32x4) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled, MachineType::Int32());
+ BUILD(r,
+ WASM_IF_ELSE(
+ WASM_I32_EQ(WASM_SIMD_I32x4_EXTRACT_LANE(
+ 0, WASM_SIMD_I32x4_ADD(
+ WASM_SIMD_F32x4_SPLAT(WASM_F32(21.25)),
+ WASM_SIMD_F32x4_SPLAT(WASM_F32(31.5)))),
+ WASM_I32_ADD(WASM_I32_REINTERPRET_F32(WASM_F32(21.25)),
+ WASM_I32_REINTERPRET_F32(WASM_F32(31.5)))),
+ WASM_RETURN1(WASM_I32V(1)), WASM_RETURN1(WASM_I32V(0))));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(1, r.Call()); }
+}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index d9d9db80e1..a42a81ba27 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -79,6 +79,14 @@ WASM_EXEC_TEST(Int32Const_many) {
}
}
+WASM_EXEC_TEST(GraphTrimming) {
+ // This WebAssembly code requires graph trimming in the TurboFan compiler.
+ WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
+ BUILD(r, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0, kExprI32RemS,
+ kExprI32Eq, kExprGetLocal, 0, kExprI32DivS, kExprUnreachable);
+ r.Call(1);
+}
+
WASM_EXEC_TEST(Int32Param0) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// return(local[0])
@@ -440,6 +448,42 @@ WASM_EXEC_TEST(Int32DivS_byzero_const) {
}
}
+WASM_EXEC_TEST(Int32AsmjsDivS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; ++denom) {
+ TestingModule module(execution_mode);
+ module.ChangeOriginToAsmjs();
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ BUILD(r, WASM_I32_ASMJS_DIVS(WASM_GET_LOCAL(0), WASM_I8(denom)));
+ FOR_INT32_INPUTS(i) {
+ if (denom == 0) {
+ CHECK_EQ(0, r.Call(*i));
+ } else if (denom == -1 && *i == std::numeric_limits<int32_t>::min()) {
+ CHECK_EQ(std::numeric_limits<int32_t>::min(), r.Call(*i));
+ } else {
+ CHECK_EQ(*i / denom, r.Call(*i));
+ }
+ }
+ }
+}
+
+WASM_EXEC_TEST(Int32AsmjsRemS_byzero_const) {
+ for (int8_t denom = -2; denom < 8; ++denom) {
+ TestingModule module(execution_mode);
+ module.ChangeOriginToAsmjs();
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ BUILD(r, WASM_I32_ASMJS_REMS(WASM_GET_LOCAL(0), WASM_I8(denom)));
+ FOR_INT32_INPUTS(i) {
+ if (denom == 0) {
+ CHECK_EQ(0, r.Call(*i));
+ } else if (denom == -1 && *i == std::numeric_limits<int32_t>::min()) {
+ CHECK_EQ(0, r.Call(*i));
+ } else {
+ CHECK_EQ(*i % denom, r.Call(*i));
+ }
+ }
+ }
+}
+
WASM_EXEC_TEST(Int32DivU_byzero_const) {
for (uint32_t denom = 0xfffffffe; denom < 8; ++denom) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32());
@@ -783,6 +827,15 @@ WASM_EXEC_TEST(Return_F64) {
}
}
+WASM_EXEC_TEST(Select_float_parameters) {
+ WasmRunner<float> r(execution_mode, MachineType::Float32(),
+ MachineType::Float32(), MachineType::Int32());
+ // return select(11, 22, a);
+ BUILD(r,
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)));
+ CHECK_FLOAT_EQ(2.0f, r.Call(2.0f, 1.0f, 1));
+}
+
WASM_EXEC_TEST(Select) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
// return select(11, 22, a);
@@ -852,6 +905,15 @@ WASM_EXEC_TEST(Br_height) {
}
}
+WASM_EXEC_TEST(Regression_660262) {
+ TestingModule module(execution_mode);
+ module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module);
+ BUILD(r, kExprI8Const, 0x00, kExprI8Const, 0x00, kExprI32LoadMem, 0x00, 0x0f,
+ kExprBrTable, 0x00, 0x80, 0x00); // entries=0
+ r.Call();
+}
+
WASM_EXEC_TEST(BrTable0a) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, B1(B1(WASM_BR_TABLE(WASM_GET_LOCAL(0), 0, BR_TARGET(0)))),
@@ -1047,6 +1109,20 @@ WASM_EXEC_TEST(I32ReinterpretF32) {
}
}
+WASM_EXEC_TEST(LoadMaxUint32Offset) {
+ TestingModule module(execution_mode);
+ module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module);
+
+ BUILD(r, kExprI8Const, 0, // index
+ static_cast<byte>(v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(
+ MachineType::Int32(), false)), // --
+ 0, // alignment
+ U32V_5(0xffffffff)); // offset
+
+ CHECK_TRAP32(r.Call());
+}
+
WASM_EXEC_TEST(LoadStoreLoad) {
TestingModule module(execution_mode);
int32_t* memory = module.AddMemoryElems<int32_t>(8);
@@ -1134,7 +1210,7 @@ WASM_EXEC_TEST(Block_empty_brif2) {
WasmRunner<uint32_t> r(execution_mode, MachineType::Uint32(),
MachineType::Uint32());
BUILD(r, WASM_BLOCK(WASM_BR_IF(0, WASM_GET_LOCAL(1))), WASM_GET_LOCAL(0));
- FOR_INT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i, *i + 1)); }
}
WASM_EXEC_TEST(Block_i) {
@@ -1158,7 +1234,7 @@ WASM_EXEC_TEST(Block_d) {
WASM_EXEC_TEST(Block_br2) {
WasmRunner<int32_t> r(execution_mode, MachineType::Int32());
BUILD(r, WASM_BLOCK_I(WASM_BRV(0, WASM_GET_LOCAL(0))));
- FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, r.Call(*i)); }
+ FOR_UINT32_INPUTS(i) { CHECK_EQ(*i, static_cast<uint32_t>(r.Call(*i))); }
}
WASM_EXEC_TEST(Block_If_P) {
@@ -1520,7 +1596,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
BUILD(r,
WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset, WASM_I8(index)));
- if ((offset + index) <= (kMemSize - sizeof(int32_t))) {
+ if ((offset + index) <= static_cast<int>((kMemSize - sizeof(int32_t)))) {
CHECK_EQ(module.raw_val_at<int32_t>(offset + index), r.Call());
} else {
CHECK_TRAP(r.Call());
@@ -1542,7 +1618,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob) {
BUILD(r,
WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset, WASM_I8(index)));
- if ((offset + index) <= (kMemSize - sizeof(int32_t))) {
+ if ((offset + index) <= static_cast<int>((kMemSize - sizeof(int32_t)))) {
CHECK_EQ(module.raw_val_at<int32_t>(offset + index), r.Call());
} else {
CHECK_TRAP(r.Call());
@@ -1693,7 +1769,7 @@ WASM_EXEC_TEST(CheckMachIntsZero) {
/**/ kExprI8Const, 0); // --
module.BlankMemory();
- CHECK_EQ(0, r.Call((kNumElems - 1) * 4));
+ CHECK_EQ(0u, r.Call((kNumElems - 1) * 4));
}
WASM_EXEC_TEST(MemF32_Sum) {
@@ -1853,7 +1929,7 @@ WASM_EXEC_TEST(Infinite_Loop_not_taken2_brif) {
static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
Isolate* isolate = CcTest::InitIsolateOnce();
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
HandleScope scope(isolate);
// Enable all optional operators.
CommonOperatorBuilder common(&zone);
@@ -1868,7 +1944,7 @@ static void TestBuildGraphForSimpleExpression(WasmOpcode opcode) {
TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
code + arraysize(code));
} else {
- CHECK_EQ(2, sig->parameter_count());
+ CHECK_EQ(2u, sig->parameter_count());
byte code[] = {WASM_NO_LOCALS, kExprGetLocal, 0, kExprGetLocal, 1,
static_cast<byte>(opcode)};
TestBuildingGraph(&zone, &jsgraph, nullptr, sig, nullptr, code,
@@ -2240,7 +2316,7 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
int num_params = static_cast<int>(arraysize(mixed)) - start;
for (int which = 0; which < num_params; ++which) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
TestingModule module(execution_mode);
module.AddMemory(1024);
MachineType* memtypes = &mixed[start];
@@ -2610,7 +2686,7 @@ WASM_EXEC_TEST(MultipleCallIndirect) {
CHECK_TRAP(r.Call(2, 1, 0));
}
-WASM_EXEC_TEST(CallIndirect_NoTable) {
+WASM_EXEC_TEST(CallIndirect_EmptyTable) {
TestSignatures sigs;
TestingModule module(execution_mode);
@@ -2622,6 +2698,7 @@ WASM_EXEC_TEST(CallIndirect_NoTable) {
// Signature table.
module.AddSignature(sigs.f_ff());
module.AddSignature(sigs.i_ii());
+ module.AddIndirectFunctionTable(nullptr, 0);
// Builder the caller function.
WasmRunner<int32_t> r(&module, MachineType::Int32());
@@ -2632,6 +2709,49 @@ WASM_EXEC_TEST(CallIndirect_NoTable) {
CHECK_TRAP(r.Call(2));
}
+WASM_EXEC_TEST(CallIndirect_canonical) {
+ TestSignatures sigs;
+ TestingModule module(execution_mode);
+
+ WasmFunctionCompiler t1(sigs.i_ii(), &module);
+ BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t1.CompileAndAdd(/*sig_index*/ 0);
+
+ WasmFunctionCompiler t2(sigs.i_ii(), &module);
+ BUILD(t2, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t2.CompileAndAdd(/*sig_index*/ 1);
+
+ WasmFunctionCompiler t3(sigs.f_ff(), &module);
+ BUILD(t3, WASM_F32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t3.CompileAndAdd(/*sig_index*/ 2);
+
+ // Signature table.
+ module.AddSignature(sigs.i_ii());
+ module.AddSignature(sigs.i_ii());
+ module.AddSignature(sigs.f_ff());
+
+ // Function table.
+ uint16_t i1 = static_cast<uint16_t>(t1.function_index());
+ uint16_t i2 = static_cast<uint16_t>(t2.function_index());
+ uint16_t i3 = static_cast<uint16_t>(t3.function_index());
+ uint16_t indirect_function_table[] = {i1, i2, i3, i1, i2};
+
+ module.AddIndirectFunctionTable(indirect_function_table,
+ arraysize(indirect_function_table));
+ module.PopulateIndirectFunctionTable();
+
+ // Builder the caller function.
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ BUILD(r, WASM_CALL_INDIRECT2(1, WASM_GET_LOCAL(0), WASM_I8(77), WASM_I8(11)));
+
+ CHECK_EQ(88, r.Call(0));
+ CHECK_EQ(66, r.Call(1));
+ CHECK_TRAP(r.Call(2));
+ CHECK_EQ(88, r.Call(3));
+ CHECK_EQ(66, r.Call(4));
+ CHECK_TRAP(r.Call(5));
+}
+
WASM_EXEC_TEST(F32Floor) {
WasmRunner<float> r(execution_mode, MachineType::Float32());
BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
@@ -2830,7 +2950,7 @@ static void CompileCallIndirectMany(LocalType param) {
TestSignatures sigs;
for (byte num_params = 0; num_params < 40; ++num_params) {
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
HandleScope scope(CcTest::InitIsolateOnce());
TestingModule module(kExecuteCompiled);
FunctionSig* sig = sigs.many(&zone, kAstStmt, param, num_params);
@@ -2846,7 +2966,7 @@ static void CompileCallIndirectMany(LocalType param) {
ADD_CODE(code, kExprGetLocal, p);
}
ADD_CODE(code, kExprI8Const, 0);
- ADD_CODE(code, kExprCallIndirect, 1);
+ ADD_CODE(code, kExprCallIndirect, 1, TABLE_ZERO);
t.Build(&code[0], &code[0] + code.size());
t.Compile();
diff --git a/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc b/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
deleted file mode 100644
index 9a4394204b..0000000000
--- a/deps/v8/test/cctest/wasm/test-wasm-function-name-table.cc
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/wasm/wasm-function-name-table.h"
-#include "src/wasm/wasm-module.h"
-
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-using namespace v8::internal::wasm;
-
-namespace {
-
-#define CHECK_STREQ(exp, found) \
- do { \
- Vector<const char> exp_ = (exp); \
- Vector<const char> found_ = (found); \
- if (V8_UNLIKELY(exp_.length() != found_.length() || \
- memcmp(exp_.start(), found_.start(), exp_.length()))) { \
- V8_Fatal(__FILE__, __LINE__, \
- "Check failed: (%s) != (%s) ('%.*s' vs '%.*s').", #exp, #found, \
- exp_.length(), exp_.start(), found_.length(), found_.start()); \
- } \
- } while (0)
-
-void testFunctionNameTable(Vector<Vector<const char>> names) {
- Isolate *isolate = CcTest::InitIsolateOnce();
- HandleAndZoneScope scope;
-
- WasmModule module;
- std::vector<char> all_names;
- // No name should have offset 0, because that encodes unnamed functions.
- // In real wasm binary, offset 0 is impossible anyway.
- all_names.push_back('\0');
-
- uint32_t func_index = 0;
- for (Vector<const char> name : names) {
- size_t name_offset = name.start() ? all_names.size() : 0;
- all_names.insert(all_names.end(), name.start(),
- name.start() + name.length());
- // Make every second function name null-terminated.
- if (func_index % 2) all_names.push_back('\0');
- module.functions.push_back(
- {nullptr, 0, 0, static_cast<uint32_t>(name_offset),
- static_cast<uint32_t>(name.length()), 0, 0, false, false});
- ++func_index;
- }
-
- module.module_start = reinterpret_cast<byte *>(all_names.data());
- module.module_end = module.module_start + all_names.size();
-
- Handle<Object> wasm_function_name_table =
- BuildFunctionNamesTable(isolate, &module);
- CHECK(wasm_function_name_table->IsByteArray());
-
- func_index = 0;
- for (Vector<const char> name : names) {
- MaybeHandle<String> string = GetWasmFunctionNameFromTable(
- Handle<ByteArray>::cast(wasm_function_name_table), func_index);
- if (name.start()) {
- CHECK(string.ToHandleChecked()->IsUtf8EqualTo(name));
- } else {
- CHECK(string.is_null());
- }
- ++func_index;
- }
-}
-
-void testFunctionNameTable(Vector<const char *> names) {
- std::vector<Vector<const char>> names_vec;
- for (const char *name : names)
- names_vec.push_back(name ? CStrVector(name) : Vector<const char>());
- testFunctionNameTable(Vector<Vector<const char>>(
- names_vec.data(), static_cast<int>(names_vec.size())));
-}
-
-} // namespace
-
-TEST(NoFunctions) { testFunctionNameTable(Vector<Vector<const char>>()); }
-
-TEST(OneFunctions) {
- const char *names[] = {"foo"};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(ThreeFunctions) {
- const char *names[] = {"foo", "bar", "baz"};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(OneUnnamedFunction) {
- const char *names[] = {""};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(UnnamedFirstFunction) {
- const char *names[] = {"", "bar", "baz"};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(UnnamedLastFunction) {
- const char *names[] = {"bar", "baz", ""};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(ThreeUnnamedFunctions) {
- const char *names[] = {"", "", ""};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(UTF8Names) {
- const char *names[] = {"↱fun↰", "↺", "alpha:α beta:β"};
- testFunctionNameTable(ArrayVector(names));
-}
-
-TEST(UnnamedVsEmptyNames) {
- const char *names[] = {"", nullptr, nullptr, ""};
- testFunctionNameTable(ArrayVector(names));
-}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 2b51287e87..a0c2e73f9d 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -110,13 +110,13 @@ TEST(CollectDetailedWasmStack_ExplicitThrowFromJs) {
Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
CHECK(returnObjMaybe.is_null());
- // The column is 1-based, so add 1 to the actual byte offset.
+ // Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"a", 3, 8}, // -
- {"js", 4, 2}, // -
- {"<WASM UNNAMED>", static_cast<int>(wasm_index), 3}, // -
- {"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 2}, // -
- {"callFn", 1, 24} // -
+ {"a", 3, 8}, // -
+ {"js", 4, 2}, // -
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index) + 1, 3}, // -
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index_2) + 1, 2}, // -
+ {"callFn", 1, 24} // -
};
CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
}
@@ -154,11 +154,11 @@ TEST(CollectDetailedWasmStack_WasmError) {
Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
CHECK(maybe_return_obj.is_null());
- // The column is 1-based, so add 1 to the actual byte offset.
+ // Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"<WASM UNNAMED>", static_cast<int>(wasm_index), 2}, // -
- {"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 2}, // -
- {"callFn", 1, 24} //-
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index) + 1, 2}, // -
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index_2) + 1, 2}, // -
+ {"callFn", 1, 24} //-
};
CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index d4a2b4fe0b..bd4e82dc4c 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -88,10 +88,10 @@ TEST(Unreachable) {
Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
CHECK(returnObjMaybe.is_null());
- // The column is 1-based, so add 1 to the actual byte offset.
+ // Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"<WASM UNNAMED>", static_cast<int>(wasm_index), 2}, // --
- {"callFn", 1, 24} // --
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index) + 1, 2}, // --
+ {"callFn", 1, 24} // --
};
CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
}
@@ -130,11 +130,11 @@ TEST(IllegalLoad) {
Execution::TryCall(isolate, js_trampoline, global, 1, args, &maybe_exc);
CHECK(returnObjMaybe.is_null());
- // The column is 1-based, so add 1 to the actual byte offset.
+ // Line and column are 1-based, so add 1 for the expected wasm output.
ExceptionInfo expected_exceptions[] = {
- {"<WASM UNNAMED>", static_cast<int>(wasm_index), 8}, // --
- {"<WASM UNNAMED>", static_cast<int>(wasm_index_2), 3}, // --
- {"callFn", 1, 24} // --
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index) + 1, 8}, // --
+ {"<WASM UNNAMED>", static_cast<int>(wasm_index_2) + 1, 3}, // --
+ {"callFn", 1, 24} // --
};
CheckExceptionInfos(maybe_exc.ToHandleChecked(), expected_exceptions);
}
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index 93fcb89dba..284b21c7c4 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -8,25 +8,25 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-
#include <memory>
#include "src/base/utils/random-number-generator.h"
#include "src/zone/accounting-allocator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/int64-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
-#include "src/compiler/zone-pool.h"
-
+#include "src/compiler/zone-stats.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone.h"
@@ -68,7 +68,7 @@ const uint32_t kMaxGlobalsSize = 128;
// A helper for module environments that adds the ability to allocate memory
// and global variables. Contains a built-in {WasmModule} and
-// {WasmModuleInstance}.
+// {WasmInstance}.
class TestingModule : public ModuleEnv {
public:
explicit TestingModule(WasmExecutionMode mode = kExecuteCompiled)
@@ -97,9 +97,11 @@ class TestingModule : public ModuleEnv {
if (interpreter_) delete interpreter_;
}
+ void ChangeOriginToAsmjs() { origin = kAsmJsOrigin; }
+
byte* AddMemory(uint32_t size) {
CHECK_NULL(instance->mem_start);
- CHECK_EQ(0, instance->mem_size);
+ CHECK_EQ(0u, instance->mem_size);
instance->mem_start = reinterpret_cast<byte*>(malloc(size));
CHECK(instance->mem_start);
memset(instance->mem_start, 0, size);
@@ -206,21 +208,15 @@ class TestingModule : public ModuleEnv {
Handle<JSFunction> WrapCode(uint32_t index) {
// Wrap the code so it can be called as a JS function.
Handle<String> name = isolate_->factory()->NewStringFromStaticChars("main");
- Handle<JSObject> module_object = Handle<JSObject>(0, isolate_);
+ Handle<WasmInstanceObject> instance_obj(0, isolate_);
Handle<Code> code = instance->function_code[index];
WasmJs::InstallWasmMapsIfNeeded(isolate_, isolate_->native_context());
Handle<Code> ret_code =
compiler::CompileJSToWasmWrapper(isolate_, this, code, index);
- FunctionSig* funcSig = this->module->functions[index].sig;
- Handle<ByteArray> exportedSig = isolate_->factory()->NewByteArray(
- static_cast<int>(funcSig->parameter_count() + funcSig->return_count()),
- TENURED);
- exportedSig->copy_in(0, reinterpret_cast<const byte*>(funcSig->raw_data()),
- exportedSig->length());
- Handle<JSFunction> ret = WrapExportCodeAsJSFunction(
- isolate_, ret_code, name,
+ Handle<JSFunction> ret = WasmExportedFunction::New(
+ isolate_, instance_obj, name, ret_code,
static_cast<int>(this->module->functions[index].sig->parameter_count()),
- exportedSig, module_object);
+ static_cast<int>(index));
return ret;
}
@@ -228,24 +224,35 @@ class TestingModule : public ModuleEnv {
instance->function_code[index] = code;
}
- void AddIndirectFunctionTable(uint16_t* functions, uint32_t table_size) {
- module_.function_tables.push_back(
- {table_size, table_size, std::vector<int32_t>(), false, false});
+ void AddIndirectFunctionTable(uint16_t* function_indexes,
+ uint32_t table_size) {
+ module_.function_tables.push_back({table_size, table_size, true,
+ std::vector<int32_t>(), false, false,
+ SignatureMap()});
+ WasmIndirectFunctionTable& table = module_.function_tables.back();
+ table.min_size = table_size;
+ table.max_size = table_size;
for (uint32_t i = 0; i < table_size; ++i) {
- module_.function_tables.back().values.push_back(functions[i]);
+ table.values.push_back(function_indexes[i]);
+ table.map.FindOrInsert(module_.functions[function_indexes[i]].sig);
}
- Handle<FixedArray> values = BuildFunctionTable(
- isolate_, static_cast<int>(module_.function_tables.size() - 1),
- &module_);
- instance->function_tables.push_back(values);
+ instance->function_tables.push_back(
+ isolate_->factory()->NewFixedArray(table_size * 2));
}
void PopulateIndirectFunctionTable() {
+ // Initialize the fixed arrays in instance->function_tables.
for (uint32_t i = 0; i < instance->function_tables.size(); i++) {
- PopulateFunctionTable(instance->function_tables[i],
- module_.function_tables[i].size,
- &instance->function_code);
+ WasmIndirectFunctionTable& table = module_.function_tables[i];
+ Handle<FixedArray> array = instance->function_tables[i];
+ int table_size = static_cast<int>(table.values.size());
+ for (int j = 0; j < table_size; j++) {
+ WasmFunction& function = module_.functions[table.values[j]];
+ array->set(j, Smi::FromInt(table.map.Find(function.sig)));
+ array->set(j + table_size,
+ *instance->function_code[function.func_index]);
+ }
}
}
@@ -257,7 +264,7 @@ class TestingModule : public ModuleEnv {
private:
WasmExecutionMode execution_mode_;
WasmModule module_;
- WasmModuleInstance instance_;
+ WasmInstance instance_;
Isolate* isolate_;
v8::internal::AccountingAllocator allocator_;
uint32_t global_offset;
@@ -268,7 +275,7 @@ class TestingModule : public ModuleEnv {
byte size = WasmOpcodes::MemSize(WasmOpcodes::MachineTypeFor(type));
global_offset = (global_offset + size - 1) & ~(size - 1); // align
module_.globals.push_back(
- {type, true, NO_INIT, global_offset, false, false});
+ {type, true, WasmInitExpr(), global_offset, false, false});
global_offset += size;
// limit number of globals.
CHECK_LT(global_offset, kMaxGlobalsSize);
@@ -300,10 +307,7 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
FATAL(str.str().c_str());
}
builder.Int64LoweringForTesting();
- if (FLAG_trace_turbo_graph) {
- OFStream os(stdout);
- os << AsRPO(*jsgraph->graph());
- }
+ builder.SimdScalarLoweringForTesting();
}
template <typename ReturnType>
@@ -354,7 +358,7 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
}
if (p1 != MachineType::None()) {
parameters[parameter_count] = graph()->NewNode(
- machine()->Load(p0),
+ machine()->Load(p1),
graph()->NewNode(common()->Parameter(1), graph()->start()),
graph()->NewNode(common()->Int32Constant(0)), effect,
graph()->start());
@@ -362,7 +366,7 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
}
if (p2 != MachineType::None()) {
parameters[parameter_count] = graph()->NewNode(
- machine()->Load(p0),
+ machine()->Load(p2),
graph()->NewNode(common()->Parameter(2), graph()->start()),
graph()->NewNode(common()->Int32Constant(0)), effect,
graph()->start());
@@ -370,7 +374,7 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
}
if (p3 != MachineType::None()) {
parameters[parameter_count] = graph()->NewNode(
- machine()->Load(p0),
+ machine()->Load(p3),
graph()->NewNode(common()->Parameter(3), graph()->start()),
graph()->NewNode(common()->Int32Constant(0)), effect,
graph()->start());
@@ -390,8 +394,9 @@ class WasmFunctionWrapper : public HandleAndZoneScope,
graph()->start()),
graph()->NewNode(common()->Int32Constant(0)), call, effect,
graph()->start());
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
Node* r = graph()->NewNode(
- common()->Return(),
+ common()->Return(), zero,
graph()->NewNode(common()->Int32Constant(WASM_WRAPPER_RETURN_VALUE)),
effect, graph()->start());
graph()->SetEnd(graph()->NewNode(common()->End(2), r, graph()->start()));
@@ -613,7 +618,7 @@ class WasmRunner {
MachineType p1 = MachineType::None(),
MachineType p2 = MachineType::None(),
MachineType p3 = MachineType::None())
- : zone(&allocator_),
+ : zone(&allocator_, ZONE_NAME),
compiled_(false),
signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
GetParameterCount(p0, p1, p2, p3), storage_),
@@ -625,11 +630,12 @@ class WasmRunner {
MachineType p1 = MachineType::None(),
MachineType p2 = MachineType::None(),
MachineType p3 = MachineType::None())
- : zone(&allocator_),
+ : zone(&allocator_, ZONE_NAME),
compiled_(false),
signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
GetParameterCount(p0, p1, p2, p3), storage_),
- compiler_(&signature_, module) {
+ compiler_(&signature_, module),
+ possible_nondeterminism_(false) {
DCHECK(module);
InitSigStorage(p0, p1, p2, p3);
}
@@ -739,6 +745,7 @@ class WasmRunner {
thread->PushFrame(compiler_.function_, args.start());
if (thread->Run() == WasmInterpreter::FINISHED) {
WasmVal val = thread->GetReturnValue();
+ possible_nondeterminism_ |= thread->PossibleNondeterminism();
return val.to<ReturnType>();
} else if (thread->state() == WasmInterpreter::TRAPPED) {
// TODO(titzer): return the correct trap code
@@ -755,6 +762,7 @@ class WasmRunner {
WasmFunction* function() { return compiler_.function_; }
WasmInterpreter* interpreter() { return compiler_.interpreter_; }
+ bool possible_nondeterminism() { return possible_nondeterminism_; }
protected:
v8::internal::AccountingAllocator allocator_;
@@ -764,6 +772,7 @@ class WasmRunner {
FunctionSig signature_;
WasmFunctionCompiler compiler_;
WasmFunctionWrapper<ReturnType> wrapper_;
+ bool possible_nondeterminism_;
bool interpret() { return compiler_.execution_mode_ == kExecuteInterpreted; }
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 15c3ef433f..0b885175fc 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -12,6 +12,7 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -23,26 +24,25 @@ uint32_t GetMinModuleMemSize(const WasmModule* module) {
return WasmModule::kPageSize * module->min_mem_pages;
}
-const WasmModule* DecodeWasmModuleForTesting(Isolate* isolate, Zone* zone,
- ErrorThrower* thrower,
- const byte* module_start,
- const byte* module_end,
- ModuleOrigin origin) {
+const WasmModule* DecodeWasmModuleForTesting(
+ Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin, bool verify_functions) {
// Decode the module, but don't verify function bodies, since we'll
// be compiling them anyway.
- ModuleResult decoding_result =
- DecodeWasmModule(isolate, zone, module_start, module_end, false, origin);
+ ModuleResult decoding_result = DecodeWasmModule(
+ isolate, module_start, module_end, verify_functions, origin);
- std::unique_ptr<const WasmModule> module(decoding_result.val);
if (decoding_result.failed()) {
// Module verification failed. throw.
- thrower->Error("WASM.compileRun() failed: %s",
- decoding_result.error_msg.get());
- return nullptr;
+ thrower->CompileError("WASM.compileRun() failed: %s",
+ decoding_result.error_msg.get());
}
- if (thrower->error()) return nullptr;
- return module.release();
+ if (thrower->error()) {
+ if (decoding_result.val) delete decoding_result.val;
+ return nullptr;
+ }
+ return decoding_result.val;
}
const Handle<JSObject> InstantiateModuleForTesting(Isolate* isolate,
@@ -51,20 +51,19 @@ const Handle<JSObject> InstantiateModuleForTesting(Isolate* isolate,
CHECK(module != nullptr);
if (module->import_table.size() > 0) {
- thrower->Error("Not supported: module has imports.");
- }
- if (module->export_table.size() == 0) {
- thrower->Error("Not supported: module has no exports.");
+ thrower->CompileError("Not supported: module has imports.");
}
+
if (thrower->error()) return Handle<JSObject>::null();
// Although we decoded the module for some pre-validation, run the bytes
// again through the normal pipeline.
- MaybeHandle<JSObject> module_object = CreateModuleObjectFromBytes(
+ // TODO(wasm): Use {module} instead of decoding the module bytes again.
+ MaybeHandle<WasmModuleObject> module_object = CreateModuleObjectFromBytes(
isolate, module->module_start, module->module_end, thrower,
- ModuleOrigin::kWasmOrigin);
+ ModuleOrigin::kWasmOrigin, Handle<Script>::null(), nullptr, nullptr);
if (module_object.is_null()) {
- thrower->Error("Module pre-validation failed.");
+ thrower->CompileError("Module pre-validation failed.");
return Handle<JSObject>::null();
}
MaybeHandle<JSObject> maybe_instance = WasmModule::Instantiate(
@@ -78,17 +77,16 @@ const Handle<JSObject> InstantiateModuleForTesting(Isolate* isolate,
}
const Handle<JSObject> CompileInstantiateWasmModuleForTesting(
- Isolate* isolate, Zone* zone, const byte* module_start,
+ Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin) {
- ErrorThrower thrower(isolate, "CompileInstantiateWasmModule");
std::unique_ptr<const WasmModule> module(DecodeWasmModuleForTesting(
- isolate, zone, &thrower, module_start, module_end, origin));
+ isolate, thrower, module_start, module_end, origin));
if (module == nullptr) {
- thrower.Error("Wasm module decode failed");
+ thrower->CompileError("Wasm module decoding failed");
return Handle<JSObject>::null();
}
- return InstantiateModuleForTesting(isolate, &thrower, module.get());
+ return InstantiateModuleForTesting(isolate, thrower, module.get());
}
int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
@@ -103,10 +101,9 @@ int32_t RunWasmModuleForTesting(Isolate* isolate, Handle<JSObject> instance,
int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
const byte* module_end, ModuleOrigin origin) {
HandleScope scope(isolate);
- Zone zone(isolate->allocator());
-
+ ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
Handle<JSObject> instance = CompileInstantiateWasmModuleForTesting(
- isolate, &zone, module_start, module_end, origin);
+ isolate, &thrower, module_start, module_end, origin);
if (instance.is_null()) {
return -1;
}
@@ -115,39 +112,23 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
int32_t InterpretWasmModule(Isolate* isolate, ErrorThrower* thrower,
const WasmModule* module, int function_index,
- WasmVal* args) {
+ WasmVal* args, bool* possible_nondeterminism) {
CHECK(module != nullptr);
- Zone zone(isolate->allocator());
+ Zone zone(isolate->allocator(), ZONE_NAME);
v8::internal::HandleScope scope(isolate);
if (module->import_table.size() > 0) {
- thrower->Error("Not supported: module has imports.");
+ thrower->CompileError("Not supported: module has imports.");
}
if (module->export_table.size() == 0) {
- thrower->Error("Not supported: module has no exports.");
+ thrower->CompileError("Not supported: module has no exports.");
}
if (thrower->error()) return -1;
- ModuleEnv module_env;
- module_env.module = module;
- module_env.origin = module->origin;
-
- for (size_t i = 0; i < module->functions.size(); i++) {
- FunctionBody body = {
- &module_env, module->functions[i].sig, module->module_start,
- module->module_start + module->functions[i].code_start_offset,
- module->module_start + module->functions[i].code_end_offset};
- DecodeResult result = VerifyWasmCode(isolate->allocator(), body);
- if (result.failed()) {
- thrower->Error("Function did not verify");
- return -1;
- }
- }
-
// The code verifies, we create an instance to run it in the interpreter.
- WasmModuleInstance instance(module);
+ WasmInstance instance(module);
instance.context = isolate->native_context();
instance.mem_size = GetMinModuleMemSize(module);
// TODO(ahaas): Move memory allocation to wasm-module.cc for better
@@ -155,7 +136,6 @@ int32_t InterpretWasmModule(Isolate* isolate, ErrorThrower* thrower,
instance.mem_start =
static_cast<byte*>(calloc(GetMinModuleMemSize(module), 1));
instance.globals_start = nullptr;
- module_env.instance = &instance;
WasmInterpreter interpreter(&instance, isolate->allocator());
@@ -166,13 +146,14 @@ int32_t InterpretWasmModule(Isolate* isolate, ErrorThrower* thrower,
if (instance.mem_start) {
free(instance.mem_start);
}
+ *possible_nondeterminism = thread->PossibleNondeterminism();
if (interpreter_result == WasmInterpreter::FINISHED) {
WasmVal val = thread->GetReturnValue();
return val.to<int32_t>();
} else if (thread->state() == WasmInterpreter::TRAPPED) {
return 0xdeadbeef;
} else {
- thrower->Error(
+ thrower->RangeError(
"Interpreter did not finish execution within its step bound");
return -1;
}
@@ -205,7 +186,7 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
// The result should be a number.
if (retval.is_null()) {
- thrower->Error("WASM.compileRun() failed: Invocation was null");
+ thrower->RuntimeError("WASM.compileRun() failed: Invocation was null");
return -1;
}
Handle<Object> result = retval.ToHandleChecked();
@@ -215,7 +196,8 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle<JSObject> instance,
if (result->IsHeapNumber()) {
return static_cast<int32_t>(HeapNumber::cast(*result)->value());
}
- thrower->Error("WASM.compileRun() failed: Return value should be number");
+ thrower->RuntimeError(
+ "WASM.compileRun() failed: Return value should be number");
return -1;
}
@@ -224,7 +206,6 @@ void SetupIsolateForWasmModule(Isolate* isolate) {
WasmJs::InstallWasmModuleSymbolIfNeeded(isolate, isolate->global_object(),
isolate->native_context());
}
-
} // namespace testing
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index 780d23e06f..d6702e1e37 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -19,11 +19,9 @@ namespace wasm {
namespace testing {
// Decodes the given encoded module.
-const WasmModule* DecodeWasmModuleForTesting(Isolate* isolate, Zone* zone,
- ErrorThrower* thrower,
- const byte* module_start,
- const byte* module_end,
- ModuleOrigin origin);
+const WasmModule* DecodeWasmModuleForTesting(
+ Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
+ const byte* module_end, ModuleOrigin origin, bool verify_functions = false);
// Instantiates a module without any imports and exports.
const Handle<JSObject> InstantiateModuleForTesting(Isolate* isolate,
@@ -45,11 +43,11 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
// should not have any imports or exports
int32_t InterpretWasmModule(Isolate* isolate, ErrorThrower* thrower,
const WasmModule* module, int function_index,
- WasmVal* args);
+ WasmVal* args, bool* may_produced_nan);
// Compiles WasmModule bytes and return an instance of the compiled module.
const Handle<JSObject> CompileInstantiateWasmModuleForTesting(
- Isolate* isolate, Zone* zone, const byte* module_start,
+ Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin);
// Runs the module instance with arguments.
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js b/deps/v8/test/debugger/debug-evaluate-locals-optimized-double.js
index 84b7e20fe6..90e79694c0 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
+++ b/deps/v8/test/debugger/debug-evaluate-locals-optimized-double.js
@@ -45,23 +45,19 @@ var input = [
];
var expected = [
- { locals: {a0: 1.01, b0: 2.02},
+ { locals: {i: 0, x0: 3.03, y0: 4.04, a0: 1.01, b0: 2.02},
args: { names: ["i", "x0", "y0"], values: [0, 3.03, 4.04] } },
- { locals: {a1: 3.03, b1: 4.04},
+ { locals: {i: 1, x1: 5.05, y1: 6.06, a1: 3.03, b1: 4.04},
args: { names: ["i", "x1", "y1"], values: [1, 5.05, 6.06] } },
- { locals: {a2: 5.05, b2: 6.06},
+ { locals: {i: 2, a2: 5.05, b2: 6.06},
args: { names: ["i"], values: [2] } },
- { locals: {a3: 7.07, b3: 8.08},
+ { locals: {i: 3, x3: 9.09, y3: 10.10, z3: undefined, a3: 7.07, b3: 8.08},
args: { names: ["i", "x3", "y3", "z3"],
values: [3, 9.09, 10.10, undefined] } },
- { locals: {a4: 9.09, b4: 10.10},
+ { locals: {i: 4, x4: 11.11, y4: 12.12, a4: 9.09, b4: 10.10},
args: { names: ["i", "x4", "y4"], values: [4, 11.11, 12.12] } }
];
-function arraySum(arr) {
- return arr.reduce(function (a, b) { return a + b; }, 0);
-}
-
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break)
@@ -81,13 +77,6 @@ function listener(event, exec_state, event_data, data) {
}
assertPropertiesEqual(expected_locals, locals);
- // All frames except the bottom one have expected arguments.
- for (var j = 0; j < expected_args.names.length; j++) {
- assertEquals(expected_args.names[j], frame.argumentName(j));
- assertEquals(expected_args.values[j],
- frame.argumentValue(j).value());
- }
-
// All frames except the bottom one have two scopes.
assertEquals(3, frame.scopeCount());
assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
@@ -117,22 +106,6 @@ function listener(event, exec_state, event_data, data) {
assertEquals(arg_value, frame.evaluate(arg_name).value());
assertEquals(arg_value, frame.evaluate('arguments['+j+']').value());
}
-
- var expected_args_sum = arraySum(expected_args.values);
- var expected_locals_sum =
- arraySum(Object.keys(expected_locals).
- map(function (k) { return expected_locals[k]; }));
-
- assertEquals(expected_locals_sum + expected_args_sum,
- frame.evaluate(Object.keys(expected_locals).join('+') +
- ' + ' +
- expected_args.names.join('+')).value());
-
- var arguments_sum = expected_args.names.map(function(_, idx) {
- return "arguments[" + idx + "]";
- }).join('+');
- assertEquals(expected_args_sum,
- frame.evaluate(arguments_sum).value());
} else {
// The bottom frame only have the global scope.
assertEquals(2, frame.scopeCount());
@@ -142,28 +115,14 @@ function listener(event, exec_state, event_data, data) {
// Check the frame function.
switch (i) {
- case 0: assertEquals(h, frame.func().value()); break;
- case 1: assertEquals(g3, frame.func().value()); break;
- case 2: assertEquals(g2, frame.func().value()); break;
- case 3: assertEquals(g1, frame.func().value()); break;
- case 4: assertEquals(f, frame.func().value()); break;
+ case 0: assertEquals("h", frame.func().name()); break;
+ case 1: assertEquals("g3", frame.func().name()); break;
+ case 2: assertEquals("g2", frame.func().name()); break;
+ case 3: assertEquals("g1", frame.func().name()); break;
+ case 4: assertEquals("f", frame.func().name()); break;
case 5: break;
default: assertUnreachable();
}
-
- // Check for construct call.
- if (i == 4) {
- assertEquals(testingConstructCall, frame.isConstructCall());
- } else if (i == 2) {
- assertTrue(frame.isConstructCall());
- } else {
- assertFalse(frame.isConstructCall());
- }
-
- if (i > 4) {
- assertFalse(frame.isOptimizedFrame());
- assertFalse(frame.isInlinedFrame());
- }
}
// Indicate that all was processed.
diff --git a/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js b/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js
new file mode 100644
index 0000000000..1a7e85cb5a
--- /dev/null
+++ b/deps/v8/test/debugger/debug/compiler/osr-typing-debug-change.js
@@ -0,0 +1,118 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var Debug = debug.Debug;
+
+var changed = false;
+
+function listenerSetJToResult(
+ event, exec_state, event_data, data) {
+
+ if (event == Debug.DebugEvent.Break) {
+ var scope = exec_state.frame(1).scope(0);
+ var newval = "result";
+ try {
+ scope.setVariableValue("j", newval);
+ changed = true;
+ } catch(e) {
+ changed = false;
+ }
+ }
+}
+
+Debug.setListener(listenerSetJToResult);
+
+function g() { debugger; }
+%NeverOptimizeFunction(g);
+
+function ChangeSmiConstantAndOsr() {
+ var j = 1;
+ for (var i = 0; i < 4; i++) {
+ if (i == 2) {
+ %OptimizeOsr();
+ g();
+ }
+ }
+ return j;
+}
+var r1 = ChangeSmiConstantAndOsr();
+if (changed) {
+ assertEquals("result", r1);
+} else {
+ assertEquals(1, r1);
+}
+
+function ChangeFloatConstantAndOsr() {
+ var j = 0.1;
+ for (var i = 0; i < 4; i++) {
+ if (i == 2) {
+ %OptimizeOsr();
+ g();
+ }
+ }
+ return j;
+}
+var r2 = ChangeFloatConstantAndOsr();
+if (changed) {
+ assertEquals("result", r2);
+} else {
+ assertEquals(0.1, r2);
+}
+
+function ChangeFloatVarAndOsr() {
+ var j = 0.1;
+ for (var i = 0; i < 4; i++) {
+ j = j + 0.1;
+ if (i == 2) {
+ %OptimizeOsr();
+ g();
+ }
+ }
+ return j;
+}
+var r3 = ChangeFloatVarAndOsr();
+if (changed) {
+ assertEquals("result0.1", r3);
+} else {
+ assertEquals(0.5, r3);
+}
+
+var counter = 0;
+var o = { toString : function() { counter++; return 100; } };
+
+function listenerSetJToObject(
+ event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ var scope = exec_state.frame(1).scope(0);
+ try {
+ scope.setVariableValue("j", o);
+ changed = true;
+ } catch(e) {
+ changed = false;
+ }
+ }
+}
+
+Debug.setListener(listenerSetJToObject);
+
+function ChangeIntVarAndOsr() {
+ var j = 1;
+ for (var i = 0; i < 4; i++) {
+ j = j + 1|0;
+ if (i == 2) {
+ %OptimizeOsr();
+ g();
+ }
+ }
+ return j;
+}
+
+var r4 = ChangeIntVarAndOsr();
+if (changed) {
+ assertEquals(101, r4);
+ assertEquals(1, counter);
+} else {
+ assertEquals(5, r4);
+}
diff --git a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js b/deps/v8/test/debugger/debug/debug-allscopes-on-debugger.js
index 17668cfc24..4f09b0a61e 100644
--- a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
+++ b/deps/v8/test/debugger/debug/debug-allscopes-on-debugger.js
@@ -2,22 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
var exception = null;
var break_count = 0;
+const expected_breaks = 8;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
- // Count number of expected breakpoints in this source file.
- if (!break_count) {
- var source_text = exec_state.frame(0).func().script().source();
- expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
- print("Expected breaks: " + expected_breaks);
- }
var frameMirror = exec_state.frame(0);
frameMirror.allScopes();
diff --git a/deps/v8/test/mjsunit/debug-break-inline.js b/deps/v8/test/debugger/debug/debug-break-inline.js
index 4418fa8d1b..18574ecea1 100644
--- a/deps/v8/test/mjsunit/debug-break-inline.js
+++ b/deps/v8/test/debugger/debug/debug-break-inline.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// This test tests that deoptimization due to debug breaks works for
// inlined functions where the full-code is generated before the
@@ -33,7 +32,6 @@
//
//See http://code.google.com/p/chromium/issues/detail?id=105375
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug;
var count = 0;
@@ -47,25 +45,6 @@ function listener(event, exec_state, event_data, data) {
break_count++;
if (break_count == 1) {
Debug.setBreakPoint(g, 3);
-
- for (var i = 0; i < exec_state.frameCount(); i++) {
- var frame = exec_state.frame(i);
- // When function f is optimized (1 means YES, see runtime.cc) we
- // expect an optimized frame for f and g.
- if (%GetOptimizationStatus(f) == 1) {
- if (i == 1) {
- assertTrue(frame.isOptimizedFrame());
- assertTrue(frame.isInlinedFrame());
- assertEquals(4 - i, frame.inlinedFrameIndex());
- } else if (i == 2) {
- assertTrue(frame.isOptimizedFrame());
- assertFalse(frame.isInlinedFrame());
- } else {
- assertFalse(frame.isOptimizedFrame());
- assertFalse(frame.isInlinedFrame());
- }
- }
- }
}
}
}
diff --git a/deps/v8/test/mjsunit/debug-compile-optimized.js b/deps/v8/test/debugger/debug/debug-compile-optimized.js
index 468605abaa..089f2a0970 100644
--- a/deps/v8/test/mjsunit/debug-compile-optimized.js
+++ b/deps/v8/test/debugger/debug/debug-compile-optimized.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --crankshaft
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/debug-constructor.js b/deps/v8/test/debugger/debug/debug-constructor.js
index a4d50311e9..4f0317a1c0 100644
--- a/deps/v8/test/mjsunit/debug-constructor.js
+++ b/deps/v8/test/debugger/debug/debug-constructor.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
// Simple function which collects a simple call graph.
diff --git a/deps/v8/test/mjsunit/debug-evaluate-closure.js b/deps/v8/test/debugger/debug/debug-evaluate-closure.js
index ebd42f3ae9..5b8ad89c5d 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-closure.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-closure.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug;
var listened = false;
diff --git a/deps/v8/test/mjsunit/debug-evaluate-declaration.js b/deps/v8/test/debugger/debug/debug-evaluate-declaration.js
index c64498e097..c562b73d46 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-declaration.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-declaration.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
// Test that debug-evaluate only resolves variables that are used by
// the function inside which we debug-evaluate. This is to avoid
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js b/deps/v8/test/debugger/debug/debug-evaluate-locals-capturing.js
index 5fdacba85c..d2ad57eb77 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-capturing.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-locals-capturing.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js b/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js
index 9d539fe282..32e5379502 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-locals-optimized.js
@@ -25,10 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --expose-gc --allow-natives-syntax
+// Flags: --expose-gc
// Flags: --inline-construct
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var listenerComplete = false;
@@ -37,22 +36,18 @@ var exception = false;
var testingConstructCall = false;
var expected = [
- { locals: {a0: 1, b0: 2},
+ { locals: {i: 0, x0: 3, y0: 4, a0: 1, b0: 2},
args: { names: ["i", "x0", "y0"], values: [0, 3, 4] } },
- { locals: {a1: 3, b1: 4},
+ { locals: {i: 1, x1: 5, y1: 6, a1: 3, b1: 4},
args: { names: ["i", "x1", "y1"], values: [1, 5, 6] } },
- { locals: {a2: 5, b2: 6},
+ { locals: {i: 2, a2: 5, b2: 6},
args: { names: ["i"], values: [2] } },
- { locals: {a3: 7, b3: 8},
+ { locals: {i: 3, x3: 9, y3: 10, z3: undefined, a3: 7, b3: 8},
args: { names: ["i", "x3", "y3", "z3"], values: [3, 9, 10, undefined] } },
- { locals: {a4: 9, b4: 10},
+ { locals: {i: 4, x4: 11, y4: 12, a4: 9, b4: 10},
args: { names: ["i", "x4", "y4"], values: [4, 11, 12] } }
];
-function arraySum(arr) {
- return arr.reduce(function (a, b) { return a + b; }, 0);
-}
-
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break)
@@ -72,13 +67,6 @@ function listener(event, exec_state, event_data, data) {
}
assertPropertiesEqual(expected_locals, locals);
- // All frames except the bottom one have expected arguments.
- for (var j = 0; j < expected_args.names.length; j++) {
- assertEquals(expected_args.names[j], frame.argumentName(j));
- assertEquals(expected_args.values[j],
- frame.argumentValue(j).value());
- }
-
// All frames except the bottom one have three scopes.
assertEquals(3, frame.scopeCount());
assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
@@ -108,22 +96,6 @@ function listener(event, exec_state, event_data, data) {
assertEquals(arg_value, frame.evaluate(arg_name).value());
assertEquals(arg_value, frame.evaluate('arguments['+j+']').value());
}
-
- var expected_args_sum = arraySum(expected_args.values);
- var expected_locals_sum =
- arraySum(Object.keys(expected_locals).
- map(function (k) { return expected_locals[k]; }));
-
- assertEquals(expected_locals_sum + expected_args_sum,
- frame.evaluate(Object.keys(expected_locals).join('+') +
- ' + ' +
- expected_args.names.join('+')).value());
-
- var arguments_sum = expected_args.names.map(function(_, idx) {
- return "arguments[" + idx + "]";
- }).join('+');
- assertEquals(expected_args_sum,
- frame.evaluate(arguments_sum).value());
} else {
// The bottom frame only have the script scope and the global scope.
assertEquals(2, frame.scopeCount());
@@ -133,28 +105,14 @@ function listener(event, exec_state, event_data, data) {
// Check the frame function.
switch (i) {
- case 0: assertEquals(h, frame.func().value()); break;
- case 1: assertEquals(g3, frame.func().value()); break;
- case 2: assertEquals(g2, frame.func().value()); break;
- case 3: assertEquals(g1, frame.func().value()); break;
- case 4: assertEquals(f, frame.func().value()); break;
+ case 0: assertEquals("h", frame.func().name()); break;
+ case 1: assertEquals("g3", frame.func().name()); break;
+ case 2: assertEquals("g2", frame.func().name()); break;
+ case 3: assertEquals("g1", frame.func().name()); break;
+ case 4: assertEquals("f", frame.func().name()); break;
case 5: break;
default: assertUnreachable();
}
-
- // Check for construct call.
- if (i == 4) {
- assertEquals(testingConstructCall, frame.isConstructCall());
- } else if (i == 2) {
- assertTrue(frame.isConstructCall());
- } else {
- assertFalse(frame.isConstructCall());
- }
-
- if (i > 4) {
- assertFalse(frame.isOptimizedFrame());
- assertFalse(frame.isInlinedFrame());
- }
}
// Indicate that all was processed.
diff --git a/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js b/deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js
index 676f78282d..f7d1b8de07 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-modify-catch-block-scope.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-modify-catch-block-scope.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-evaluate-modify-this.js b/deps/v8/test/debugger/debug/debug-evaluate-modify-this.js
index 930f6ed043..18a343b4cd 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-modify-this.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-modify-this.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-evaluate-nested-let.js b/deps/v8/test/debugger/debug/debug-evaluate-nested-let.js
index 8e9f8c157a..d56a36ccf6 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-nested-let.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-nested-let.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js b/deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-2.js
index 59352e06a5..5edd03ca58 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-shadowed-context-2.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-shadowed-context-2.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --no-analyze-environment-liveness
// Test that debug-evaluate correctly collects free outer variables
// and does not get confused by variables in nested scopes.
diff --git a/deps/v8/test/mjsunit/debug-evaluate-with.js b/deps/v8/test/debugger/debug/debug-evaluate-with.js
index 4e02d9e188..06b627c517 100644
--- a/deps/v8/test/mjsunit/debug-evaluate-with.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-with.js
@@ -25,9 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
listenerComplete = false;
diff --git a/deps/v8/test/mjsunit/debug-event-listener.js b/deps/v8/test/debugger/debug/debug-event-listener.js
index a2eb5f009b..d89bf809ef 100644
--- a/deps/v8/test/mjsunit/debug-event-listener.js
+++ b/deps/v8/test/debugger/debug/debug-event-listener.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
// Simple function which stores the last debug event.
diff --git a/deps/v8/test/mjsunit/debug-exceptions.js b/deps/v8/test/debugger/debug/debug-exceptions.js
index 1a0e222d51..42090770d8 100644
--- a/deps/v8/test/mjsunit/debug-exceptions.js
+++ b/deps/v8/test/debugger/debug/debug-exceptions.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-generator-break-on-stack.js b/deps/v8/test/debugger/debug/debug-generator-break-on-stack.js
index 5a1a9c56c1..6d0d88118b 100644
--- a/deps/v8/test/mjsunit/debug-generator-break-on-stack.js
+++ b/deps/v8/test/debugger/debug/debug-generator-break-on-stack.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/debug-generator-break.js b/deps/v8/test/debugger/debug/debug-generator-break.js
index 34ed82c376..7974ab96b5 100644
--- a/deps/v8/test/mjsunit/debug-generator-break.js
+++ b/deps/v8/test/debugger/debug/debug-generator-break.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/debug-materialized.js b/deps/v8/test/debugger/debug/debug-materialized.js
index 0b01b78df4..dd22e1eb79 100644
--- a/deps/v8/test/mjsunit/debug-materialized.js
+++ b/deps/v8/test/debugger/debug/debug-materialized.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
function dbg(x) {
debugger;
diff --git a/deps/v8/test/mjsunit/debug-optimize.js b/deps/v8/test/debugger/debug/debug-optimize.js
index d1ce63d5a0..1d99da4983 100644
--- a/deps/v8/test/mjsunit/debug-optimize.js
+++ b/deps/v8/test/debugger/debug/debug-optimize.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --use-inlining
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/debug-print.js b/deps/v8/test/debugger/debug/debug-print.js
index b0e141d709..ea7e66be93 100644
--- a/deps/v8/test/mjsunit/debug-print.js
+++ b/deps/v8/test/debugger/debug/debug-print.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
// Make sure printing different element kinds doesn't crash.
diff --git a/deps/v8/test/mjsunit/debug-stack-check-position.js b/deps/v8/test/debugger/debug/debug-stack-check-position.js
index a5570ce904..5684d2b8c4 100644
--- a/deps/v8/test/mjsunit/debug-stack-check-position.js
+++ b/deps/v8/test/debugger/debug/debug-stack-check-position.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var exception = null;
diff --git a/deps/v8/test/debugger/debug/debug-step-2.js b/deps/v8/test/debugger/debug/debug-step-2.js
new file mode 100644
index 0000000000..e47816a4d5
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-step-2.js
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const Debug = new DebugWrapper();
+Debug.enable();
+
+// This test tests that full code compiled without debug break slots
+// is recompiled with debug break slots when debugging is started.
+
+var bp;
+var done = false;
+var step_count = 0;
+
+// Debug event listener which steps until the global variable done is true.
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ if (!done) Debug.stepOver();
+ step_count++;
+ }
+};
+
+// Set the global variables state to prpare the stepping test.
+function prepare_step_test() {
+ done = false;
+ step_count = 0;
+}
+
+// Test function to step through.
+function f() {
+ var i = 1;
+ var j = 2;
+ done = true;
+};
+
+prepare_step_test();
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+bp = Debug.setBreakPoint(f, 1);
+
+prepare_step_test();
+f();
+assertEquals(4, step_count);
+Debug.clearBreakPoint(bp);
+
+// Set a breakpoint on the first var statement (line 1).
+bp = Debug.setBreakPoint(f, 1);
+
+// Step through the function ensuring that the var statements are hit as well.
+prepare_step_test();
+f();
+assertEquals(4, step_count);
+
+// Clear the breakpoint and check that no stepping happens.
+Debug.clearBreakPoint(bp);
+prepare_step_test();
+f();
+assertEquals(0, step_count);
diff --git a/deps/v8/test/debugger/debug/debug-step-3.js b/deps/v8/test/debugger/debug/debug-step-3.js
new file mode 100644
index 0000000000..ea0b5d65d4
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-step-3.js
@@ -0,0 +1,68 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const Debug = new DebugWrapper();
+Debug.enable();
+
+// This test tests that full code compiled without debug break slots
+// is recompiled with debug break slots when debugging is started.
+
+var bp;
+var done = false;
+var step_count = 0;
+var set_bp = false
+
+// Debug event listener which steps until the global variable done is true.
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ if (!done) Debug.stepOver();
+ step_count++;
+ }
+};
+
+// Set the global variables state to prepare the stepping test.
+function prepare_step_test() {
+ done = false;
+ step_count = 0;
+}
+
+// Test function to step through.
+function f() {
+ var a = 0;
+ if (set_bp) { bp = Debug.setBreakPoint(f, 3); }
+ var i = 1;
+ var j = 2;
+ done = true;
+};
+
+prepare_step_test();
+f();
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Make f set a breakpoint with an activation on the stack.
+prepare_step_test();
+set_bp = true;
+f();
+assertEquals(4, step_count);
+Debug.clearBreakPoint(bp);
+
+// Set a breakpoint on the first var statement (line 1).
+set_bp = false;
+bp = Debug.setBreakPoint(f, 3);
+
+// Step through the function ensuring that the var statements are hit as well.
+prepare_step_test();
+f();
+assertEquals(4, step_count);
+
+// Clear the breakpoint and check that no stepping happens.
+Debug.clearBreakPoint(bp);
+prepare_step_test();
+f();
+assertEquals(0, step_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/deps/v8/test/debugger/debug/debug-step-4.js b/deps/v8/test/debugger/debug/debug-step-4.js
new file mode 100644
index 0000000000..95afacac2e
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-step-4.js
@@ -0,0 +1,80 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const Debug = new DebugWrapper();
+Debug.enable();
+
+// Tests how debugger can step over not necessarily in the top frame.
+
+// Simple 3 functions, that protocol their execution state in global
+// variable state.
+var state;
+
+function f() {
+ var a = 1978;
+ for (state[2] = 0; state[2] < 3; state[2]++) {
+ void String(a);
+ }
+}
+function g() {
+ for (state[1] = 0; state[1] < 3; state[1]++) {
+ f();
+ }
+}
+function h() {
+ state = [-1, -1, -1];
+ for (state[0] = 0; state[0] < 3; state[0]++) {
+ g();
+ }
+}
+
+function TestCase(expected_final_state) {
+ var listener_exception = null;
+ var state_snapshot;
+ var listener_state;
+ var bp;
+
+ function listener(event, exec_state, event_data, data) {
+ const location = exec_state.frames[0].location
+ print("Here (" + event + "/" + listener_state + "): " +
+ location.lineNumber + ":" + location.columnNumber);
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (listener_state == 0) {
+ Debug.clearBreakPoint(bp);
+ Debug.stepOver();
+ listener_state = 1;
+ } else if (listener_state == 1) {
+ state_snapshot = String(state);
+ print("State: " + state_snapshot);
+ Debug.setListener(null);
+ listener_state = 2;
+ }
+ }
+ } catch (e) {
+ listener_exception = e;
+ }
+ }
+
+
+ // Add the debug event listener.
+ listener_state = 0;
+ Debug.setListener(listener);
+ bp = Debug.setBreakPoint(f, 1);
+
+ h();
+ Debug.setListener(null);
+ if (listener_exception !== null) {
+ print("Exception caught: " + listener_exception);
+ assertUnreachable();
+ }
+
+ assertEquals(expected_final_state, state_snapshot);
+}
+
+
+// Warm-up -- make sure all is compiled and ready for breakpoint.
+h();
+
+TestCase("0,0,-1");
diff --git a/deps/v8/test/mjsunit/debug-step-end-of-script.js b/deps/v8/test/debugger/debug/debug-step-end-of-script.js
index e8ffcc8bc1..92a381e3cf 100644
--- a/deps/v8/test/mjsunit/debug-step-end-of-script.js
+++ b/deps/v8/test/debugger/debug/debug-step-end-of-script.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var expected = ["debugger;", "debugger;"];
diff --git a/deps/v8/test/mjsunit/debug-step-into-json.js b/deps/v8/test/debugger/debug/debug-step-into-json.js
index d4ba7097c9..1859141063 100644
--- a/deps/v8/test/mjsunit/debug-step-into-json.js
+++ b/deps/v8/test/debugger/debug/debug-step-into-json.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-step-into-valueof.js b/deps/v8/test/debugger/debug/debug-step-into-valueof.js
index b1d9cf1454..ba5390cd71 100644
--- a/deps/v8/test/mjsunit/debug-step-into-valueof.js
+++ b/deps/v8/test/debugger/debug/debug-step-into-valueof.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-step-stub-callfunction.js b/deps/v8/test/debugger/debug/debug-step-stub-callfunction.js
index 50d095b532..8c74d4ca60 100644
--- a/deps/v8/test/mjsunit/debug-step-stub-callfunction.js
+++ b/deps/v8/test/debugger/debug/debug-step-stub-callfunction.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
// Simple debug event handler which counts the number of breaks hit and steps.
diff --git a/deps/v8/test/mjsunit/debug-step-turbofan.js b/deps/v8/test/debugger/debug/debug-step-turbofan.js
index 6c1fceff31..a40114b28b 100644
--- a/deps/v8/test/mjsunit/debug-step-turbofan.js
+++ b/deps/v8/test/debugger/debug/debug-step-turbofan.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --turbo-filter=g --allow-natives-syntax
+// Flags: --turbo-filter=g
// Test that Debug::PrepareForBreakPoints can deal with turbofan code (g)
// on the stack. Without deoptimization support, we will not be able to
diff --git a/deps/v8/test/debugger/debug/debug-step.js b/deps/v8/test/debugger/debug/debug-step.js
new file mode 100644
index 0000000000..3560efd1df
--- /dev/null
+++ b/deps/v8/test/debugger/debug/debug-step.js
@@ -0,0 +1,43 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = new DebugWrapper();
+Debug.enable();
+
+// Simple debug event handler which performs 100 steps and then retrieves
+// the resulting value of "i" in f().
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ if (step_count > 0) {
+ Debug.stepInto();
+ step_count--;
+ } else {
+ const frameid = exec_state.frames[0].callFrameId;
+ result = Debug.evaluate(frameid, "i").value;
+ }
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Test debug event for break point.
+function f() {
+ var i; // Line 1.
+ for (i = 0; i < 100; i++) { // Line 2.
+ x = 1; // Line 3.
+ }
+};
+
+// Set a breakpoint on the for statement (line 1).
+Debug.setBreakPoint(f, 1);
+
+// Check that performing 100 steps will make i 33.
+let step_count = 100;
+let result = -1;
+
+f();
+
+assertEquals(33, result);
diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor-ic.js b/deps/v8/test/debugger/debug/debug-stepin-accessor-ic.js
index 66c0580fd6..d83402a9d7 100644
--- a/deps/v8/test/mjsunit/debug-stepin-accessor-ic.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-accessor-ic.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
function get() {
return 3; // Break
diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor.js b/deps/v8/test/debugger/debug/debug-stepin-accessor.js
index 8513ca0eae..14da5584f4 100644
--- a/deps/v8/test/mjsunit/debug-stepin-accessor.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-accessor.js
@@ -25,9 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js b/deps/v8/test/debugger/debug/debug-stepin-builtin-callback-opt.js
index 7e281ab220..fbf5ab4c0e 100644
--- a/deps/v8/test/mjsunit/debug-stepin-builtin-callback-opt.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-builtin-callback-opt.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin-callback.js b/deps/v8/test/debugger/debug/debug-stepin-builtin-callback.js
index 4fde6e440f..0a09a0b214 100644
--- a/deps/v8/test/mjsunit/debug-stepin-builtin-callback.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-builtin-callback.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Test stepping into callbacks passed to builtin functions.
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin.js b/deps/v8/test/debugger/debug/debug-stepin-builtin.js
index f61098045c..f58019adc7 100644
--- a/deps/v8/test/mjsunit/debug-stepin-builtin.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-builtin.js
@@ -25,9 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js b/deps/v8/test/debugger/debug/debug-stepin-call-function-stub.js
index b3e385bfb5..092f2da151 100644
--- a/deps/v8/test/mjsunit/debug-stepin-call-function-stub.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-call-function-stub.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
@@ -88,8 +86,9 @@ for (var i = 0; i < 3; i++) {
expected_source_line_text = ' return "s"; // expected line';
step_in_count = 2;
// Set a break point and call to invoke the debug event listener.
- Debug.setBreakPoint(testFunction, 1, 0);
+ const breakid = Debug.setBreakPoint(testFunction, 1, 0);
testFunction();
+ Debug.clearBreakPoint(breakid);
assertNull(exception);
assertEquals(3, state);
}
@@ -104,8 +103,9 @@ for (var i = 0; i < 3; i++) {
expected_source_line_text = ' return "s2"; // expected line';
step_in_count = 1;
// Set a break point and call to invoke the debug event listener.
- Debug.setBreakPoint(testFunction2, 2, 0);
+ const breakid = Debug.setBreakPoint(testFunction2, 2, 0);
testFunction2();
+ Debug.clearBreakPoint(breakid);
assertNull(exception);
assertEquals(3, state);
}
diff --git a/deps/v8/test/mjsunit/debug-stepin-construct-call.js b/deps/v8/test/debugger/debug/debug-stepin-construct-call.js
index 7dbf7b1d28..c405a05392 100644
--- a/deps/v8/test/mjsunit/debug-stepin-construct-call.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-construct-call.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var break_count = 0;
var exception = null;
@@ -31,9 +30,7 @@ function g() {
new f();
}
-Debug.setBreakPoint(g, 6, Debug.BreakPositionAlignment.BreakPosition);
-print(Debug.showBreakPoints(g, undefined,
- Debug.BreakPositionAlignment.BreakPosition));
+Debug.setBreakPoint(g, 6);
g();
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-stepin-constructor.js b/deps/v8/test/debugger/debug/debug-stepin-constructor.js
index 5549814a65..b6aaec8809 100644
--- a/deps/v8/test/mjsunit/debug-stepin-constructor.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-constructor.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
// Simple debug event handler which counts the number of breaks hit and steps.
@@ -38,9 +36,6 @@ function listener(event, exec_state, event_data, data) {
if (exec_state.frameCount() > 1) {
exec_state.prepareStep(Debug.StepAction.StepIn);
}
-
- // Test that there is a script.
- assertTrue(typeof(event_data.func().script()) == 'object');
}
};
diff --git a/deps/v8/test/mjsunit/debug-stepin-foreach.js b/deps/v8/test/debugger/debug/debug-stepin-foreach.js
index 69ce3efab7..94a6341368 100644
--- a/deps/v8/test/mjsunit/debug-stepin-foreach.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-foreach.js
@@ -2,24 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
// Tests stepping into through Array.prototype.forEach callbacks.
Debug = debug.Debug
var exception = null;
var break_count = 0;
-var expected_breaks = -1;
+var expected_breaks = 11;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
- if (!break_count) {
- // Count number of expected breakpoints in this source file.
- var source_text = exec_state.frame(0).func().script().source();
- expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
- print("Expected breaks: " + expected_breaks);
- }
var source = exec_state.frame(0).sourceLineText();
print("paused at: " + source);
assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
diff --git a/deps/v8/test/mjsunit/debug-stepin-function-call.js b/deps/v8/test/debugger/debug/debug-stepin-function-call.js
index 8af7aad19d..0227ccd947 100644
--- a/deps/v8/test/mjsunit/debug-stepin-function-call.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-function-call.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-stepin-property-function-call.js b/deps/v8/test/debugger/debug/debug-stepin-property-function-call.js
index dff83c7e14..0fdb94ed2b 100644
--- a/deps/v8/test/mjsunit/debug-stepin-property-function-call.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-property-function-call.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --nocrankshaft
-// Get the Debug object exposed from the debug context global object.
+// Flags: --nocrankshaft
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-stepnext-do-while.js b/deps/v8/test/debugger/debug/debug-stepnext-do-while.js
index bbb18bc436..92382b28fd 100644
--- a/deps/v8/test/mjsunit/debug-stepnext-do-while.js
+++ b/deps/v8/test/debugger/debug/debug-stepnext-do-while.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-stepout-recursive-function.js b/deps/v8/test/debugger/debug/debug-stepout-recursive-function.js
index 9082294b78..3c674a37a6 100644
--- a/deps/v8/test/mjsunit/debug-stepout-recursive-function.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-recursive-function.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part1.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part1.js
index 27a91d4d02..32acc2417a 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part1.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part1.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part2.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part2.js
index 5b0b17b9e4..ef1acd53b8 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part2.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part2.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part3.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part3.js
index 2584914681..ac5f8f2fc6 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part3.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part3.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part4.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part4.js
index 765bfe246b..ea7e8afe5f 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part4.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part4.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part5.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part5.js
index e819e293ce..ee67c197e1 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part5.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part5.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part6.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part6.js
index fb009e6f7f..9545b5b80c 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part6.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part6.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part7.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part7.js
index 969fb7f9ed..1ab2065575 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part7.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part7.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-scope-part8.js b/deps/v8/test/debugger/debug/debug-stepout-scope-part8.js
index 790caca7c3..5b3585cc57 100644
--- a/deps/v8/test/mjsunit/debug-stepout-scope-part8.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-scope-part8.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Check that the ScopeIterator can properly recreate the scope at
// every point when stepping through functions.
diff --git a/deps/v8/test/mjsunit/debug-stepout-to-builtin.js b/deps/v8/test/debugger/debug/debug-stepout-to-builtin.js
index 2e5e7a25f8..905e07fa3e 100644
--- a/deps/v8/test/mjsunit/debug-stepout-to-builtin.js
+++ b/deps/v8/test/debugger/debug/debug-stepout-to-builtin.js
@@ -25,9 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/es6/debug-break-default-constructor.js b/deps/v8/test/debugger/debug/es6/debug-break-default-constructor.js
index fc8bebd13d..d13e0a7eeb 100644
--- a/deps/v8/test/mjsunit/es6/debug-break-default-constructor.js
+++ b/deps/v8/test/debugger/debug/es6/debug-break-default-constructor.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js b/deps/v8/test/debugger/debug/es6/debug-promises/async-task-event.js
index 0b0fa1e64f..fcf80825aa 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/async-task-event.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/async-task-event.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js b/deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js
index 73718eec7b..71b07476d3 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/evaluate-across-microtasks.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/evaluate-across-microtasks.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var listenerComplete = false;
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/promise-all-caught.js b/deps/v8/test/debugger/debug/es6/debug-promises/promise-all-caught.js
index 2c940ce217..639564a306 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/promise-all-caught.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/promise-all-caught.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and a
// Promise p3 created by Promise.all has a catch handler, and is rejected
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/promise-race-caught.js b/deps/v8/test/debugger/debug/es6/debug-promises/promise-race-caught.js
index dd3ca83ee8..d75ddea890 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/promise-race-caught.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/promise-race-caught.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and a
// Promise p3 created by Promise.race has a catch handler, and is rejected
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reentry.js b/deps/v8/test/debugger/debug/es6/debug-promises/reentry.js
index cc98ed9efd..bb7f48fb6b 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reentry.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/reentry.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
// Test reentry of special try catch for Promises.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js b/deps/v8/test/debugger/debug/es6/debug-promises/reject-after-resolve.js
index 5ec2da50e9..0bf3c59020 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-after-resolve.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/reject-after-resolve.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we listen to uncaught exceptions and
// the Promise is rejected in a chained closure after it has been resolved.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js b/deps/v8/test/debugger/debug/es6/debug-promises/reject-caught-late.js
index 44eb76728f..b54516ec39 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-late.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/reject-caught-late.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions, the Promise
// is rejected, and a catch handler is installed right before the rejection.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js b/deps/v8/test/debugger/debug/es6/debug-promises/reject-caught-uncaught.js
index b2fe8b0a45..873e8ca58d 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/reject-caught-uncaught.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the to-be-rejected Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js b/deps/v8/test/debugger/debug/es6/debug-promises/stepin-constructor.js
index 6914ae0036..dd18968a1c 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/stepin-constructor.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/stepin-constructor.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
var exception = null;
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js b/deps/v8/test/debugger/debug/es6/debug-promises/stepin-handler.js
index 8083c17103..721423a968 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/stepin-handler.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/stepin-handler.js
@@ -2,24 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --noalways-opt
+// Flags: --noalways-opt
// Tests stepping into through Promises.
Debug = debug.Debug
var exception = null;
var break_count = 0;
-var expected_breaks = -1;
+const expected_breaks = 9;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
- if (!break_count) {
- // Count number of expected breakpoints in this source file.
- var source_text = exec_state.frame(0).func().script().source();
- expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
- print("Expected breaks: " + expected_breaks);
- }
var source = exec_state.frame(0).sourceLineText();
print("paused at: " + source);
assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js b/deps/v8/test/debugger/debug/es6/debug-promises/throw-caught-late.js
index 0399e5cc34..a8109faea3 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-late.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/throw-caught-late.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions, the Promise
// throws, and a catch handler is installed right before throwing.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js b/deps/v8/test/debugger/debug/es6/debug-promises/throw-caught-uncaught.js
index 8e1524d519..756c0f1301 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/throw-caught-uncaught.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the exception thrown in a Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js b/deps/v8/test/debugger/debug/es6/debug-promises/throw-eventually-caught.js
index 19610f77e8..76cce14a1e 100644
--- a/deps/v8/test/mjsunit/es6/debug-promises/throw-eventually-caught.js
+++ b/deps/v8/test/debugger/debug/es6/debug-promises/throw-eventually-caught.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Test debug events when we only listen to uncaught exceptions and
// there is a catch handler for the to-be-rejected Promise.
diff --git a/deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-assignment.js
index 4fde928b38..d46f670ee1 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-destructuring-assignment.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-assignment.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var exception = null;
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/es6/debug-step-destructuring-bind.js b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
index f670f525af..b6446cfd6d 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-destructuring-bind.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-destructuring-bind.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var exception = null;
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js b/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
index 6c887ab08d..fb988bcb57 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-into-class-extends.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-into-class-extends.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
'use strict';
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-constructor.js b/deps/v8/test/debugger/debug/es6/debug-step-into-constructor.js
index 96cdc93159..07290593bf 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-into-constructor.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-into-constructor.js
@@ -2,21 +2,24 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
'use strict';
var Debug = debug.Debug
var done, stepCount;
+var exception = null;
function listener(event, execState, eventData, data) {
- if (event == Debug.DebugEvent.Break) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
if (!done) {
execState.prepareStep(Debug.StepAction.StepIn);
var s = execState.frame().sourceLineText();
assertTrue(s.indexOf('// ' + stepCount + '.') !== -1);
stepCount++;
}
+ } catch (e) {
+ exception = e;
}
};
@@ -24,14 +27,14 @@ Debug.setListener(listener);
class Base {
- constructor() { // 1.
- var x = 1; // 2.
- var y = 2; // 3.
- done = true; // 4.
+ constructor() {
+ var x = 1; // 1.
+ var y = 2; // 2.
+ done = true; // 3.
}
}
-class Derived extends Base {}
+class Derived extends Base {} // 0.
(function TestBreakPointInConstructor() {
@@ -40,7 +43,7 @@ class Derived extends Base {}
var bp = Debug.setBreakPoint(Base, 0);
new Base();
- assertEquals(1, stepCount);
+ assertEquals(4, stepCount);
Debug.clearBreakPoint(bp);
})();
@@ -52,7 +55,7 @@ class Derived extends Base {}
var bp = Debug.setBreakPoint(Base, 0);
new Derived();
- assertEquals(1, stepCount);
+ assertEquals(4, stepCount);
Debug.clearBreakPoint(bp);
})();
@@ -60,15 +63,15 @@ class Derived extends Base {}
(function TestStepInto() {
done = false;
- stepCount = 0;
+ stepCount = -1;
function f() {
- new Derived(); // 0.
+ new Derived(); // -1.
}
var bp = Debug.setBreakPoint(f, 0);
f();
- assertEquals(1, stepCount);
+ assertEquals(4, stepCount);
Debug.clearBreakPoint(bp);
})();
@@ -76,17 +79,17 @@ class Derived extends Base {}
(function TestExtraIndirection() {
done = false;
- stepCount = 0;
+ stepCount = -2;
- class Derived2 extends Derived {}
+ class Derived2 extends Derived {} // -1.
function f() {
- new Derived2(); // 0.
+ new Derived2(); // -2.
}
var bp = Debug.setBreakPoint(f, 0);
f();
- assertEquals(1, stepCount);
+ assertEquals(4, stepCount);
Debug.clearBreakPoint(bp);
})();
@@ -94,20 +97,21 @@ class Derived extends Base {}
(function TestBoundClass() {
done = false;
- stepCount = 0;
+ stepCount = -1;
var bound = Derived.bind(null);
function f() {
- new bound(); // 0.
+ new bound(); // -1.
}
var bp = Debug.setBreakPoint(f, 0);
f();
- assertEquals(1, stepCount);
+ assertEquals(4, stepCount);
Debug.clearBreakPoint(bp);
})();
Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js b/deps/v8/test/debugger/debug/es6/debug-step-into-regexp-subclass.js
index 5e5eb47d7f..5e70fad274 100644
--- a/deps/v8/test/mjsunit/es6/debug-step-into-regexp-subclass.js
+++ b/deps/v8/test/debugger/debug/es6/debug-step-into-regexp-subclass.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js b/deps/v8/test/debugger/debug/es6/debug-stepin-collections-foreach.js
index 5551843cb2..32601d5e61 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-collections-foreach.js
+++ b/deps/v8/test/debugger/debug/es6/debug-stepin-collections-foreach.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-generators.js b/deps/v8/test/debugger/debug/es6/debug-stepin-generators.js
index 081dfb7063..5468ea0491 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-generators.js
+++ b/deps/v8/test/debugger/debug/es6/debug-stepin-generators.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js b/deps/v8/test/debugger/debug/es6/debug-stepin-tailcalls.js
index 6020ba9d50..6483d5b722 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-tailcalls.js
+++ b/deps/v8/test/debugger/debug/es6/debug-stepin-tailcalls.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-tailcalls
+// Flags: --harmony-tailcalls
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js b/deps/v8/test/debugger/debug/es6/debug-stepout-tailcalls.js
index db0878d7a9..2e94c5058b 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepout-tailcalls.js
+++ b/deps/v8/test/debugger/debug/es6/debug-stepout-tailcalls.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --harmony-tailcalls
+// Flags: --harmony-tailcalls
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/generators-relocation.js b/deps/v8/test/debugger/debug/es6/generators-relocation.js
index 2636f52d7b..78413fde6e 100644
--- a/deps/v8/test/mjsunit/es6/generators-relocation.js
+++ b/deps/v8/test/debugger/debug/es6/generators-relocation.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-3280.js b/deps/v8/test/debugger/debug/es6/regress/regress-3280.js
index 2dadd02840..30d970a343 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-3280.js
+++ b/deps/v8/test/debugger/debug/es6/regress/regress-3280.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/for-in-opt.js b/deps/v8/test/debugger/debug/for-in-opt.js
index bc59a1b087..405199d538 100644
--- a/deps/v8/test/mjsunit/for-in-opt.js
+++ b/deps/v8/test/debugger/debug/for-in-opt.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-basic.js b/deps/v8/test/debugger/debug/harmony/async-debug-basic.js
index a4909729c5..c22662c01b 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-basic.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-basic.js
@@ -2,9 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-await --allow-natives-syntax --expose-debug-as debug
+// Flags: --harmony-async-await
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
listenerComplete = false;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases.js
index 76296ef7f1..99dc515bc2 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+// Flags: --harmony-async-await
Debug = debug.Debug
diff --git a/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases0.js b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases0.js
new file mode 100644
index 0000000000..6862e54a12
--- /dev/null
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases0.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+// Files: test/debugger/debug/harmony/async-debug-caught-exception-cases.js
+
+runPart(0);
diff --git a/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases1.js b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases1.js
new file mode 100644
index 0000000000..17b534e8c1
--- /dev/null
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases1.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+// Files: test/debugger/debug/harmony/async-debug-caught-exception-cases.js
+
+runPart(1);
diff --git a/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases2.js b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases2.js
new file mode 100644
index 0000000000..06583494a7
--- /dev/null
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+// Files: test/debugger/debug/harmony/async-debug-caught-exception-cases.js
+
+runPart(2);
diff --git a/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases3.js b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases3.js
new file mode 100644
index 0000000000..451f8b9636
--- /dev/null
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception-cases3.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await
+// Files: test/debugger/debug/harmony/async-debug-caught-exception-cases.js
+
+runPart(3);
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception.js
index 047b421d3d..391a41cd15 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-caught-exception.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
+// Flags: --harmony-async-await
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-abort-at-break.js
index be1f8056a8..d57db4e4e1 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-abort-at-break.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-abort-at-break.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-continue-at-break.js
index 5099b2f53e..3ae5656738 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-continue-at-break.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-continue-at-break.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-in-and-out.js
index 30fe2d6053..388fa721cf 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-in-and-out.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-in-and-out.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-in-out-out.js
index c2f34bb029..09e3c6bf5e 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-in-out-out.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-in-out-out.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-in.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-in.js
index 0a7de1a2a3..64366a84d3 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-in.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-in.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-nested.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-nested.js
index adf7a51432..9736195b46 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-nested.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-nested.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-next-constant.js
index cea86d7a2f..d342790d7e 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-next-constant.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-next-constant.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-next.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-next.js
index 952d88dd85..59133eb46e 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-next.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-next.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-step-out.js b/deps/v8/test/debugger/debug/harmony/async-debug-step-out.js
index 41779acb54..b675523123 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-step-out.js
+++ b/deps/v8/test/debugger/debug/harmony/async-debug-step-out.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --harmony-async-await
+// Flags: --harmony-async-await
var Debug = debug.Debug;
var step_count = 0;
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js b/deps/v8/test/debugger/debug/harmony/debug-async-break-on-stack.js
index d3d9d8bef6..5173c87ee4 100644
--- a/deps/v8/test/mjsunit/harmony/debug-async-break-on-stack.js
+++ b/deps/v8/test/debugger/debug/harmony/debug-async-break-on-stack.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
-// Flags: --harmony-async-await --allow-natives-syntax
+// Flags: --harmony-async-await
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-break.js b/deps/v8/test/debugger/debug/harmony/debug-async-break.js
index 3b6b71baca..dac3474f75 100644
--- a/deps/v8/test/mjsunit/harmony/debug-async-break.js
+++ b/deps/v8/test/debugger/debug/harmony/debug-async-break.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
-// Flags: --harmony-async-await --allow-natives-syntax
+// Flags: --harmony-async-await
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js b/deps/v8/test/debugger/debug/harmony/debug-async-function-async-task-event.js
index 90e13d8659..d5d506e153 100644
--- a/deps/v8/test/mjsunit/harmony/debug-async-function-async-task-event.js
+++ b/deps/v8/test/debugger/debug/harmony/debug-async-function-async-task-event.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-async-await --expose-debug-as debug --allow-natives-syntax
+// Flags: --harmony-async-await
// The test observes the callbacks that async/await makes to the inspector
// to make accurate stack traces. The pattern is based on saving a stack once
@@ -43,6 +43,7 @@ function assertLog(msg) {
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.AsyncTaskEvent) return;
try {
+ if ("Promise.resolve" == event_data.name()) return;
if (base_id < 0)
base_id = event_data.id();
var id = event_data.id() - base_id + 1;
@@ -73,3 +74,24 @@ resolver();
%RunMicrotasks();
assertNull(exception);
+
+Debug.clearBreakOnUncaughtException();
+Debug.setListener(null);
+
+var resolve;
+var turnOnListenerPromise = new Promise(r => resolve = r);
+async function confused() {
+ await turnOnListenerPromise;
+ throw foo
+}
+
+confused();
+
+Promise.resolve().then(() => {
+ Debug.setListener(listener);
+ Debug.setBreakOnUncaughtException();
+ resolve();
+});
+
+%RunMicrotasks();
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js b/deps/v8/test/debugger/debug/ignition/debug-break-mixed-stack.js
index 878a918d5c..bdf8cbedf5 100644
--- a/deps/v8/test/mjsunit/ignition/debug-break-mixed-stack.js
+++ b/deps/v8/test/debugger/debug/ignition/debug-break-mixed-stack.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/ignition/debug-break-on-stack.js b/deps/v8/test/debugger/debug/ignition/debug-break-on-stack.js
index d2577b38de..846fb4c82d 100644
--- a/deps/v8/test/mjsunit/ignition/debug-break-on-stack.js
+++ b/deps/v8/test/debugger/debug/ignition/debug-break-on-stack.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/ignition/debug-break.js b/deps/v8/test/debugger/debug/ignition/debug-break.js
index 8237d4a552..3678587adb 100644
--- a/deps/v8/test/mjsunit/ignition/debug-break.js
+++ b/deps/v8/test/debugger/debug/ignition/debug-break.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/ignition/debug-scope-on-return.js b/deps/v8/test/debugger/debug/ignition/debug-scope-on-return.js
index 5be6de6e65..70a80b1981 100644
--- a/deps/v8/test/mjsunit/ignition/debug-scope-on-return.js
+++ b/deps/v8/test/debugger/debug/ignition/debug-scope-on-return.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
// Check that the we are still in function context when we break on return.
diff --git a/deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js b/deps/v8/test/debugger/debug/ignition/debug-step-mixed-stack.js
index 6566431175..78b54b455a 100644
--- a/deps/v8/test/mjsunit/ignition/debug-step-mixed-stack.js
+++ b/deps/v8/test/debugger/debug/ignition/debug-step-mixed-stack.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/ignition/debugger-statement.js b/deps/v8/test/debugger/debug/ignition/debugger-statement.js
index 9c2204e4d2..05f6c07623 100644
--- a/deps/v8/test/mjsunit/ignition/debugger-statement.js
+++ b/deps/v8/test/debugger/debug/ignition/debugger-statement.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --ignition-filter=f --expose-debug-as debug
+// Flags: --ignition-filter=f
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress-3225.js b/deps/v8/test/debugger/debug/regress-3225.js
index 51fac89bf6..ed17f0ee10 100644
--- a/deps/v8/test/mjsunit/regress-3225.js
+++ b/deps/v8/test/debugger/debug/regress-3225.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/debug-prepare-step-in.js b/deps/v8/test/debugger/debug/regress/debug-prepare-step-in.js
index 93474da695..fbbc1c935a 100644
--- a/deps/v8/test/mjsunit/regress/debug-prepare-step-in.js
+++ b/deps/v8/test/debugger/debug/regress/debug-prepare-step-in.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax --expose-gc
-// Get the Debug object exposed from the debug context global object.
+// Flags: --expose-gc
Debug = debug.Debug
function breakListener(event, exec_state, event_data, data) {
diff --git a/deps/v8/test/mjsunit/regress/regress-1170187.js b/deps/v8/test/debugger/debug/regress/regress-1170187.js
index 6aa2896751..165a308dc5 100644
--- a/deps/v8/test/mjsunit/regress/regress-1170187.js
+++ b/deps/v8/test/debugger/debug/regress/regress-1170187.js
@@ -25,12 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Make sure that the retreival of local variables are performed correctly even
// when an adapter frame is present.
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
listenerCalled = false;
@@ -38,7 +36,8 @@ exception = false;
function checkName(name) {
- assertTrue(name == 'a' || name == 'b' || name == 'c');
+ const validNames = new Set([ 'a', 'b', 'c', 'x', 'y' ]);
+ assertTrue(validNames.has(name));
}
diff --git a/deps/v8/test/mjsunit/regress/regress-131994.js b/deps/v8/test/debugger/debug/regress/regress-131994.js
index 7f600959da..ed90f62b6f 100644
--- a/deps/v8/test/mjsunit/regress/regress-131994.js
+++ b/deps/v8/test/debugger/debug/regress/regress-131994.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Test that a variable in the local scope that shadows a context-allocated
// variable is correctly resolved when being evaluated in the debugger.
diff --git a/deps/v8/test/mjsunit/regress/regress-147497.js b/deps/v8/test/debugger/debug/regress/regress-147497.js
index f61d0c664d..c0369d0b8a 100644
--- a/deps/v8/test/mjsunit/regress/regress-147497.js
+++ b/deps/v8/test/debugger/debug/regress/regress-147497.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-1523.js b/deps/v8/test/debugger/debug/regress/regress-1523.js
index 30b3d59eec..2a6a99cfe5 100644
--- a/deps/v8/test/mjsunit/regress/regress-1523.js
+++ b/deps/v8/test/debugger/debug/regress/regress-1523.js
@@ -27,8 +27,6 @@
// See: http://code.google.com/p/v8/issues/detail?id=1523
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/regress-1586.js b/deps/v8/test/debugger/debug/regress/regress-1586.js
index 9c805a7a4b..9975d3657f 100644
--- a/deps/v8/test/mjsunit/regress/regress-1586.js
+++ b/deps/v8/test/debugger/debug/regress/regress-1586.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// Test debug evaluation for functions without local context, but with
// nested catch contexts.
@@ -41,7 +40,6 @@ function f() {
}
};
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
function listener(event, exec_state, event_data, data) {
diff --git a/deps/v8/test/mjsunit/regress/regress-269.js b/deps/v8/test/debugger/debug/regress/regress-269.js
index ce165e0abf..83b15acbca 100644
--- a/deps/v8/test/mjsunit/regress/regress-269.js
+++ b/deps/v8/test/debugger/debug/regress/regress-269.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
function listener(event, exec_state, event_data, data) {
diff --git a/deps/v8/test/mjsunit/regress/regress-2825.js b/deps/v8/test/debugger/debug/regress/regress-2825.js
index 6ffd8ec150..01bdddff88 100644
--- a/deps/v8/test/mjsunit/regress/regress-2825.js
+++ b/deps/v8/test/debugger/debug/regress/regress-2825.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
// Do not edit this file with an editor that replaces \r with \r\n.
// Variable definitions for i0 through i3 are each terminated with \r.
diff --git a/deps/v8/test/mjsunit/regress/regress-325676.js b/deps/v8/test/debugger/debug/regress/regress-325676.js
index 6c23d0a6bc..b9864e81d6 100644
--- a/deps/v8/test/mjsunit/regress/regress-325676.js
+++ b/deps/v8/test/debugger/debug/regress/regress-325676.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
// If a function parameter is forced to be context allocated,
// debug evaluate need to resolve it to a context slot instead of
diff --git a/deps/v8/test/mjsunit/regress/regress-3717.js b/deps/v8/test/debugger/debug/regress/regress-3717.js
index 1f7bc7d126..c9949fcd6b 100644
--- a/deps/v8/test/mjsunit/regress/regress-3717.js
+++ b/deps/v8/test/debugger/debug/regress/regress-3717.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --no-lazy
+// Flags: --no-lazy
Debug = debug.Debug;
var exception = null;
diff --git a/deps/v8/test/mjsunit/regress/regress-392114.js b/deps/v8/test/debugger/debug/regress/regress-392114.js
index e5cf1cde37..b9ca4ed2a7 100644
--- a/deps/v8/test/mjsunit/regress/regress-392114.js
+++ b/deps/v8/test/debugger/debug/regress/regress-392114.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-4309-1.js b/deps/v8/test/debugger/debug/regress/regress-4309-1.js
index a13fd43a4a..2e7ef47c09 100644
--- a/deps/v8/test/mjsunit/regress/regress-4309-1.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4309-1.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-4309-3.js b/deps/v8/test/debugger/debug/regress/regress-4309-3.js
index 687dd4c44a..026a7cb5cb 100644
--- a/deps/v8/test/mjsunit/regress/regress-4309-3.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4309-3.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-4320.js b/deps/v8/test/debugger/debug/regress/regress-4320.js
index df6a99b28f..5d88cc33d4 100644
--- a/deps/v8/test/mjsunit/regress/regress-4320.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4320.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-4703.js b/deps/v8/test/debugger/debug/regress/regress-4703.js
index dad8a97874..68832a7a6b 100644
--- a/deps/v8/test/mjsunit/regress/regress-4703.js
+++ b/deps/v8/test/debugger/debug/regress/regress-4703.js
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+
+var exception = null;
function listener(event, exec_state, event_data, data) {
- if (event != Debug.DebugEvent.Break) return;
+ if (event != debug.Debug.DebugEvent.Break) return;
try {
var all_scopes = exec_state.frame().allScopes();
assertEquals([ debug.ScopeType.Block,
@@ -15,6 +16,7 @@ function listener(event, exec_state, event_data, data) {
all_scopes.map(scope => scope.scopeType()));
} catch (e) {
exception = e;
+ print(e);
}
}
@@ -28,3 +30,6 @@ debug.Debug.setListener(listener);
}
debugger;
})();
+
+debug.Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-491536.js b/deps/v8/test/debugger/debug/regress/regress-491536.js
index 6e6e0c6e14..cbe03687bd 100644
--- a/deps/v8/test/mjsunit/regress/regress-491536.js
+++ b/deps/v8/test/debugger/debug/regress/regress-491536.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
if (this["debug"]) debug.Debug.setListener(function() {});
var source = "var outer = 0; function test() {'use strict'; outer = 1; } test(); print('ok');";
diff --git a/deps/v8/test/debugger/debug/regress/regress-5071.js b/deps/v8/test/debugger/debug/regress/regress-5071.js
new file mode 100644
index 0000000000..4ad1d7ed7c
--- /dev/null
+++ b/deps/v8/test/debugger/debug/regress/regress-5071.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var Debug = debug.Debug;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertEquals(2, exec_state.frameCount());
+ assertEquals("a", exec_state.frame(0).localName(0));
+ assertEquals(1, exec_state.frame(0).localValue(0).value());
+ assertEquals(1, exec_state.frame(0).localCount());
+ } catch (e) {
+ exception = e;
+ }
+}
+
+function f() {
+ var a = 1;
+ {
+ let b = 2;
+ debugger;
+ }
+}
+
+Debug.setListener(listener);
+f();
+Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-514362.js b/deps/v8/test/debugger/debug/regress/regress-514362.js
index f69cfecebe..beebf4c063 100644
--- a/deps/v8/test/mjsunit/regress/regress-514362.js
+++ b/deps/v8/test/debugger/debug/regress/regress-514362.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
function bar(x) { debugger; }
function foo() { bar(arguments[0]); }
diff --git a/deps/v8/test/mjsunit/regress/regress-5279.js b/deps/v8/test/debugger/debug/regress/regress-5279.js
index 847f5df054..4a30ac5f1c 100644
--- a/deps/v8/test/mjsunit/regress/regress-5279.js
+++ b/deps/v8/test/debugger/debug/regress/regress-5279.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-5559.js b/deps/v8/test/debugger/debug/regress/regress-5559.js
index c6f32575f5..7c29f493ce 100644
--- a/deps/v8/test/mjsunit/regress/regress-5559.js
+++ b/deps/v8/test/debugger/debug/regress/regress-5559.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
diff --git a/deps/v8/test/mjsunit/regress/regress-617882.js b/deps/v8/test/debugger/debug/regress/regress-617882.js
index acc332c59b..acc332c59b 100644
--- a/deps/v8/test/mjsunit/regress/regress-617882.js
+++ b/deps/v8/test/debugger/debug/regress/regress-617882.js
diff --git a/deps/v8/test/mjsunit/regress/regress-662935.js b/deps/v8/test/debugger/debug/regress/regress-662935.js
index b15f83a1e9..04ed6d9ddd 100644
--- a/deps/v8/test/mjsunit/regress/regress-662935.js
+++ b/deps/v8/test/debugger/debug/regress/regress-662935.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug
function overflow() {
diff --git a/deps/v8/test/mjsunit/regress/regress-94873.js b/deps/v8/test/debugger/debug/regress/regress-94873.js
index b61bc0c15d..ea71e8ebb9 100644
--- a/deps/v8/test/mjsunit/regress/regress-94873.js
+++ b/deps/v8/test/debugger/debug/regress/regress-94873.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug;
function sendCommand(state, cmd) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-107996.js b/deps/v8/test/debugger/debug/regress/regress-crbug-107996.js
index dfe07e59de..f671ca8a29 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-107996.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-107996.js
@@ -25,20 +25,23 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
Debug = debug.Debug;
Debug.setListener(listener);
-
+var exception = null;
var fourteen;
var four_in_debugger = [];
function listener(event, exec_state, event_data, data) {
if (event == Debug.DebugEvent.Break) {
- for (var i = 0; i < exec_state.frameCount(); i++) {
- var frame = exec_state.frame(i);
- four_in_debugger[i] = frame.evaluate("four", false).value();
+ try {
+ for (var i = 0; i < exec_state.frameCount() - 1; i++) {
+ var frame = exec_state.frame(i);
+ four_in_debugger[i] = frame.evaluate("four", false).value();
+ }
+ } catch (e) {
+ exception = e;
}
}
}
@@ -62,3 +65,4 @@ assertEquals(4, four_in_debugger[1]);
assertEquals(4, four_in_debugger[2]);
Debug.setListener(null);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js b/deps/v8/test/debugger/debug/regress/regress-crbug-323936.js
index ca543b068a..e89f7a0a02 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-323936.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-323936.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-387599.js b/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
index 753dcfa3a6..bf15cbaec4 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-387599.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-387599.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
Debug = debug.Debug;
Debug.setListener(function() {});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js b/deps/v8/test/debugger/debug/regress/regress-crbug-401915.js
index 96dce04868..7c791e4d18 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-401915.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
Debug = debug.Debug;
Debug.setListener(function() {});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405491.js b/deps/v8/test/debugger/debug/regress/regress-crbug-405491.js
index b63378113f..b63378113f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-405491.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-405491.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-405922.js b/deps/v8/test/debugger/debug/regress/regress-crbug-405922.js
index 31b432de19..116eabda1e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-405922.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-405922.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-debug-as debug
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-409614.js b/deps/v8/test/debugger/debug/regress/regress-crbug-409614.js
index 1a9a77746a..609aa8031e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-409614.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-409614.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug;
var exception = null;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-467180.js b/deps/v8/test/debugger/debug/regress/regress-crbug-467180.js
index a07c6a6466..c2333fca93 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-467180.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-467180.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
function f() {
for (var i = 10; i < 14; i++) { // 1
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js b/deps/v8/test/debugger/debug/regress/regress-crbug-568477-1.js
index ed269a9d7d..bf1aacb88e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-568477-1.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-568477-1.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var expected = ["debugger;", "var x = y;", "debugger;", "var x = y;"];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js b/deps/v8/test/debugger/debug/regress/regress-crbug-568477-3.js
index 812db2b8f7..cd4c60df3e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-568477-3.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-568477-3.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var expected = ["debugger;", "var x = y;", "debugger;", "var x = y;"];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js b/deps/v8/test/debugger/debug/regress/regress-crbug-568477-4.js
index f0e3e901db..706e6c8ebb 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-568477-4.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-568477-4.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var expected =
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-582048.js b/deps/v8/test/debugger/debug/regress/regress-crbug-582048.js
index 6d98f488e3..77efe3cb78 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-582048.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-582048.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var Debug = debug.Debug;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-582051.js b/deps/v8/test/debugger/debug/regress/regress-crbug-582051.js
index 93f4e70dfb..65dea825a2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-582051.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-582051.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
var test_y = false;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-609046.js b/deps/v8/test/debugger/debug/regress/regress-crbug-609046.js
index 10b63af3e3..1bc91fbd74 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-609046.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-609046.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
// Test that hidden scopes are correctly walked in the scope chain.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-633999.js b/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
index 3f16908610..18ca4d0cb7 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-633999.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug --allow-natives-syntax --noturbo
+// Flags: --noturbo
var Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js b/deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js
index 2f81d0cb54..995f787c11 100644
--- a/deps/v8/test/mjsunit/regress/regress-debug-code-recompilation.js
+++ b/deps/v8/test/debugger/debug/regress/regress-debug-code-recompilation.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --hydrogen-filter=Debug.setBreakPoint
-// Flags: --expose-debug-as debug
+// Flags: --hydrogen-filter=Debug.setBreakPoint
Debug = debug.Debug
Debug.setListener(function(){});
diff --git a/deps/v8/test/mjsunit/regress/regress-debug-context-load.js b/deps/v8/test/debugger/debug/regress/regress-debug-context-load.js
index 0b3c275f99..d9fe074d91 100644
--- a/deps/v8/test/mjsunit/regress/regress-debug-context-load.js
+++ b/deps/v8/test/debugger/debug/regress/regress-debug-context-load.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
Debug = debug.Debug;
Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js b/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
index c637be5497..01204fc169 100644
--- a/deps/v8/test/mjsunit/regress/regress-opt-after-debug-deopt.js
+++ b/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
if (!%IsConcurrentRecompilationSupported()) {
diff --git a/deps/v8/test/debugger/debug/wasm/asm-debug.js b/deps/v8/test/debugger/debug/wasm/asm-debug.js
new file mode 100644
index 0000000000..52852ae8e9
--- /dev/null
+++ b/deps/v8/test/debugger/debug/wasm/asm-debug.js
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --validate-asm
+
+Debug = debug.Debug
+
+// Initialized in setup().
+var exception;
+var break_count;
+var num_wasm_scripts;
+var module;
+
+var filename = '(?:[^ ]+/)?test/mjsunit/wasm/asm-debug.js';
+filename = filename.replace(/\//g, '[/\\\\]');
+
+var expected_stack_entries = [];
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ ++break_count;
+ // Request frame details.
+ var num_frames = exec_state.frameCount();
+ assertEquals(
+ expected_stack_entries.length, num_frames, 'number of frames');
+ print('Stack Trace (length ' + num_frames + '):');
+ for (var i = 0; i < num_frames; ++i) {
+ var frame = exec_state.frame(i);
+ var script = frame.script();
+ assertNotNull(script);
+ var line = frame.sourceLine() + 1;
+ var column = frame.sourceColumn() + 1;
+ var funcName = frame.func().name();
+ var name = script.name();
+ print(
+ ' [' + i + '] ' + funcName + ' (' + name + ':' + line + ':' +
+ column + ')');
+ assertMatches(filename, name, 'name');
+ assertEquals(
+ expected_stack_entries[i][0], funcName, 'function name at ' + i);
+ assertEquals(expected_stack_entries[i][1], line, 'line at ' + i);
+ assertEquals(expected_stack_entries[i][2], column, 'column at ' + i);
+ }
+ }
+ } catch (e) {
+ print('exception: ' + e);
+ exception = e;
+ }
+};
+
+function generateWasmFromAsmJs(stdlib, foreign, heap) {
+ 'use asm';
+ var debugger_fun = foreign.call_debugger;
+ function callDebugger() {
+ debugger_fun();
+ }
+ function redirectFun() {
+ callDebugger();
+ }
+ return redirectFun;
+}
+
+function call_debugger() {
+ debugger;
+}
+
+function setup() {
+ exception = null;
+ break_count = 0;
+}
+
+(function FrameInspection() {
+ setup();
+ var fun =
+ generateWasmFromAsmJs(this, {'call_debugger': call_debugger}, undefined);
+ expected_stack_entries = [
+ ['call_debugger', 66, 3], // --
+ ['callDebugger', 57, 5], // --
+ ['redirectFun', 60, 5], // --
+ ['FrameInspection', 86, 3], // --
+ ['', 89, 3]
+ ];
+ Debug.setListener(listener);
+ fun();
+ Debug.setListener(null);
+ assertEquals(1, break_count);
+})();
diff --git a/deps/v8/test/debugger/debugger.gyp b/deps/v8/test/debugger/debugger.gyp
new file mode 100644
index 0000000000..c093d09717
--- /dev/null
+++ b/deps/v8/test/debugger/debugger.gyp
@@ -0,0 +1,26 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'debugger_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
+ ],
+ 'sources': [
+ 'debugger.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/debugger/debugger.isolate b/deps/v8/test/debugger/debugger.isolate
new file mode 100644
index 0000000000..588c3b01a4
--- /dev/null
+++ b/deps/v8/test/debugger/debugger.isolate
@@ -0,0 +1,19 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['v8_enable_inspector==1', {
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
new file mode 100644
index 0000000000..497479508f
--- /dev/null
+++ b/deps/v8/test/debugger/debugger.status
@@ -0,0 +1,44 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+[ALWAYS, {
+ # Issue 3660: Replacing activated TurboFan frames by unoptimized code does
+ # not work, but we expect it to not crash.
+ 'debug/debug-step-turbofan': [PASS, FAIL],
+}], # ALWAYS
+
+##############################################################################
+['gc_stress == True', {
+ # Skip tests not suitable for GC stress.
+ # Tests taking too long
+ 'debug/debug-stepout-scope-part1': [SKIP],
+ 'debug/debug-stepout-scope-part2': [SKIP],
+ 'debug/debug-stepout-scope-part3': [SKIP],
+ 'debug/debug-stepout-scope-part4': [SKIP],
+ 'debug/debug-stepout-scope-part5': [SKIP],
+ 'debug/debug-stepout-scope-part6': [SKIP],
+ 'debug/debug-stepout-scope-part7': [SKIP],
+ 'debug/debug-stepout-scope-part8': [SKIP],
+}], # 'gc_stress == True'
+
+##############################################################################
+['variant == turbofan_opt', {
+ # TODO(mstarzinger): Debugger cannot materialize de-materialized functions.
+ 'debug/regress/regress-crbug-323936': [FAIL],
+
+ # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
+ 'debug/debug-evaluate-closure': [FAIL],
+}], # variant == turbofan_opt
+
+##############################################################################
+['gc_stress == True', {
+ # Async function tests taking too long
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5411
+ 'debug/harmony/async-debug-caught-exception-cases0': [SKIP],
+ 'debug/harmony/async-debug-caught-exception-cases1': [SKIP],
+ 'debug/harmony/async-debug-caught-exception-cases2': [SKIP],
+ 'debug/harmony/async-debug-caught-exception-cases3': [SKIP],
+}], # 'gc_stress == True'
+]
diff --git a/deps/v8/test/debugger/protocol/basic-round-trip.js b/deps/v8/test/debugger/protocol/basic-round-trip.js
new file mode 100644
index 0000000000..a20f482523
--- /dev/null
+++ b/deps/v8/test/debugger/protocol/basic-round-trip.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var received = 0;
+
+function receive(message) {
+ var message_obj = JSON.parse(message);
+ assertEquals(3, message_obj.result.result.value);
+ received++;
+}
+
+var message = JSON.stringify({
+ id : 1,
+ method : "Runtime.evaluate",
+ params : {
+ expression: "function f() { return 2 }; 3"
+ }
+});
+
+send(message);
+
+assertEquals(1, received);
+assertEquals(2, f());
diff --git a/deps/v8/test/debugger/regress/regress-5575-1.js b/deps/v8/test/debugger/regress/regress-5575-1.js
new file mode 100644
index 0000000000..657a811c05
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-5575-1.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug;
+
+Debug.setListener(function() {});
+
+function factory() {
+ return function* generator() {
+ debugger;
+ yield 42;
+ }
+}
+
+Debug.setBreakPoint(function(){}, 0, 0);
+var generator = factory();
+var obj = generator();
+obj.next();
+Debug.setBreakPoint(factory, 0, 0);
+obj.next();
diff --git a/deps/v8/test/debugger/regress/regress-5575-2.js b/deps/v8/test/debugger/regress/regress-5575-2.js
new file mode 100644
index 0000000000..0a27367f1b
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-5575-2.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug;
+
+Debug.setListener(function() {});
+
+function main() {
+ function* boo() {
+ debugger;
+ yield 42;
+ }
+
+ var gen = boo();
+ gen.next();
+ Debug.setBreakPoint(main, 0, 0);
+ gen.next();
+}
+
+main();
diff --git a/deps/v8/test/debugger/regress/regress-5575-3.js b/deps/v8/test/debugger/regress/regress-5575-3.js
new file mode 100644
index 0000000000..7fd448c839
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-5575-3.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Debug = debug.Debug;
+
+let n = 0;
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ if (n++ == 0) {
+ Debug.setBreakPoint(main, 0, 0);
+ }
+ }
+}
+
+Debug.setListener(listener);
+
+function main() {
+ function* boo() {
+ debugger;
+ yield;
+ }
+
+ var gen = boo();
+ gen.next();
+ gen.next();
+}
+
+main();
diff --git a/deps/v8/test/debugger/regress/regress-5610.js b/deps/v8/test/debugger/regress/regress-5610.js
new file mode 100644
index 0000000000..4c45aef82d
--- /dev/null
+++ b/deps/v8/test/debugger/regress/regress-5610.js
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-async-await --turbo
+
+var Debug = debug.Debug;
+var step_count = 0;
+
+function listener(event, execState, eventData, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = execState.frame(0).sourceLineText();
+ print(line);
+ var [match, expected_count, step] = /\/\/ B(\d) (\w+)$/.exec(line);
+ assertEquals(step_count++, parseInt(expected_count));
+ if (step != "Continue") execState.prepareStep(Debug.StepAction[step]);
+ } catch (e) {
+ print(e, e.stack);
+ quit(1);
+ }
+}
+
+Debug.setListener(listener);
+
+async function f() {
+ var a = 1;
+ debugger; // B0 StepNext
+ print(1); // B1 StepNext
+ return a; // B2 StepNext
+} // B3 Continue
+
+f();
diff --git a/deps/v8/test/debugger/test-api.js b/deps/v8/test/debugger/test-api.js
new file mode 100644
index 0000000000..69c7c5d219
--- /dev/null
+++ b/deps/v8/test/debugger/test-api.js
@@ -0,0 +1,432 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+// If true, prints all messages sent and received by inspector.
+const printProtocolMessages = false;
+
+// The active wrapper instance.
+let activeWrapper = undefined;
+
+// Receiver function called by inspector, delegating to active wrapper.
+function receive(message) {
+ activeWrapper.receiveMessage(message);
+}
+
+class DebugWrapper {
+ constructor() {
+ // Message dictionary storing {id, message} pairs.
+ this.receivedMessages = new Map();
+
+ // Each message dispatched by the Debug wrapper is assigned a unique number
+ // using nextMessageId.
+ this.nextMessageId = 0;
+
+ // The listener method called on certain events.
+ this.listener = undefined;
+
+ // TODO(jgruber): Determine which of these are still required and possible.
+ // Debug events which can occur in the V8 JavaScript engine.
+ this.DebugEvent = { Break: 1,
+ Exception: 2,
+ NewFunction: 3,
+ BeforeCompile: 4,
+ AfterCompile: 5,
+ CompileError: 6,
+ AsyncTaskEvent: 7
+ };
+
+ // The different types of steps.
+ this.StepAction = { StepOut: 0,
+ StepNext: 1,
+ StepIn: 2,
+ StepFrame: 3,
+ };
+
+ // A copy of the scope types from runtime-debug.cc.
+ // NOTE: these constants should be backward-compatible, so
+ // add new ones to the end of this list.
+ this.ScopeType = { Global: 0,
+ Local: 1,
+ With: 2,
+ Closure: 3,
+ Catch: 4,
+ Block: 5,
+ Script: 6,
+ Eval: 7,
+ Module: 8
+ };
+
+ // Types of exceptions that can be broken upon.
+ this.ExceptionBreak = { Caught : 0,
+ Uncaught: 1 };
+
+ // Store the current script id so we can skip corresponding break events.
+ this.thisScriptId = %FunctionGetScriptId(receive);
+
+ // Register as the active wrapper.
+ assertTrue(activeWrapper === undefined);
+ activeWrapper = this;
+ }
+
+ enable() { this.sendMessageForMethodChecked("Debugger.enable"); }
+ disable() { this.sendMessageForMethodChecked("Debugger.disable"); }
+
+ setListener(listener) { this.listener = listener; }
+
+ stepOver() { this.sendMessageForMethodChecked("Debugger.stepOver"); }
+ stepInto() { this.sendMessageForMethodChecked("Debugger.stepInto"); }
+ stepOut() { this.sendMessageForMethodChecked("Debugger.stepOut"); }
+
+ setBreakOnException() {
+ this.sendMessageForMethodChecked(
+ "Debugger.setPauseOnExceptions", { state : "all" });
+ }
+
+ clearBreakOnException() {
+ const newState = this.isBreakOnUncaughtException() ? "uncaught" : "none";
+ this.sendMessageForMethodChecked(
+ "Debugger.setPauseOnExceptions", { state : newState });
+ }
+
+ isBreakOnException() {
+ return !!%IsBreakOnException(this.ExceptionBreak.Caught);
+ };
+
+ setBreakOnUncaughtException() {
+ const newState = this.isBreakOnException() ? "all" : "uncaught";
+ this.sendMessageForMethodChecked(
+ "Debugger.setPauseOnExceptions", { state : newState });
+ }
+
+ clearBreakOnUncaughtException() {
+ const newState = this.isBreakOnException() ? "all" : "none";
+ this.sendMessageForMethodChecked(
+ "Debugger.setPauseOnExceptions", { state : newState });
+ }
+
+ isBreakOnUncaughtException() {
+ return !!%IsBreakOnException(this.ExceptionBreak.Uncaught);
+ };
+
+ clearStepping() { %ClearStepping(); };
+
+ // Returns the resulting breakpoint id.
+ setBreakPoint(func, opt_line, opt_column, opt_condition) {
+ assertTrue(%IsFunction(func));
+ assertFalse(%FunctionIsAPIFunction(func));
+
+ // TODO(jgruber): We handle only script breakpoints for now.
+
+ const scriptid = %FunctionGetScriptId(func);
+ assertTrue(scriptid != -1);
+
+ const offset = %FunctionGetScriptSourcePosition(func);
+ const loc =
+ %ScriptLocationFromLine2(scriptid, opt_line, opt_column, offset);
+
+ const params = { location :
+ { scriptId : scriptid.toString(),
+ lineNumber : loc.line,
+ columnNumber : loc.column,
+ }};
+ if (!!opt_condition) {
+ params.condition = opt_condition;
+ }
+
+ const {msgid, msg} = this.createMessage(
+ "Debugger.setBreakpoint", params);
+ this.sendMessage(msg);
+
+ const reply = this.takeReplyChecked(msgid);
+ assertTrue(reply.result !== undefined);
+ const breakid = reply.result.breakpointId;
+ assertTrue(breakid !== undefined);
+
+ return breakid;
+ }
+
+ clearBreakPoint(breakid) {
+ const {msgid, msg} = this.createMessage(
+ "Debugger.removeBreakpoint", { breakpointId : breakid });
+ this.sendMessage(msg);
+ this.takeReplyChecked(msgid);
+ }
+
+ // Returns the serialized result of the given expression. For example:
+ // {"type":"number", "value":33, "description":"33"}.
+ evaluate(frameid, expression) {
+ const {msgid, msg} = this.createMessage(
+ "Debugger.evaluateOnCallFrame",
+ { callFrameId : frameid,
+ expression : expression
+ });
+ this.sendMessage(msg);
+
+ const reply = this.takeReplyChecked(msgid);
+ return reply.result.result;
+ }
+
+ // --- Internal methods. -----------------------------------------------------
+
+ getNextMessageId() {
+ return this.nextMessageId++;
+ }
+
+ createMessage(method, params) {
+ const id = this.getNextMessageId();
+ const msg = JSON.stringify({
+ id: id,
+ method: method,
+ params: params,
+ });
+ return { msgid : id, msg: msg };
+ }
+
+ receiveMessage(message) {
+ if (printProtocolMessages) print(message);
+
+ const parsedMessage = JSON.parse(message);
+ if (parsedMessage.id !== undefined) {
+ this.receivedMessages.set(parsedMessage.id, parsedMessage);
+ }
+
+ this.dispatchMessage(parsedMessage);
+ }
+
+ sendMessage(message) {
+ if (printProtocolMessages) print(message);
+ send(message);
+ }
+
+ sendMessageForMethodChecked(method, params) {
+ const {msgid, msg} = this.createMessage(method, params);
+ this.sendMessage(msg);
+ this.takeReplyChecked(msgid);
+ }
+
+ takeReplyChecked(msgid) {
+ const reply = this.receivedMessages.get(msgid);
+ assertTrue(reply !== undefined);
+ this.receivedMessages.delete(msgid);
+ return reply;
+ }
+
+ execStatePrepareStep(action) {
+ switch(action) {
+ case this.StepAction.StepOut: this.stepOut(); break;
+ case this.StepAction.StepNext: this.stepOver(); break;
+ case this.StepAction.StepIn: this.stepInto(); break;
+ default: %AbortJS("Unsupported StepAction"); break;
+ }
+ }
+
+ execStateScopeType(type) {
+ switch (type) {
+ case "global": return this.ScopeType.Global;
+ case "local": return this.ScopeType.Local;
+ case "with": return this.ScopeType.With;
+ case "closure": return this.ScopeType.Closure;
+ case "catch": return this.ScopeType.Catch;
+ case "block": return this.ScopeType.Block;
+ case "script": return this.ScopeType.Script;
+ default: %AbortJS("Unexpected scope type");
+ }
+ }
+
+ // Returns an array of property descriptors of the scope object.
+ // This is in contrast to the original API, which simply passed object
+ // mirrors.
+ execStateScopeObject(obj) {
+ const serialized_scope = this.getProperties(obj.objectId);
+ const scope = {}
+ const scope_tuples = serialized_scope.forEach((elem) => {
+ const key = elem.name;
+
+ let value;
+ if (elem.value) {
+ // Some properties (e.g. with getters/setters) don't have a value.
+ switch (elem.value.type) {
+ case "undefined": value = undefined; break;
+ default: value = elem.value.value; break;
+ }
+ }
+
+ scope[key] = value;
+ })
+
+ return { value : () => scope };
+ }
+
+ execStateScope(scope) {
+ return { scopeType : () => this.execStateScopeType(scope.type),
+ scopeObject : () => this.execStateScopeObject(scope.object)
+ };
+ }
+
+ getProperties(objectId) {
+ const {msgid, msg} = this.createMessage(
+ "Runtime.getProperties", { objectId : objectId });
+ this.sendMessage(msg);
+ const reply = this.takeReplyChecked(msgid);
+ return reply.result.result;
+ }
+
+ getLocalScopeDetails(frame) {
+ const scopes = frame.scopeChain;
+ for (let i = 0; i < scopes.length; i++) {
+ const scope = scopes[i]
+ if (scope.type == "local") {
+ return this.getProperties(scope.object.objectId);
+ }
+ }
+
+ return undefined;
+ }
+
+ execStateFrameLocalCount(frame) {
+ const scope_details = this.getLocalScopeDetails(frame);
+ return scope_details ? scope_details.length : 0;
+ }
+
+ execStateFrameLocalName(frame, index) {
+ const scope_details = this.getLocalScopeDetails(frame);
+ if (index < 0 || index >= scope_details.length) return undefined;
+ return scope_details[index].name;
+ }
+
+ execStateFrameLocalValue(frame, index) {
+ const scope_details = this.getLocalScopeDetails(frame);
+ if (index < 0 || index >= scope_details.length) return undefined;
+
+ const local = scope_details[index];
+
+ let localValue;
+ switch (local.value.type) {
+ case "undefined": localValue = undefined; break;
+ default: localValue = local.value.value; break;
+ }
+
+ return { value : () => localValue };
+ }
+
+ execStateFrameEvaluate(frame, expr) {
+ const frameid = frame.callFrameId;
+ const {msgid, msg} = this.createMessage(
+ "Debugger.evaluateOnCallFrame",
+ { callFrameId : frameid,
+ expression : expr
+ });
+ this.sendMessage(msg);
+ const reply = this.takeReplyChecked(msgid);
+
+ const result = reply.result.result;
+ if (result.subtype == "error") {
+ throw new Error(result.description);
+ }
+
+ return { value : () => result.value };
+ }
+
+ execStateFrame(frame) {
+ const scriptid = parseInt(frame.location.scriptId);
+ const line = frame.location.lineNumber;
+ const column = frame.location.columnNumber;
+ const loc = %ScriptLocationFromLine2(scriptid, line, column, 0);
+ const func = { name : () => frame.functionName };
+ return { sourceLineText : () => loc.sourceText,
+ evaluate : (expr) => this.execStateFrameEvaluate(frame, expr),
+ functionName : () => frame.functionName,
+ func : () => func,
+ localCount : () => this.execStateFrameLocalCount(frame),
+ localName : (ix) => this.execStateFrameLocalName(frame, ix),
+ localValue: (ix) => this.execStateFrameLocalValue(frame, ix),
+ scopeCount : () => frame.scopeChain.length,
+ scope : (index) => this.execStateScope(frame.scopeChain[index]),
+ allScopes : () => frame.scopeChain.map(
+ this.execStateScope.bind(this))
+ };
+ }
+
+ // --- Message handlers. -----------------------------------------------------
+
+ dispatchMessage(message) {
+ const method = message.method;
+ if (method == "Debugger.paused") {
+ this.handleDebuggerPaused(message);
+ } else if (method == "Debugger.scriptParsed") {
+ this.handleDebuggerScriptParsed(message);
+ }
+ }
+
+ handleDebuggerPaused(message) {
+ const params = message.params;
+
+ var debugEvent;
+ switch (params.reason) {
+ case "exception":
+ case "promiseRejection":
+ debugEvent = this.DebugEvent.Exception;
+ break;
+ default:
+ // TODO(jgruber): More granularity.
+ debugEvent = this.DebugEvent.Break;
+ break;
+ }
+
+ // Skip break events in this file.
+ if (params.callFrames[0].location.scriptId == this.thisScriptId) return;
+
+ // TODO(jgruber): Arguments as needed.
+ let execState = { frames : params.callFrames,
+ prepareStep : this.execStatePrepareStep.bind(this),
+ frame : (index) => this.execStateFrame(
+ index ? params.callFrames[index]
+ : params.callFrames[0]),
+ frameCount : () => params.callFrames.length
+ };
+
+ let eventData = this.execStateFrame(params.callFrames[0]);
+ if (debugEvent == this.DebugEvent.Exception) {
+ eventData.uncaught = () => params.data.uncaught;
+ }
+
+ this.invokeListener(debugEvent, execState, eventData);
+ }
+
+ handleDebuggerScriptParsed(message) {
+ const params = message.params;
+ let eventData = { scriptId : params.scriptId,
+ eventType : this.DebugEvent.AfterCompile
+ }
+
+ // TODO(jgruber): Arguments as needed. Still completely missing exec_state,
+ // and eventData used to contain the script mirror instead of its id.
+ this.invokeListener(this.DebugEvent.AfterCompile, undefined, eventData,
+ undefined);
+ }
+
+ invokeListener(event, exec_state, event_data, data) {
+ if (this.listener) {
+ this.listener(event, exec_state, event_data, data);
+ }
+ }
+}
+
+// Simulate the debug object generated by --expose-debug-as debug.
+var debug = { instance : undefined };
+
+Object.defineProperty(debug, 'Debug', { get: function() {
+ if (!debug.instance) {
+ debug.instance = new DebugWrapper();
+ debug.instance.enable();
+ }
+ return debug.instance;
+}});
+
+Object.defineProperty(debug, 'ScopeType', { get: function() {
+ const instance = debug.Debug;
+ return instance.ScopeType;
+}});
diff --git a/deps/v8/test/debugger/testcfg.py b/deps/v8/test/debugger/testcfg.py
new file mode 100644
index 0000000000..bef7544a07
--- /dev/null
+++ b/deps/v8/test/debugger/testcfg.py
@@ -0,0 +1,72 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+
+class DebuggerTestSuite(testsuite.TestSuite):
+
+ def __init__(self, name, root):
+ super(DebuggerTestSuite, self).__init__(name, root)
+
+ def ListTests(self, context):
+ tests = []
+ for dirname, dirs, files in os.walk(self.root):
+ for dotted in [x for x in dirs if x.startswith('.')]:
+ dirs.remove(dotted)
+ dirs.sort()
+ files.sort()
+ for filename in files:
+ if (filename.endswith(".js") and filename != "test-api.js"):
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
+ test = testcase.TestCase(self, testname)
+ tests.append(test)
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ source = self.GetSourceForTest(testcase)
+ flags = ["--enable-inspector", "--allow-natives-syntax"] + context.mode_flags
+ flags_match = re.findall(FLAGS_PATTERN, source)
+ for match in flags_match:
+ flags += match.strip().split()
+
+ files_list = [] # List of file names to append to command arguments.
+ files_match = FILES_PATTERN.search(source);
+ # Accept several lines of 'Files:'.
+ while True:
+ if files_match:
+ files_list += files_match.group(1).strip().split()
+ files_match = FILES_PATTERN.search(source, files_match.end())
+ else:
+ break
+
+ files = []
+ files.append(os.path.normpath(os.path.join(self.root, "..", "mjsunit", "mjsunit.js")))
+ files.append(os.path.join(self.root, "test-api.js"))
+ files.extend([ os.path.normpath(os.path.join(self.root, '..', '..', f))
+ for f in files_list ])
+ files.append(os.path.join(self.root, testcase.path + self.suffix()))
+
+ flags += files
+ if context.isolates:
+ flags.append("--isolate")
+ flags += files
+
+ return testcase.flags + flags
+
+ def GetSourceForTest(self, testcase):
+ filename = os.path.join(self.root, testcase.path + self.suffix())
+ with open(filename) as f:
+ return f.read()
+
+def GetSuite(name, root):
+ return DebuggerTestSuite(name, root)
diff --git a/deps/v8/test/debugger/wrapper/break-on-debugger-stmt.js b/deps/v8/test/debugger/wrapper/break-on-debugger-stmt.js
new file mode 100644
index 0000000000..2d5e6a930f
--- /dev/null
+++ b/deps/v8/test/debugger/wrapper/break-on-debugger-stmt.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() { debugger; debugger; }
+
+const Debug = new DebugWrapper();
+Debug.enable();
+
+let breakEventCount = 0;
+Debug.setListener(function(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ breakEventCount++;
+});
+
+assertEquals(0, breakEventCount);
+f();
+f();
+f();
+assertEquals(6, breakEventCount);
diff --git a/deps/v8/test/debugger/wrapper/enable-disable.js b/deps/v8/test/debugger/wrapper/enable-disable.js
new file mode 100644
index 0000000000..36813efb67
--- /dev/null
+++ b/deps/v8/test/debugger/wrapper/enable-disable.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let compileCount = 0;
+
+const Debug = new DebugWrapper();
+
+Debug.setListener(function(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.AfterCompile) return;
+ compileCount++;
+});
+
+Debug.enable();
+assertTrue(compileCount != 0);
+
+const compileCountAfterEnable = compileCount;
+
+Debug.enable(); // Idempotent.
+assertEquals(compileCountAfterEnable, compileCount);
+
+Debug.disable();
+assertEquals(compileCountAfterEnable, compileCount);
+
+Debug.disable(); // Idempotent.
+assertEquals(compileCountAfterEnable, compileCount);
+
+Debug.enable(); // Re-enabling causes recompilation.
+assertEquals(2 * compileCountAfterEnable, compileCount);
diff --git a/deps/v8/test/default.gyp b/deps/v8/test/default.gyp
index dd1d9e2dc8..2a6ab3a686 100644
--- a/deps/v8/test/default.gyp
+++ b/deps/v8/test/default.gyp
@@ -18,6 +18,13 @@
'preparser/preparser.gyp:preparser_run',
'unittests/unittests.gyp:unittests_run',
],
+ 'conditions': [
+ ['v8_enable_inspector==1', {
+ 'dependencies': [
+ 'inspector/inspector.gyp:inspector-test_run',
+ ],
+ }],
+ ],
'includes': [
'../gypfiles/features.gypi',
'../gypfiles/isolate.gypi',
diff --git a/deps/v8/test/default.isolate b/deps/v8/test/default.isolate
index 416137c5b5..8ef69c3b15 100644
--- a/deps/v8/test/default.isolate
+++ b/deps/v8/test/default.isolate
@@ -9,7 +9,9 @@
},
'includes': [
'cctest/cctest.isolate',
+ 'debugger/debugger.isolate',
'fuzzer/fuzzer.isolate',
+ 'inspector/inspector.isolate'
'intl/intl.isolate',
'message/message.isolate',
'mjsunit/mjsunit.isolate',
diff --git a/deps/v8/test/fuzzer/fuzzer.cc b/deps/v8/test/fuzzer/fuzzer.cc
index 71a26b86b3..cb4a287d70 100644
--- a/deps/v8/test/fuzzer/fuzzer.cc
+++ b/deps/v8/test/fuzzer/fuzzer.cc
@@ -42,7 +42,7 @@ int main(int argc, char* argv[]) {
size_t bytes_read = fread(data, 1, size, input);
fclose(input);
- if (bytes_read != size) {
+ if (bytes_read != static_cast<size_t>(size)) {
free(data);
fprintf(stderr, "Failed to read %s\n", argv[1]);
return 1;
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index f8629f6030..f24c75bc1a 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -51,7 +51,7 @@
'target_name': 'parser_fuzzer_lib',
'type': 'static_library',
'dependencies': [
- 'fuzzer_support_nocomponent',
+ 'fuzzer_support',
],
'include_dirs': [
'../..',
@@ -143,6 +143,35 @@
],
},
{
+ 'target_name': 'v8_simple_wasm_call_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'wasm_call_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'wasm_call_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'wasm-call.cc',
+ '../common/wasm/test-signatures.h',
+ '../common/wasm/wasm-module-runner.cc',
+ '../common/wasm/wasm-module-runner.h',
+ ],
+ },
+ {
'target_name': 'v8_simple_wasm_code_fuzzer',
'type': 'executable',
'dependencies': [
@@ -386,6 +415,7 @@
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8',
+ '../../src/v8.gyp:v8_libbase',
'../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
@@ -396,29 +426,6 @@
'fuzzer-support.h',
],
},
- {
- 'target_name': 'fuzzer_support_nocomponent',
- 'type': 'static_library',
- 'dependencies': [
- '../../src/v8.gyp:v8_libplatform',
- ],
- 'include_dirs': [
- '../..',
- ],
- 'sources': [ ### gcmole(all) ###
- 'fuzzer-support.cc',
- 'fuzzer-support.h',
- ],
- 'conditions': [
- ['component=="shared_library"', {
- # fuzzers can't be built against a shared library, so we need to
- # depend on the underlying static target in that case.
- 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
- }, {
- 'dependencies': ['../../src/v8.gyp:v8'],
- }],
- ],
- },
],
'conditions': [
['test_isolation_mode != "noop"', {
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
index 6b93c53481..788c76c010 100644
--- a/deps/v8/test/fuzzer/fuzzer.isolate
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -10,6 +10,7 @@
'<(PRODUCT_DIR)/v8_simple_regexp_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_asmjs_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/v8_simple_wasm_call_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_code_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_data_section_fuzzer<(EXECUTABLE_SUFFIX)',
'<(PRODUCT_DIR)/v8_simple_wasm_function_sigs_section_fuzzer<(EXECUTABLE_SUFFIX)',
@@ -25,6 +26,7 @@
'./regexp/',
'./wasm/',
'./wasm_asmjs/',
+ './wasm_call/',
'./wasm_code/',
'./wasm_data_section/',
'./wasm_function_sigs_section/',
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index fc8f59ee62..20f4b1703d 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -34,9 +34,8 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::internal::Handle<v8::internal::Script> script =
factory->NewScript(source.ToHandleChecked());
- v8::internal::Zone zone(i_isolate->allocator());
+ v8::internal::Zone zone(i_isolate->allocator(), ZONE_NAME);
v8::internal::ParseInfo info(&zone, script);
- info.set_global();
v8::internal::Parser parser(&info);
parser.Parse(&info);
isolate->RequestGarbageCollectionForTesting(
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index 874a434476..4b053d4eb9 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -16,7 +16,8 @@
namespace i = v8::internal;
void Test(v8::Isolate* isolate, i::Handle<i::JSRegExp> regexp,
- i::Handle<i::String> subject, i::Handle<i::JSArray> results_array) {
+ i::Handle<i::String> subject,
+ i::Handle<i::RegExpMatchInfo> results_array) {
v8::TryCatch try_catch(isolate);
USE(i::RegExpImpl::Exec(regexp, subject, 0, results_array));
}
@@ -48,7 +49,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
const uint8_t one_byte_array[6] = {'f', 'o', 'o', 'b', 'a', 'r'};
const i::uc16 two_byte_array[6] = {'f', 0xD83D, 0xDCA9, 'b', 'a', 0x2603};
- i::Handle<i::JSArray> results_array = factory->NewJSArray(5);
+ i::Handle<i::RegExpMatchInfo> results_array = factory->NewRegExpMatchInfo();
i::Handle<i::String> one_byte =
factory->NewStringFromOneByte(i::Vector<const uint8_t>(one_byte_array, 6))
.ToHandleChecked();
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 830b459742..0e4925bc06 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -18,8 +18,8 @@ class FuzzerVariantGenerator(testsuite.VariantGenerator):
class FuzzerTestSuite(testsuite.TestSuite):
- SUB_TESTS = ( 'json', 'parser', 'regexp', 'wasm', 'wasm_asmjs', 'wasm_code',
- 'wasm_data_section', 'wasm_function_sigs_section',
+ SUB_TESTS = ( 'json', 'parser', 'regexp', 'wasm', 'wasm_asmjs', 'wasm_call',
+ 'wasm_code', 'wasm_data_section', 'wasm_function_sigs_section',
'wasm_globals_section', 'wasm_imports_section', 'wasm_memory_section',
'wasm_names_section', 'wasm_types_section' )
diff --git a/deps/v8/test/fuzzer/wasm-call.cc b/deps/v8/test/fuzzer/wasm-call.cc
new file mode 100644
index 0000000000..fb9a696135
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-call.cc
@@ -0,0 +1,183 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/utils.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-module-runner.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+#define WASM_CODE_FUZZER_HASH_SEED 83
+#define MAX_NUM_FUNCTIONS 3
+#define MAX_NUM_PARAMS 3
+
+using namespace v8::internal::wasm;
+
+template <typename V>
+static inline V read_value(const uint8_t** data, size_t* size, bool* ok) {
+ // The status flag {ok} checks that the decoding up until now was okay, and
+ // that a value of type V can be read without problems.
+ *ok &= (*size > sizeof(V));
+ if (!(*ok)) return 0;
+ V result = v8::internal::ReadLittleEndianValue<V>(*data);
+ *data += sizeof(V);
+ *size -= sizeof(V);
+ return result;
+}
+
+static void add_argument(
+ v8::internal::Isolate* isolate, LocalType type, WasmVal* interpreter_args,
+ v8::internal::Handle<v8::internal::Object>* compiled_args, int* argc,
+ const uint8_t** data, size_t* size, bool* ok) {
+ if (!(*ok)) return;
+ switch (type) {
+ case kAstF32: {
+ float value = read_value<float>(data, size, ok);
+ interpreter_args[*argc] = WasmVal(value);
+ compiled_args[*argc] =
+ isolate->factory()->NewNumber(static_cast<double>(value));
+ break;
+ }
+ case kAstF64: {
+ double value = read_value<double>(data, size, ok);
+ interpreter_args[*argc] = WasmVal(value);
+ compiled_args[*argc] = isolate->factory()->NewNumber(value);
+ break;
+ }
+ case kAstI32: {
+ int32_t value = read_value<int32_t>(data, size, ok);
+ interpreter_args[*argc] = WasmVal(value);
+ compiled_args[*argc] =
+ isolate->factory()->NewNumber(static_cast<double>(value));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ (*argc)++;
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ v8::internal::AccountingAllocator allocator;
+ v8::internal::Zone zone(&allocator, ZONE_NAME);
+
+ bool ok = true;
+ uint8_t num_functions =
+ (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_FUNCTIONS) + 1;
+
+ LocalType types[] = {kAstF32, kAstF64, kAstI32, kAstI64};
+ WasmVal interpreter_args[3];
+ v8::internal::Handle<v8::internal::Object> compiled_args[3];
+ int argc = 0;
+
+ WasmModuleBuilder builder(&zone);
+ for (int fun = 0; fun < num_functions; fun++) {
+ size_t num_params = static_cast<size_t>(
+ (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_PARAMS) + 1);
+ FunctionSig::Builder sig_builder(&zone, 1, num_params);
+ sig_builder.AddReturn(kAstI32);
+ for (size_t param = 0; param < num_params; param++) {
+ // The main function cannot handle int64 parameters.
+ LocalType param_type = types[(read_value<uint8_t>(&data, &size, &ok) %
+ (arraysize(types) - (fun == 0 ? 1 : 0)))];
+ sig_builder.AddParam(param_type);
+ if (fun == 0) {
+ add_argument(i_isolate, param_type, interpreter_args, compiled_args,
+ &argc, &data, &size, &ok);
+ }
+ }
+ v8::internal::wasm::WasmFunctionBuilder* f =
+ builder.AddFunction(sig_builder.Build());
+ uint32_t code_size = static_cast<uint32_t>(size / num_functions);
+ f->EmitCode(data, code_size);
+ data += code_size;
+ size -= code_size;
+ if (fun == 0) {
+ f->ExportAs(v8::internal::CStrVector("main"));
+ }
+ }
+
+ ZoneBuffer buffer(&zone);
+ builder.WriteTo(buffer);
+
+ if (!ok) {
+ // The input data was too short.
+ return 0;
+ }
+
+ v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+
+ v8::internal::HandleScope scope(i_isolate);
+
+ ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
+ std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
+ i_isolate, &interpreter_thrower, buffer.begin(), buffer.end(),
+ v8::internal::wasm::ModuleOrigin::kWasmOrigin, true));
+
+ if (module == nullptr) {
+ return 0;
+ }
+ int32_t result_interpreted;
+ bool possible_nondeterminism = false;
+ {
+ result_interpreted = testing::InterpretWasmModule(
+ i_isolate, &interpreter_thrower, module.get(), 0, interpreter_args,
+ &possible_nondeterminism);
+ }
+
+ ErrorThrower compiler_thrower(i_isolate, "Compiler");
+ v8::internal::Handle<v8::internal::JSObject> instance =
+ testing::InstantiateModuleForTesting(i_isolate, &compiler_thrower,
+ module.get());
+
+ if (!interpreter_thrower.error()) {
+ CHECK(!instance.is_null());
+ } else {
+ return 0;
+ }
+ int32_t result_compiled;
+ {
+ result_compiled = testing::CallWasmFunctionForTesting(
+ i_isolate, instance, &compiler_thrower, "main", argc, compiled_args,
+ v8::internal::wasm::ModuleOrigin::kWasmOrigin);
+ }
+ if (result_interpreted == bit_cast<int32_t>(0xdeadbeef)) {
+ CHECK(i_isolate->has_pending_exception());
+ i_isolate->clear_pending_exception();
+ } else {
+ // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
+ // This sign bit may cause result_interpreted to be different than
+ // result_compiled. Therefore we do not check the equality of the results
+ // if the execution may have produced a NaN at some point.
+ if (!possible_nondeterminism && (result_interpreted != result_compiled)) {
+ V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
+ v8::internal::StringHasher::HashSequentialString(
+ data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED));
+ }
+ }
+ return 0;
+}
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index 13b665137d..3f7b091883 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -36,7 +36,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::TryCatch try_catch(isolate);
v8::internal::AccountingAllocator allocator;
- v8::internal::Zone zone(&allocator);
+ v8::internal::Zone zone(&allocator, ZONE_NAME);
TestSignatures sigs;
@@ -45,8 +45,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::internal::wasm::WasmFunctionBuilder* f =
builder.AddFunction(sigs.i_iii());
f->EmitCode(data, static_cast<uint32_t>(size));
- f->SetExported();
- f->SetName("main", 4);
+ f->ExportAs(v8::internal::CStrVector("main"));
ZoneBuffer buffer(&zone);
builder.WriteTo(buffer);
@@ -57,17 +56,19 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
- i_isolate, &zone, &interpreter_thrower, buffer.begin(), buffer.end(),
- v8::internal::wasm::ModuleOrigin::kWasmOrigin));
+ i_isolate, &interpreter_thrower, buffer.begin(), buffer.end(),
+ v8::internal::wasm::ModuleOrigin::kWasmOrigin, true));
if (module == nullptr) {
return 0;
}
int32_t result_interpreted;
+ bool possible_nondeterminism = false;
{
WasmVal args[] = {WasmVal(1), WasmVal(2), WasmVal(3)};
result_interpreted = testing::InterpretWasmModule(
- i_isolate, &interpreter_thrower, module.get(), 0, args);
+ i_isolate, &interpreter_thrower, module.get(), 0, args,
+ &possible_nondeterminism);
}
ErrorThrower compiler_thrower(i_isolate, "Compiler");
@@ -90,11 +91,15 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
i_isolate, instance, &compiler_thrower, "main", arraysize(arguments),
arguments, v8::internal::wasm::ModuleOrigin::kWasmOrigin);
}
- if (result_interpreted == 0xdeadbeef) {
+ if (result_interpreted == bit_cast<int32_t>(0xdeadbeef)) {
CHECK(i_isolate->has_pending_exception());
i_isolate->clear_pending_exception();
} else {
- if (result_interpreted != result_compiled) {
+ // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
+ // This sign bit may cause result_interpreted to be different than
+ // result_compiled. Therefore we do not check the equality of the results
+ // if the execution may have produced a NaN at some point.
+ if (!possible_nondeterminism && (result_interpreted != result_compiled)) {
V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
v8::internal::StringHasher::HashSequentialString(
data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED));
diff --git a/deps/v8/test/fuzzer/wasm-section-fuzzers.cc b/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
index e95beba0fd..1ef5967aa8 100644
--- a/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
+++ b/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
@@ -36,7 +36,7 @@ int fuzz_wasm_section(WasmSectionCode section, const uint8_t* data,
v8::TryCatch try_catch(isolate);
v8::internal::AccountingAllocator allocator;
- v8::internal::Zone zone(&allocator);
+ v8::internal::Zone zone(&allocator, ZONE_NAME);
ZoneBuffer buffer(&zone);
buffer.write_u32(kWasmMagic);
@@ -57,7 +57,7 @@ int fuzz_wasm_section(WasmSectionCode section, const uint8_t* data,
ErrorThrower thrower(i_isolate, "decoder");
std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
- i_isolate, &zone, &thrower, buffer.begin(), buffer.end(), kWasmOrigin));
+ i_isolate, &thrower, buffer.begin(), buffer.end(), kWasmOrigin));
return 0;
}
diff --git a/deps/v8/test/fuzzer/wasm_call/foo b/deps/v8/test/fuzzer/wasm_call/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_call/foo
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index cb96bf423c..e51b011569 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -3,37 +3,34 @@
# found in the LICENSE file.
import("../../gni/v8.gni")
+import("//build_overrides/v8.gni")
-v8_executable("inspector-test") {
- testonly = true
+if (v8_enable_inspector_override) {
+ v8_executable("inspector-test") {
+ testonly = true
- sources = [
- "inspector-impl.cc",
- "inspector-impl.h",
- "inspector-test.cc",
- "task-runner.cc",
- "task-runner.h",
- ]
+ sources = [
+ "inspector-impl.cc",
+ "inspector-impl.h",
+ "inspector-test.cc",
+ "task-runner.cc",
+ "task-runner.h",
+ ]
- configs = [
- "../..:external_config",
- "../..:internal_config_base",
- ]
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
- deps = [
- "../..:v8_libplatform",
- "//build/config/sanitizers:deps",
- "//build/win:default_exe_manifest",
- ]
+ deps = [
+ "../..:v8",
+ "../..:v8_libbase",
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ ]
- if (is_component_build) {
- # inspector-test can't be built against a shared library, so we
- # need to depend on the underlying static target in that case.
- deps += [ "../..:v8_maybe_snapshot" ]
- } else {
- deps += [ "../..:v8" ]
+ cflags = []
+ ldflags = []
}
-
- cflags = []
- ldflags = []
}
diff --git a/deps/v8/test/inspector/DEPS b/deps/v8/test/inspector/DEPS
index af99e05595..93c8f6afa5 100644
--- a/deps/v8/test/inspector/DEPS
+++ b/deps/v8/test/inspector/DEPS
@@ -1,9 +1,9 @@
include_rules = [
"-src",
+ "+src/base/atomic-utils.h",
"+src/base/macros.h",
"+src/base/platform/platform.h",
"+src/flags.h",
- "+src/inspector/string-16.h",
"+src/locked-queue-inl.h",
"+src/utils.h",
"+src/vector.h",
diff --git a/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt b/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt
new file mode 100644
index 0000000000..c8e9293ff2
--- /dev/null
+++ b/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt
@@ -0,0 +1,8 @@
+{
+ type : string
+ value : First inspector activity after attaching inspector
+}
+{
+ type : string
+ value : End of test
+}
diff --git a/deps/v8/test/inspector/console/destroy-context-during-log.js b/deps/v8/test/inspector/console/destroy-context-during-log.js
new file mode 100644
index 0000000000..2289fbefc0
--- /dev/null
+++ b/deps/v8/test/inspector/console/destroy-context-during-log.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const expression = `
+ Object.defineProperty(Object.prototype, 'RemoteObject', {
+ configurable: true,
+ set(v) {
+ delete Object.prototype.RemoteObject;
+ this.RemoteObject = v;
+
+ detachInspector();
+ setTimeout(function() {
+ // Attach the inspector again for the sake of establishing a
+ // communication channel with the frontend test runner.
+ attachInspector();
+ console.log("End of test");
+ }, 0);
+ },
+ });
+
+ // Before the whole script runs, the inspector is already attached.
+ // Re-attach the inspector and trigger the console API to make sure that the
+ // injected inspector script runs again (and triggers the above setter).
+ detachInspector();
+ attachInspector();
+ console.log("First inspector activity after attaching inspector");
+`;
+
+Protocol.Runtime.enable();
+Protocol.Runtime.evaluate({ expression: expression });
+
+Protocol.Runtime.onConsoleAPICalled(function(result) {
+ InspectorTest.logObject(result.params.args[0]);
+ if (result.params.args[0].value == "End of test") {
+ InspectorTest.completeTest();
+ }
+});
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
new file mode 100644
index 0000000000..90d57c2dfe
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec-expected.txt
@@ -0,0 +1,109 @@
+This test runs asm.js which calls back to JS. Before executing (after the script is parsed) we set breakpoints in the asm.js code.
+
+Running test: enableDebugger
+
+Running test: addScript
+Script nr 1 parsed!
+First script; assuming testFunction.
+Flooding script with breakpoints for all lines (0 - 20)...
+Setting breakpoint on line 0
+error: undefined
+Setting breakpoint on line 1
+error: undefined
+Setting breakpoint on line 2
+error: undefined
+Setting breakpoint on line 3
+error: undefined
+Setting breakpoint on line 4
+error: undefined
+Setting breakpoint on line 5
+error: undefined
+Setting breakpoint on line 6
+error: undefined
+Setting breakpoint on line 7
+error: undefined
+Setting breakpoint on line 8
+error: undefined
+Setting breakpoint on line 9
+error: undefined
+Setting breakpoint on line 10
+error: undefined
+Setting breakpoint on line 11
+error: undefined
+Setting breakpoint on line 12
+error: undefined
+Setting breakpoint on line 13
+error: undefined
+Setting breakpoint on line 14
+error: undefined
+Setting breakpoint on line 15
+error: undefined
+Setting breakpoint on line 16
+error: undefined
+Setting breakpoint on line 17
+error: undefined
+Setting breakpoint on line 18
+error: undefined
+Setting breakpoint on line 19
+error: undefined
+
+Running test: runTestFunction
+Script nr 2 parsed!
+Paused #1
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":17,"columnNumber":2}
+ - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #2
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":12}
+ - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #3
+ - [0] {"functionName":"generateAsmJs","function_lineNumber":1,"function_columnNumber":24,"lineNumber":2,"columnNumber":4}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":12}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #4
+ - [0] {"functionName":"generateAsmJs","function_lineNumber":1,"function_columnNumber":24,"lineNumber":3,"columnNumber":30}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":12}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #5
+ - [0] {"functionName":"generateAsmJs","function_lineNumber":1,"function_columnNumber":24,"lineNumber":10,"columnNumber":4}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":12}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #6
+ - [0] {"functionName":"generateAsmJs","function_lineNumber":1,"function_columnNumber":24,"lineNumber":11,"columnNumber":2}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":12}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #7
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #8
+ - [0] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #9
+ - [0] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":5,"columnNumber":6}
+ - [1] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [2] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #10
+ - [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
+ - [1] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #11
+ - [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
+ - [1] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #12
+ - [0] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":6,"columnNumber":4}
+ - [1] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [2] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Paused #13
+ - [0] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":9,"columnNumber":4}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+
+Running test: finished
+Finished TestSuite.
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
new file mode 100644
index 0000000000..5c0dbaaefb
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
@@ -0,0 +1,128 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+InspectorTest.log(
+ 'This test runs asm.js which calls back to JS. Before executing (after ' +
+ 'the script is parsed) we set breakpoints in the asm.js code.');
+
+function testFunction() {
+ function generateAsmJs(stdlib, foreign, heap) {
+ 'use asm';
+ var debugger_fun = foreign.call_debugger;
+ function callDebugger() {
+ debugger_fun();
+ }
+ function redirectFun() {
+ callDebugger();
+ }
+ return redirectFun;
+ }
+
+ function call_debugger() {
+ debugger;
+ }
+
+ %OptimizeFunctionOnNextCall(generateAsmJs);
+ var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
+ fun();
+}
+
+Protocol.Debugger.onPaused(handleDebuggerPaused);
+Protocol.Debugger.onScriptParsed(handleScriptParsed);
+
+function printResultAndContinue(next, message) {
+ if (message.result && message.result.exceptionDetails)
+ InspectorTest.logMessage(message.result.exceptionDetails);
+ else if (message.error)
+ InspectorTest.logMessage(message.error);
+ else if (message.result && message.result.type !== undefined)
+ InspectorTest.logMessage(message.result);
+ next();
+}
+
+InspectorTest.runTestSuite([
+ function enableDebugger(next) {
+ Protocol.Debugger.enable().then(next);
+ },
+
+ function addScript(next) {
+ afterScriptParsedCallback = next;
+ InspectorTest.addScript(testFunction.toString());
+ },
+
+ function runTestFunction(next) {
+ Protocol.Runtime.evaluate({'expression': 'testFunction()'})
+ .then(printResultAndContinue.bind(null, next));
+ },
+
+ function finished(next) {
+ InspectorTest.log('Finished TestSuite.');
+ next();
+ },
+]);
+
+function locationToString(callFrame) {
+ var res = {functionName: callFrame.functionName};
+ for (var attr in callFrame.functionLocation) {
+ if (attr == 'scriptId') continue;
+ res['function_' + attr] = callFrame.functionLocation[attr];
+ }
+ for (var attr in callFrame.location) {
+ if (attr == 'scriptId') continue;
+ res[attr] = callFrame.location[attr];
+ }
+ return JSON.stringify(res);
+}
+
+function logStackTrace(messageObject) {
+ var frames = messageObject.params.callFrames;
+ for (var i = 0; i < frames.length; ++i) {
+ InspectorTest.log(' - [' + i + '] ' + locationToString(frames[i]));
+ }
+}
+
+var numPaused = 0;
+
+function handleDebuggerPaused(messageObject)
+{
+ ++numPaused;
+ InspectorTest.log('Paused #' + numPaused);
+ logStackTrace(messageObject);
+ Protocol.Debugger.resume();
+}
+
+var numScripts = 0;
+
+function handleScriptParsed(messageObject)
+{
+ var scriptId = messageObject.params.scriptId;
+ ++numScripts;
+ InspectorTest.log('Script nr ' + numScripts + ' parsed!');
+ if (numScripts > 1) return;
+
+ var startLine = messageObject.params.startLine;
+ var endLine = messageObject.params.endLine;
+ InspectorTest.log('First script; assuming testFunction.');
+ InspectorTest.log(
+ 'Flooding script with breakpoints for all lines (' + startLine + ' - ' +
+ endLine + ')...');
+ var currentLine = startLine;
+ function setNextBreakpoint(message) {
+ if (message) InspectorTest.logMessage('error: ' + message.error);
+ if (currentLine == endLine) {
+ afterScriptParsedCallback();
+ return;
+ }
+ var thisLine = currentLine;
+ currentLine += 1;
+ InspectorTest.log('Setting breakpoint on line ' + thisLine);
+ Protocol.Debugger
+ .setBreakpoint(
+ {'location': {'scriptId': scriptId, 'lineNumber': thisLine}})
+ .then(setNextBreakpoint);
+ }
+ setNextBreakpoint();
+}
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
new file mode 100644
index 0000000000..32c950d2bd
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec-expected.txt
@@ -0,0 +1,99 @@
+This test runs asm.js which calls back to JS. JS triggers a break, on pause we set breakpoints in the asm.js code.
+
+Running test: enableDebugger
+
+Running test: addScript
+Script nr 1 parsed!
+
+Running test: runTestFunction
+Script nr 2 parsed!
+Paused #1
+ - [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
+ - [1] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+First time paused, setting breakpoints!
+Flooding script with breakpoints for all lines (0 - 23)...
+Setting breakpoint on line 0
+error: undefined
+Setting breakpoint on line 1
+error: undefined
+Setting breakpoint on line 2
+error: undefined
+Setting breakpoint on line 3
+error: undefined
+Setting breakpoint on line 4
+error: undefined
+Setting breakpoint on line 5
+error: undefined
+Setting breakpoint on line 6
+error: undefined
+Setting breakpoint on line 7
+error: undefined
+Setting breakpoint on line 8
+error: undefined
+Setting breakpoint on line 9
+error: undefined
+Setting breakpoint on line 10
+error: undefined
+Setting breakpoint on line 11
+error: undefined
+Setting breakpoint on line 12
+error: undefined
+Setting breakpoint on line 13
+error: undefined
+Setting breakpoint on line 14
+error: undefined
+Setting breakpoint on line 15
+error: undefined
+Setting breakpoint on line 16
+error: undefined
+Setting breakpoint on line 17
+error: undefined
+Setting breakpoint on line 18
+error: undefined
+Setting breakpoint on line 19
+error: undefined
+Setting breakpoint on line 20
+error: undefined
+Setting breakpoint on line 21
+error: undefined
+Setting breakpoint on line 22
+error: undefined
+Script nr 3 parsed!
+Resuming...
+Paused #2
+ - [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":15,"columnNumber":2}
+ - [1] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Script nr 4 parsed!
+Resuming...
+Paused #3
+ - [0] {"functionName":"callDebugger","function_lineNumber":4,"function_columnNumber":25,"lineNumber":6,"columnNumber":4}
+ - [1] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":8,"columnNumber":6}
+ - [2] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [3] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Script nr 5 parsed!
+Resuming...
+Paused #4
+ - [0] {"functionName":"redirectFun","function_lineNumber":7,"function_columnNumber":24,"lineNumber":9,"columnNumber":4}
+ - [1] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":19,"columnNumber":2}
+ - [2] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Script nr 6 parsed!
+Resuming...
+Paused #5
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":21,"columnNumber":17}
+ - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Script nr 7 parsed!
+Resuming...
+Paused #6
+ - [0] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":22,"columnNumber":2}
+ - [1] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Script nr 8 parsed!
+Resuming...
+
+Running test: finished
+Finished TestSuite.
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
new file mode 100644
index 0000000000..2a29dc1ea4
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
@@ -0,0 +1,165 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+InspectorTest.log(
+ 'This test runs asm.js which calls back to JS. JS triggers a break, on ' +
+ 'pause we set breakpoints in the asm.js code.');
+
+function testFunction() {
+ function generateAsmJs(stdlib, foreign, heap) {
+ 'use asm';
+ var debugger_fun = foreign.call_debugger;
+ function callDebugger() {
+ debugger_fun();
+ }
+ function redirectFun() {
+ callDebugger();
+ }
+ return redirectFun;
+ }
+
+ function call_debugger() {
+ debugger;
+ }
+
+ %OptimizeFunctionOnNextCall(generateAsmJs);
+ var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
+ fun();
+
+ var finished = 'finished';
+ debugger;
+}
+
+Protocol.Debugger.onPaused(handleDebuggerPaused);
+Protocol.Debugger.onScriptParsed(handleScriptParsed);
+
+function printResultAndContinue(next, message) {
+ if (message.result && message.result.exceptionDetails)
+ InspectorTest.logMessage(message.result.exceptionDetails);
+ else if (message.error)
+ InspectorTest.logMessage(message.error);
+ else if (message.result && message.result.type !== undefined)
+ InspectorTest.logMessage(message.result);
+ if (next) next();
+}
+
+InspectorTest.runTestSuite([
+ function enableDebugger(next) {
+ Protocol.Debugger.enable().then(next);
+ },
+
+ function addScript(next) {
+ afterScriptParsedCallback = next;
+ InspectorTest.addScript(testFunction.toString());
+ },
+
+ function runTestFunction(next) {
+ afterFinishedTestFunctionCallback = next;
+ Protocol.Runtime.evaluate({'expression': 'testFunction()'})
+ .then(printResultAndContinue.bind(null, null));
+ },
+
+ function finished(next) {
+ InspectorTest.log('Finished TestSuite.');
+ next();
+ },
+]);
+
+function locationToString(callFrame) {
+ var res = {functionName: callFrame.functionName};
+ for (var attr in callFrame.functionLocation) {
+ if (attr == 'scriptId') continue;
+ res['function_' + attr] = callFrame.functionLocation[attr];
+ }
+ for (var attr in callFrame.location) {
+ if (attr == 'scriptId') continue;
+ res[attr] = callFrame.location[attr];
+ }
+ return JSON.stringify(res);
+}
+
+function logStackTrace(messageObject) {
+ var frames = messageObject.params.callFrames;
+ for (var i = 0; i < frames.length; ++i) {
+ InspectorTest.log(' - [' + i + '] ' + locationToString(frames[i]));
+ }
+}
+
+var numPaused = 0;
+var parsedScriptParams;
+
+function handleDebuggerPaused(messageObject)
+{
+ ++numPaused;
+ InspectorTest.log('Paused #' + numPaused);
+ logStackTrace(messageObject);
+
+ function cont() {
+ var topFrameId = messageObject.params.callFrames[0].callFrameId;
+ Protocol.Debugger
+ .evaluateOnCallFrame({
+ callFrameId: topFrameId,
+ expression: 'typeof finished'
+ })
+ .then(callbackEvaluate);
+ function callbackEvaluate(message) {
+ var finished = message.result && message.result.result &&
+ message.result.result.value === 'string';
+
+ InspectorTest.log('Resuming...');
+ Protocol.Debugger.resume();
+
+ if (finished)
+ afterFinishedTestFunctionCallback();
+ }
+ }
+
+ if (numPaused > 1) {
+ cont();
+ return;
+ }
+
+ InspectorTest.log('First time paused, setting breakpoints!');
+
+ var startLine = parsedScriptParams.startLine;
+ var endLine = parsedScriptParams.endLine;
+ InspectorTest.log(
+ 'Flooding script with breakpoints for all lines (' + startLine + ' - ' +
+ endLine + ')...');
+ var currentLine = startLine;
+ function setNextBreakpoint(message) {
+ if (message) InspectorTest.logMessage('error: ' + message.error);
+ if (currentLine == endLine) {
+ cont();
+ return;
+ }
+ var thisLine = currentLine;
+ currentLine += 1;
+ InspectorTest.log('Setting breakpoint on line ' + thisLine);
+ Protocol.Debugger
+ .setBreakpoint({
+ 'location': {
+ 'scriptId': parsedScriptParams.scriptId,
+ 'lineNumber': thisLine
+ }
+ })
+ .then(setNextBreakpoint);
+ }
+ setNextBreakpoint();
+}
+
+var numScripts = 0;
+
+function handleScriptParsed(messageObject)
+{
+ var scriptId = messageObject.params.scriptId;
+ ++numScripts;
+ InspectorTest.log('Script nr ' + numScripts + ' parsed!');
+ if (numScripts == 1) {
+ parsedScriptParams = JSON.parse(JSON.stringify(messageObject.params));
+ afterScriptParsedCallback();
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt b/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
new file mode 100644
index 0000000000..f3bfd8de6a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
@@ -0,0 +1,17 @@
+Paused on 'debugger;'
+Number of frames: 5
+ - [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
+ - [1] {"functionName":"callDebugger","lineNumber":5,"columnNumber":6}
+ - [2] {"functionName":"redirectFun","lineNumber":8,"columnNumber":6}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":18,"columnNumber":2}
+ - [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Getting v8-generated stack trace...
+Result of evaluate (string):
+Error: getting stack trace
+ -- skipped --
+ at call_debugger (<anonymous>:15:5)
+ at callDebugger (<anonymous>:6:7)
+ at redirectFun (<anonymous>:9:7)
+ at testFunction (<anonymous>:19:3)
+ at <anonymous>:1:1
+Finished!
diff --git a/deps/v8/test/inspector/debugger/asm-js-stack.js b/deps/v8/test/inspector/debugger/asm-js-stack.js
new file mode 100644
index 0000000000..37db088ba1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/asm-js-stack.js
@@ -0,0 +1,79 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm
+
+function testFunction() {
+ function generateAsmJs(stdlib, foreign, heap) {
+ 'use asm';
+ var debugger_fun = foreign.call_debugger;
+ function callDebugger() {
+ debugger_fun();
+ }
+ function redirectFun() {
+ callDebugger();
+ }
+ return redirectFun;
+ }
+
+ function call_debugger() {
+ debugger;
+ }
+
+ var fun = generateAsmJs(this, {'call_debugger': call_debugger}, undefined);
+ fun();
+}
+
+InspectorTest.addScript(testFunction.toString());
+
+Protocol.Debugger.enable();
+Protocol.Debugger.oncePaused().then(handleDebuggerPaused);
+Protocol.Runtime.evaluate({'expression': 'testFunction()'});
+
+function locationToString(callFrame) {
+ var res = {functionName: callFrame.functionName};
+ for (var attr in callFrame.functionLocation) {
+ if (attr == 'scriptId') continue;
+ res['function_'+attr] = callFrame.functionLocation[attr];
+ }
+ for (var attr in callFrame.location) {
+ if (attr == 'scriptId') continue;
+ res[attr] = callFrame.location[attr];
+ }
+ return JSON.stringify(res);
+}
+
+function logStackTrace(messageObject) {
+ var frames = messageObject.params.callFrames;
+ InspectorTest.log('Number of frames: ' + frames.length);
+ for (var i = 0; i < frames.length; ++i) {
+ InspectorTest.log(' - [' + i + '] ' + locationToString(frames[i]));
+ }
+}
+
+function handleDebuggerPaused(messageObject)
+{
+ InspectorTest.log('Paused on \'debugger;\'');
+ logStackTrace(messageObject);
+ InspectorTest.log('Getting v8-generated stack trace...');
+ var topFrameId = messageObject.params.callFrames[0].callFrameId;
+ Protocol.Debugger
+ .evaluateOnCallFrame({
+ callFrameId: topFrameId,
+ expression: '(new Error("getting stack trace")).stack'
+ })
+ .then(callbackEvaluate);
+}
+
+function callbackEvaluate(response)
+{
+ InspectorTest.log(
+ 'Result of evaluate (' + response.result.result.type + '):');
+ var result_lines = response.result.result.value.split("\n");
+ // Skip the second line, containing the 'evaluate' position.
+ result_lines[1] = " -- skipped --";
+ InspectorTest.log(result_lines.join('\n'));
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/debugger/break-on-exception-expected.txt b/deps/v8/test/inspector/debugger/break-on-exception-expected.txt
new file mode 100644
index 0000000000..92a38f0e83
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/break-on-exception-expected.txt
@@ -0,0 +1,12 @@
+Check that inspector correctly change break on exception state.
+
+Running test: noBreakOnExceptionAfterEnabled
+
+Running test: breakOnUncaughtException
+paused in throwUncaughtException
+
+Running test: breakOnCaughtException
+paused in throwUncaughtException
+paused in throwCaughtException
+
+Running test: noBreakInEvaluateInSilentMode \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/break-on-exception.js b/deps/v8/test/inspector/debugger/break-on-exception.js
new file mode 100644
index 0000000000..e0afc3846f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/break-on-exception.js
@@ -0,0 +1,71 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that inspector correctly change break on exception state.");
+
+InspectorTest.addScript(`
+function scheduleUncaughtException()
+{
+ setTimeout(throwUncaughtException, 0);
+}
+function throwUncaughtException()
+{
+ throw new Error();
+}
+function throwCaughtException()
+{
+ throw new Error();
+}`);
+
+Protocol.Debugger.onPaused(message => {
+ InspectorTest.log("paused in " + message.params.callFrames[0].functionName);
+ Protocol.Debugger.resume();
+});
+
+Protocol.Runtime.enable();
+
+InspectorTest.runTestSuite([
+ function noBreakOnExceptionAfterEnabled(next)
+ {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setPauseOnExceptions({ state: "all" });
+ Protocol.Debugger.disable();
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({ expression: "scheduleUncaughtException()" })
+ .then(() => Protocol.Runtime.evaluate({ expression: "throwCaughtException()" }))
+ .then(() => Protocol.Debugger.disable())
+ .then(next);
+ },
+
+ function breakOnUncaughtException(next)
+ {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setPauseOnExceptions({ state: "uncaught" });
+ Protocol.Runtime.evaluate({ expression: "scheduleUncaughtException()" })
+ .then(() => Protocol.Runtime.onceExceptionThrown())
+ .then(() => Protocol.Runtime.evaluate({ expression: "throwCaughtException()" }))
+ .then(() => Protocol.Debugger.disable())
+ .then(next);
+ },
+
+ function breakOnCaughtException(next)
+ {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setPauseOnExceptions({ state: "all" });
+ Protocol.Runtime.evaluate({ expression: "scheduleUncaughtException()" })
+ .then(() => Protocol.Runtime.onceExceptionThrown())
+ .then(() => Protocol.Runtime.evaluate({ expression: "throwCaughtException()" }))
+ .then(() => Protocol.Debugger.disable())
+ .then(next);
+ },
+
+ function noBreakInEvaluateInSilentMode(next)
+ {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setPauseOnExceptions({ state: "all" })
+ .then(() => Protocol.Runtime.evaluate({ expression: "throwCaughtException()", silent: true }))
+ .then(() => Protocol.Debugger.disable())
+ .then(next);
+ }
+]);
diff --git a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt
new file mode 100644
index 0000000000..b784fa549e
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions-expected.txt
@@ -0,0 +1,5 @@
+Check that inspector correctly passes caught/uncaught information.
+paused in throwCaught
+uncaught: false
+paused in throwUncaught
+uncaught: true
diff --git a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
new file mode 100644
index 0000000000..95d9b3ff65
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check that inspector correctly passes caught/uncaught information.");
+
+InspectorTest.addScript(
+`function throwCaught() { try { throw new Error(); } catch (_) {} }
+ function throwUncaught() { throw new Error(); }
+ function schedule(f) { setTimeout(f, 0); }
+`);
+
+Protocol.Debugger.enable();
+
+Protocol.Debugger.setPauseOnExceptions({ "state": "all" });
+Protocol.Debugger.onPaused(message => {
+ InspectorTest.log("paused in " + message.params.callFrames[0].functionName);
+ InspectorTest.log("uncaught: " + message.params.data.uncaught);
+ Protocol.Debugger.resume();
+});
+
+Protocol.Runtime.evaluate({ "expression": "schedule(throwCaught);" })
+ .then(() => Protocol.Runtime.evaluate(
+ { "expression": "schedule(throwUncaught);" }))
+ .then(() => InspectorTest.completeTest());
diff --git a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
index 4148ef860a..821ce46cd3 100644
--- a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
+++ b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
@@ -1,23 +1,23 @@
Check that debug and monitor methods from Command Line API works with bound function.
-debug foo and bar
-call foo and bar
+> debug foo and bar
+> call foo and bar
paused in foo
paused in boo
-undebug foo and bar
-call foo and bar
-monitor foo and bar
-call foo and bar
+> undebug foo and bar
+> call foo and bar
+> monitor foo and bar
+> call foo and bar
function foo called
function boo called
-unmonitor foo and bar
-call foo and bar
-monitor and debug bar
-call bar
+> unmonitor foo and bar
+> call foo and bar
+> monitor and debug bar
+> call bar
function boo called
paused in boo
-undebug bar
-call bar
+> undebug bar
+> call bar
function boo called
-debug and unmonitor bar
-call bar
+> debug and unmonitor bar
+> call bar
paused in boo \ No newline at end of file
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt
new file mode 100644
index 0000000000..c85832872c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt
@@ -0,0 +1,27 @@
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 6
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 7
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 8
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js
new file mode 100644
index 0000000000..e574f69c01
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Protocol.Debugger.enable();
+
+Protocol.Debugger.onceScriptParsed().then(message => message.params.scriptId)
+ .then((scriptId) => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(InspectorTest.completeTest);
+
+compileAndRunWithOrigin("() => []", "", 0, 0);
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
new file mode 100644
index 0000000000..608abce218
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-expected.txt
@@ -0,0 +1,1109 @@
+Test for Debugger.getPossibleBreakpoints
+
+Running test: getPossibleBreakpointsInRange
+Test start.scriptId != end.scriptId.
+{
+ error : {
+ code : -32000
+ message : Locations should contain the same scriptId
+ }
+ id : <messageId>
+}
+Test not existing scriptId.
+{
+ error : {
+ code : -32000
+ message : Script not found
+ }
+ id : <messageId>
+}
+Test end < start.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ ]
+ }
+}
+Test empty range in first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ ]
+ }
+}
+Test one character range in first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test empty range in not first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ ]
+ }
+}
+Test one character range in not first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test end is undefined
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 42
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 16
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 52
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 54
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 57
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 0
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test end.lineNumber > scripts.lineCount()
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 42
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 16
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 52
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 54
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 57
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 0
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test one string
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 42
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test end.columnNumber > end.line.length(), should be the same as previous.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 42
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+
+Running test: getPossibleBreakpointsInArrow
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 53
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 56
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 69
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 71
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 84
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 90
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [7] : {
+ columnNumber : 92
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [8] : {
+ columnNumber : 94
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+
+Running test: arrowFunctionFirstLine
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 18
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 47
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 49
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 51
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 18
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 47
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 49
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 51
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+paused in foo1
+{
+ columnNumber : 18
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in foo1
+{
+ columnNumber : 51
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 47
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 49
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+
+Running test: arrowFunctionOnPause
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 28
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 57
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 59
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 61
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 18
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 47
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [7] : {
+ columnNumber : 49
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [8] : {
+ columnNumber : 51
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [9] : {
+ columnNumber : 0
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [10] : {
+ columnNumber : 0
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ [11] : {
+ columnNumber : 6
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 28
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 57
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 59
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 61
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 18
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 47
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 49
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 51
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 0
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 0
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 6
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+paused in
+{
+ columnNumber : 0
+ lineNumber : 2
+ scriptId : <scriptId>
+}
+paused in foo3
+{
+ columnNumber : 28
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in foo3
+{
+ columnNumber : 61
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in
+{
+ columnNumber : 0
+ lineNumber : 3
+ scriptId : <scriptId>
+}
+paused in foo4
+{
+ columnNumber : 18
+ lineNumber : 1
+ scriptId : <scriptId>
+}
+paused in foo4
+{
+ columnNumber : 51
+ lineNumber : 1
+ scriptId : <scriptId>
+}
+paused in
+{
+ columnNumber : 6
+ lineNumber : 3
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 57
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 59
+ lineNumber : 0
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 47
+ lineNumber : 1
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 49
+ lineNumber : 1
+ scriptId : <scriptId>
+}
+
+Running test: getPossibleBreakpointsInRangeWithOffset
+Test empty range in first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ ]
+ }
+}
+Test one character range in first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test empty range in not first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ ]
+ }
+}
+Test one character range in not first line.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 16
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test end is undefined
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 43
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 16
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 52
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 54
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 57
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 0
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test end.lineNumber > scripts.lineCount()
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 43
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 16
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 52
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 54
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 57
+ lineNumber : 2
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 0
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test one string
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 43
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+Test end.columnNumber > end.line.length(), should be the same as previous.
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 43
+ lineNumber : 1
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+
+Running test: withOffset
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 36
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 65
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 67
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 69
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ [4] : {
+ columnNumber : 18
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ [5] : {
+ columnNumber : 47
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ [6] : {
+ columnNumber : 49
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ [7] : {
+ columnNumber : 51
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 36
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 65
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 67
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 69
+ lineNumber : 3
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 18
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 47
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 49
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+{
+ id : <messageId>
+ result : {
+ actualLocation : {
+ columnNumber : 51
+ lineNumber : 4
+ scriptId : <scriptId>
+ }
+ breakpointId : <breakpointId>
+ }
+}
+paused in foo5
+{
+ columnNumber : 36
+ lineNumber : 3
+ scriptId : <scriptId>
+}
+paused in foo5
+{
+ columnNumber : 69
+ lineNumber : 3
+ scriptId : <scriptId>
+}
+paused in foo6
+{
+ columnNumber : 18
+ lineNumber : 4
+ scriptId : <scriptId>
+}
+paused in foo6
+{
+ columnNumber : 51
+ lineNumber : 4
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 65
+ lineNumber : 3
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 67
+ lineNumber : 3
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 47
+ lineNumber : 4
+ scriptId : <scriptId>
+}
+paused in Promise.resolve.then
+{
+ columnNumber : 49
+ lineNumber : 4
+ scriptId : <scriptId>
+}
+
+Running test: arrowFunctionReturn
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 6
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 9
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 34
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 45
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 48
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 49
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 8
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 12
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 13
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 17
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 21
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 22
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 6
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 8
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [3] : {
+ columnNumber : 9
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
+{
+ id : <messageId>
+ result : {
+ locations : [
+ [0] : {
+ columnNumber : 0
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [1] : {
+ columnNumber : 8
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ [2] : {
+ columnNumber : 19
+ lineNumber : 0
+ scriptId : <scriptId>
+ }
+ ]
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
new file mode 100644
index 0000000000..b09c08de14
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Test for Debugger.getPossibleBreakpoints");
+
+Protocol.Runtime.enable();
+Protocol.Debugger.enable();
+
+InspectorTest.runTestSuite([
+
+ function getPossibleBreakpointsInRange(next) {
+ var source = "function foo(){ return Promise.resolve(); }\nfunction boo(){ return Promise.resolve().then(() => 42); }\n\n";
+ var scriptId;
+ compileScript(source)
+ .then(id => scriptId = id)
+ .then(() => InspectorTest.log("Test start.scriptId != end.scriptId."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }, end: { lineNumber: 0, columnNumber: 0, scriptId: scriptId + "0" }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test not existing scriptId."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: "-1" }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end < start."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 0, scriptId: scriptId }, end: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test empty range in first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 16, scriptId: scriptId }, end: { lineNumber: 0, columnNumber: 16, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test one character range in first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 16, scriptId: scriptId }, end: { lineNumber: 0, columnNumber: 17, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test empty range in not first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 16, scriptId: scriptId }, end: { lineNumber: 1, columnNumber: 16, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test one character range in not first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 16, scriptId: scriptId }, end: { lineNumber: 1, columnNumber: 17, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end is undefined"))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end.lineNumber > scripts.lineCount()"))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }, end: { lineNumber: 5, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test one string"))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }, end: { lineNumber: 1, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end.columnNumber > end.line.length(), should be the same as previous."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }, end: { lineNumber: 0, columnNumber: 256, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(next);
+ },
+
+ function getPossibleBreakpointsInArrow(next) {
+ var source = "function foo() { return Promise.resolve().then(() => 239).then(() => 42).then(() => () => 42) }";
+ var scriptId;
+ compileScript(source)
+ .then(id => scriptId = id)
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(next);
+ },
+
+ function arrowFunctionFirstLine(next) {
+ Protocol.Debugger.onPaused(message => {
+ InspectorTest.log("paused in " + message.params.callFrames[0].functionName);
+ InspectorTest.logMessage(message.params.callFrames[0].location);
+ Protocol.Debugger.resume();
+ });
+
+ var source = `function foo1() { Promise.resolve().then(() => 42) }
+function foo2() { Promise.resolve().then(() => 42) }`;
+ waitForPossibleBreakpoints(source, { lineNumber: 0, columnNumber: 0 }, { lineNumber: 1, columnNumber: 0 })
+ .then(InspectorTest.logMessage)
+ .then(setAllBreakpoints)
+ .then(() => Protocol.Runtime.evaluate({ expression: "foo1(); foo2()"}))
+ .then(next);
+ },
+
+ function arrowFunctionOnPause(next) {
+ function dumpAndResume(message) {
+ InspectorTest.log("paused in " + message.params.callFrames[0].functionName);
+ InspectorTest.logMessage(message.params.callFrames[0].location);
+ Protocol.Debugger.resume();
+ }
+
+ var source = `debugger; function foo3() { Promise.resolve().then(() => 42) }
+function foo4() { Promise.resolve().then(() => 42) };\nfoo3();\nfoo4();`;
+ waitForPossibleBreakpointsOnPause(source, { lineNumber: 0, columnNumber: 0 }, undefined, next)
+ .then(InspectorTest.logMessage)
+ .then(setAllBreakpoints)
+ .then(() => Protocol.Debugger.onPaused(dumpAndResume))
+ .then(() => Protocol.Debugger.resume());
+ },
+
+ function getPossibleBreakpointsInRangeWithOffset(next) {
+ var source = "function foo(){ return Promise.resolve(); }\nfunction boo(){ return Promise.resolve().then(() => 42); }\n\n";
+ var scriptId;
+ compileScript(source, { name: "with-offset.js", line_offset: 1, column_offset: 1 })
+ .then(id => scriptId = id)
+ .then(() => InspectorTest.log("Test empty range in first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 17, scriptId: scriptId }, end: { lineNumber: 1, columnNumber: 17, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test one character range in first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 17, scriptId: scriptId }, end: { lineNumber: 1, columnNumber: 18, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test empty range in not first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 2, columnNumber: 16, scriptId: scriptId }, end: { lineNumber: 2, columnNumber: 16, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test one character range in not first line."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 2, columnNumber: 16, scriptId: scriptId }, end: { lineNumber: 2, columnNumber: 17, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end is undefined"))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end.lineNumber > scripts.lineCount()"))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber: 0, scriptId: scriptId }, end: { lineNumber: 5, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test one string"))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 1, scriptId: scriptId }, end: { lineNumber: 2, columnNumber: 0, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(() => InspectorTest.log("Test end.columnNumber > end.line.length(), should be the same as previous."))
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 1, columnNumber: 1, scriptId: scriptId }, end: { lineNumber: 1, columnNumber: 256, scriptId: scriptId }}))
+ .then(InspectorTest.logMessage)
+ .then(next);
+ },
+
+ function withOffset(next) {
+ Protocol.Debugger.onPaused(message => {
+ InspectorTest.log("paused in " + message.params.callFrames[0].functionName);
+ InspectorTest.logMessage(message.params.callFrames[0].location);
+ Protocol.Debugger.resume();
+ });
+
+ var source = `function foo5() { Promise.resolve().then(() => 42) }
+function foo6() { Promise.resolve().then(() => 42) }`;
+ waitForPossibleBreakpoints(source, { lineNumber: 0, columnNumber: 0 }, undefined, { name: "with-offset.js", line_offset: 3, column_offset: 18 })
+ .then(InspectorTest.logMessage)
+ .then(setAllBreakpoints)
+ .then(() => Protocol.Runtime.evaluate({ expression: "foo5(); foo6()"}))
+ .then(next);
+ },
+
+ function arrowFunctionReturn(next) {
+ waitForPossibleBreakpoints("() => 239\n", { lineNumber: 0, columnNumber: 0 })
+ .then(InspectorTest.logMessage)
+ .then(() => waitForPossibleBreakpoints("function foo() { function boo() { return 239 } }\n", { lineNumber: 0, columnNumber: 0 }))
+ .then(InspectorTest.logMessage)
+ .then(() => waitForPossibleBreakpoints("() => { 239 }\n", { lineNumber: 0, columnNumber: 0 }))
+ .then(InspectorTest.logMessage)
+ // TODO(kozyatinskiy): lineNumber for return position should be 21 instead of 22.
+ .then(() => waitForPossibleBreakpoints("function foo() { 239 }\n", { lineNumber: 0, columnNumber: 0 }))
+ .then(InspectorTest.logMessage)
+ // TODO(kozyatinskiy): lineNumber for return position should be only 9, not 8.
+ .then(() => waitForPossibleBreakpoints("() => 239", { lineNumber: 0, columnNumber: 0 }))
+ .then(InspectorTest.logMessage)
+ // TODO(kozyatinskiy): lineNumber for return position should be only 19, not 20.
+ .then(() => waitForPossibleBreakpoints("() => { return 239 }", { lineNumber: 0, columnNumber: 0 }))
+ .then(InspectorTest.logMessage)
+ .then(next)
+ }
+]);
+
+function compileScript(source, origin) {
+ var promise = Protocol.Debugger.onceScriptParsed().then(message => message.params.scriptId);
+ if (!origin) origin = { name: "", line_offset: 0, column_offset: 0 };
+ compileAndRunWithOrigin(source, origin.name, origin.line_offset, origin.column_offset);
+ return promise;
+}
+
+function waitForPossibleBreakpoints(source, start, end, origin) {
+ return compileScript(source, origin)
+ .then(scriptId => { (start || {}).scriptId = scriptId; (end || {}).scriptId = scriptId })
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: start, end: end }));
+}
+
+function waitForPossibleBreakpointsOnPause(source, start, end, next) {
+ var promise = Protocol.Debugger.oncePaused()
+ .then(msg => { (start || {}).scriptId = msg.params.callFrames[0].location.scriptId; (end || {}).scriptId = msg.params.callFrames[0].location.scriptId })
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({ start: start, end: end }));
+ Protocol.Runtime.evaluate({ expression: source }).then(next);
+ return promise;
+}
+
+function setAllBreakpoints(message) {
+ var promises = [];
+ for (var location of message.result.locations)
+ promises.push(Protocol.Debugger.setBreakpoint({ location: location }).then(checkBreakpointAndDump));
+ return Promise.all(promises);
+}
+
+function checkBreakpointAndDump(message) {
+ if (message.error) {
+ InspectorTest.log("FAIL: error in setBreakpoint");
+ InspectorTest.logMessage(message);
+ return;
+ }
+ var id_data = message.result.breakpointId.split(":");
+ if (parseInt(id_data[1]) !== message.result.actualLocation.lineNumber || parseInt(id_data[2]) !== message.result.actualLocation.columnNumber) {
+ InspectorTest.log("FAIL: possible breakpoint was resolved in another location");
+ InspectorTest.logMessage(message);
+ }
+ InspectorTest.logMessage(message);
+}
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
new file mode 100644
index 0000000000..2c7995c335
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties-expected.txt
@@ -0,0 +1,253 @@
+Check internal properties reported in object preview.
+
+Running test: boxedObjects
+expression: new Number(239)
+{
+ name : [[PrimitiveValue]]
+ type : number
+ value : 239
+}
+
+expression: new Boolean(false)
+{
+ name : [[PrimitiveValue]]
+ type : boolean
+ value : false
+}
+
+expression: new String("abc")
+{
+ name : [[PrimitiveValue]]
+ type : string
+ value : abc
+}
+
+expression: Object(Symbol(42))
+{
+ name : [[PrimitiveValue]]
+ type : symbol
+ valuePreview : {
+ description : Symbol
+ overflow : false
+ properties : [
+ ]
+ type : object
+ }
+}
+
+
+Running test: promise
+expression: Promise.resolve(42)
+{
+ name : [[PromiseStatus]]
+ type : string
+ value : resolved
+}
+{
+ name : [[PromiseValue]]
+ type : number
+ value : 42
+}
+
+expression: new Promise(() => undefined)
+{
+ name : [[PromiseStatus]]
+ type : string
+ value : pending
+}
+{
+ name : [[PromiseValue]]
+ type : undefined
+ value : undefined
+}
+
+
+Running test: generatorObject
+expression: (function* foo() { yield 1 })()
+{
+ name : [[GeneratorStatus]]
+ type : string
+ value : suspended
+}
+
+
+Running test: entriesInMapAndSet
+expression: new Map([[1,2]])
+[[Entries]]:
+[
+ [0] : {
+ key : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ value : {
+ description : 2
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+]
+
+expression: new Set([1])
+[[Entries]]:
+[
+ [0] : {
+ value : {
+ description : 1
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+]
+
+expression: new WeakMap([[{}, 42]])
+[[Entries]]:
+[
+ [0] : {
+ key : {
+ description : Object
+ overflow : false
+ properties : [
+ ]
+ type : object
+ }
+ value : {
+ description : 42
+ overflow : false
+ properties : [
+ ]
+ type : number
+ }
+ }
+]
+
+expression: new WeakSet([{}])
+[[Entries]]:
+[
+ [0] : {
+ value : {
+ description : Object
+ overflow : false
+ properties : [
+ ]
+ type : object
+ }
+ }
+]
+
+
+Running test: iteratorObject
+expression: (new Map([[1,2]])).entries()
+{
+ name : [[IteratorHasMore]]
+ type : boolean
+ value : true
+}
+{
+ name : [[IteratorIndex]]
+ type : number
+ value : 0
+}
+{
+ name : [[IteratorKind]]
+ type : string
+ value : entries
+}
+[[Entries]]:
+[
+ [0] : {
+ value : {
+ description : Array[2]
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ type : number
+ value : 1
+ }
+ [1] : {
+ name : 1
+ type : number
+ value : 2
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+]
+
+expression: (new Set([[1,2]])).entries()
+{
+ name : [[IteratorHasMore]]
+ type : boolean
+ value : true
+}
+{
+ name : [[IteratorIndex]]
+ type : number
+ value : 0
+}
+{
+ name : [[IteratorKind]]
+ type : string
+ value : entries
+}
+[[Entries]]:
+[
+ [0] : {
+ value : {
+ description : Array[2]
+ overflow : false
+ properties : [
+ [0] : {
+ name : 0
+ subtype : array
+ type : object
+ value : Array[2]
+ }
+ [1] : {
+ name : 1
+ subtype : array
+ type : object
+ value : Array[2]
+ }
+ ]
+ subtype : array
+ type : object
+ }
+ }
+]
+
+
+Running test: noPreviewForFunctionObject
+(function foo(){})
+{
+ id : <messageId>
+ result : {
+ result : {
+ className : Function
+ description : function foo(){}
+ objectId : <objectId>
+ type : function
+ }
+ }
+}
+
+Running test: otherObjects
+expression: [1,2,3]
+
+expression: /123/
+
+expression: ({})
+
+
+Running test: overridenArrayGetter
+expression: Promise.resolve(42)
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
new file mode 100644
index 0000000000..00b1d5bf45
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
@@ -0,0 +1,107 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Check internal properties reported in object preview.");
+
+Protocol.Debugger.enable();
+Protocol.Runtime.enable();
+Protocol.Runtime.onConsoleAPICalled(dumpInternalPropertiesAndEntries);
+
+InspectorTest.runTestSuite([
+ function boxedObjects(next)
+ {
+ checkExpression("new Number(239)")
+ .then(() => checkExpression("new Boolean(false)"))
+ .then(() => checkExpression("new String(\"abc\")"))
+ .then(() => checkExpression("Object(Symbol(42))"))
+ .then(next);
+ },
+
+ function promise(next)
+ {
+ checkExpression("Promise.resolve(42)")
+ .then(() => checkExpression("new Promise(() => undefined)"))
+ .then(next);
+ },
+
+ function generatorObject(next)
+ {
+ checkExpression("(function* foo() { yield 1 })()")
+ .then(next);
+ },
+
+ function entriesInMapAndSet(next)
+ {
+ checkExpression("new Map([[1,2]])")
+ .then(() => checkExpression("new Set([1])"))
+ .then(() => checkExpression("new WeakMap([[{}, 42]])"))
+ .then(() => checkExpression("new WeakSet([{}])"))
+ .then(next);
+ },
+
+ function iteratorObject(next)
+ {
+ checkExpression("(new Map([[1,2]])).entries()")
+ .then(() => checkExpression("(new Set([[1,2]])).entries()"))
+ .then(next);
+ },
+
+ function noPreviewForFunctionObject(next)
+ {
+ var expression = "(function foo(){})";
+ InspectorTest.log(expression);
+ Protocol.Runtime.evaluate({ expression: expression, generatePreview: true})
+ .then(message => InspectorTest.logMessage(message))
+ .then(next);
+ },
+
+ function otherObjects(next)
+ {
+ checkExpression("[1,2,3]")
+ .then(() => checkExpression("/123/"))
+ .then(() => checkExpression("({})"))
+ .then(next);
+ },
+
+ function overridenArrayGetter(next)
+ {
+ Protocol.Runtime.evaluate({ expression: "Array.prototype.__defineGetter__(\"0\",() => { throw new Error() }) "})
+ .then(() => checkExpression("Promise.resolve(42)"))
+ .then(next);
+ }
+]);
+
+function checkExpression(expression)
+{
+ InspectorTest.log(`expression: ${expression}`);
+ // console.table has higher limits for internal properties amount in preview.
+ return Protocol.Runtime.evaluate({ expression: `console.table(${expression})`, generatePreview: true });
+}
+
+function dumpInternalPropertiesAndEntries(message)
+{
+ var properties;
+ var entries;
+ try {
+ var preview = message.params.args[0].preview;
+ properties = preview.properties;
+ entries = preview.entries;
+ } catch (e) {
+ InspectorTest.logMessage(message);
+ return;
+ }
+ if (!properties) {
+ InspectorTest.logMessage(message);
+ return;
+ }
+ for (var property of properties) {
+ if (property.name.startsWith("[["))
+ InspectorTest.logMessage(property);
+ }
+ if (entries) {
+ InspectorTest.log("[[Entries]]:");
+ InspectorTest.logMessage(entries);
+ }
+ InspectorTest.log("");
+}
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
new file mode 100644
index 0000000000..d7c6d76346
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile-expected.txt
@@ -0,0 +1,664 @@
+Checks that inspector correctly process compiled scripts
+scriptParsed
+{
+ scriptSource : function foo1(){}//# sourceURL=oneline.js<nl>
+}
+{
+ endColumn : 0
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : oneline.js
+}
+scriptParsed
+{
+ scriptSource : function foo2(){}//# sourceURL=oneline-without-nl.js
+}
+{
+ endColumn : 52
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : oneline-without-nl.js
+}
+scriptParsed
+{
+ scriptSource : function foo3(){}<nl>//# sourceURL=twoline.js<nl>
+}
+{
+ endColumn : 0
+ endLine : 3
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : twoline.js
+}
+scriptParsed
+{
+ scriptSource : function foo4(){}<nl><nl>//# sourceURL=threeline.js<nl>
+}
+{
+ endColumn : 0
+ endLine : 4
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : threeline.js
+}
+scriptParsed
+{
+ scriptSource : function foo5(){}//# sourceMappingURL=oneline-map<nl>
+}
+{
+ endColumn : 0
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : oneline-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo6(){}//# sourceMappingURL=oneline-without-nl-map
+}
+{
+ endColumn : 60
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : oneline-without-nl-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo7(){}<nl>//# sourceMappingURL=twoline-map<nl>
+}
+{
+ endColumn : 0
+ endLine : 3
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : twoline-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo8(){}<nl><nl>//# sourceMappingURL=threeline-map<nl>
+}
+{
+ endColumn : 0
+ endLine : 4
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : threeline-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo9(){}//# sourceMappingURL=source-mapping-url-map<nl>//# sourceURL=source-url.js
+}
+{
+ endColumn : 27
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : source-mapping-url-map
+ startColumn : 0
+ startLine : 0
+ url : source-url.js
+}
+scriptParsed
+{
+ scriptSource : function foo10(){}//# sourceURL=source-url.js<nl>//# sourceMappingURL=source-mapping-url-map
+}
+{
+ endColumn : 43
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : source-mapping-url-map
+ startColumn : 0
+ startLine : 0
+ url : source-url.js
+}
+scriptParsed
+{
+ scriptSource : function foo11(){}<nl>//# sourceURL=end1.js
+}
+{
+ endColumn : 21
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : end1.js
+}
+scriptParsed
+{
+ scriptSource : function foo12(){}<nl>//# sourceURL=end2.js
+}
+{
+ endColumn : 22
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : end2.js
+}
+scriptParsed
+{
+ scriptSource : function foo13(){}
+}
+{
+ endColumn : 18
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo15(){}; eval("function foo14(){}//# sourceURL=eval.js")//# sourceURL=eval-wrapper.js
+}
+{
+ endColumn : 96
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : eval-wrapper.js
+}
+scriptParsed
+{
+ scriptSource : function foo14(){}//# sourceURL=eval.js
+}
+{
+ endColumn : 39
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : eval.js
+}
+scriptParsed
+{
+ scriptSource : function foo16(){}<nl>
+}
+{
+ endColumn : 0
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo17(){}<nl><nl>
+}
+{
+ endColumn : 0
+ endLine : 3
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo18(){}<nl><nl><nl>
+}
+{
+ endColumn : 0
+ endLine : 4
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo19(){}<nl><nl><nl><nl>
+}
+{
+ endColumn : 0
+ endLine : 5
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo1(){}//# sourceURL=oneline.js<nl>
+}
+{
+ endColumn : 0
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : oneline.js
+}
+scriptParsed
+{
+ scriptSource : function foo2(){}//# sourceURL=oneline-without-nl.js
+}
+{
+ endColumn : 52
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : oneline-without-nl.js
+}
+scriptParsed
+{
+ scriptSource : function foo3(){}<nl>//# sourceURL=twoline.js<nl>
+}
+{
+ endColumn : 0
+ endLine : 3
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : twoline.js
+}
+scriptParsed
+{
+ scriptSource : function foo4(){}<nl><nl>//# sourceURL=threeline.js<nl>
+}
+{
+ endColumn : 0
+ endLine : 4
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : threeline.js
+}
+scriptParsed
+{
+ scriptSource : function foo5(){}//# sourceMappingURL=oneline-map<nl>
+}
+{
+ endColumn : 0
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : oneline-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo6(){}//# sourceMappingURL=oneline-without-nl-map
+}
+{
+ endColumn : 60
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : oneline-without-nl-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo7(){}<nl>//# sourceMappingURL=twoline-map<nl>
+}
+{
+ endColumn : 0
+ endLine : 3
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : twoline-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo8(){}<nl><nl>//# sourceMappingURL=threeline-map<nl>
+}
+{
+ endColumn : 0
+ endLine : 4
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : threeline-map
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo9(){}//# sourceMappingURL=source-mapping-url-map<nl>//# sourceURL=source-url.js
+}
+{
+ endColumn : 27
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : source-mapping-url-map
+ startColumn : 0
+ startLine : 0
+ url : source-url.js
+}
+scriptParsed
+{
+ scriptSource : function foo10(){}//# sourceURL=source-url.js<nl>//# sourceMappingURL=source-mapping-url-map
+}
+{
+ endColumn : 43
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL : source-mapping-url-map
+ startColumn : 0
+ startLine : 0
+ url : source-url.js
+}
+scriptParsed
+{
+ scriptSource : function foo11(){}<nl>//# sourceURL=end1.js
+}
+{
+ endColumn : 21
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : end1.js
+}
+scriptParsed
+{
+ scriptSource : function foo12(){}<nl>//# sourceURL=end2.js
+}
+{
+ endColumn : 22
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : end2.js
+}
+scriptParsed
+{
+ scriptSource : function foo13(){}
+}
+{
+ endColumn : 18
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo15(){}; eval("function foo14(){}//# sourceURL=eval.js")//# sourceURL=eval-wrapper.js
+}
+{
+ endColumn : 96
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : eval-wrapper.js
+}
+scriptParsed
+{
+ scriptSource : function foo14(){}//# sourceURL=eval.js
+}
+{
+ endColumn : 39
+ endLine : 0
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : eval.js
+}
+scriptFailedToParse
+{
+ scriptSource : }//# sourceURL=failed.js<nl>//# sourceMappingURL=failed-map
+}
+{
+ endColumn : 31
+ endLine : 1
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : <hash>
+ scriptId : <scriptId>
+ sourceMapURL : failed-map
+ startColumn : 0
+ startLine : 0
+ url : failed.js
+}
+scriptParsed
+{
+ scriptSource : function foo16(){}<nl>
+}
+{
+ endColumn : 0
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo17(){}<nl><nl>
+}
+{
+ endColumn : 0
+ endLine : 3
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo18(){}<nl><nl><nl>
+}
+{
+ endColumn : 0
+ endLine : 4
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+scriptParsed
+{
+ scriptSource : function foo19(){}<nl><nl><nl><nl>
+}
+{
+ endColumn : 0
+ endLine : 5
+ executionContextId : <executionContextId>
+ hasSourceURL : false
+ hash : <hash>
+ isLiveEdit : false
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url :
+}
+Remove script references and re-enable debugger.
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile.js b/deps/v8/test/inspector/debugger/script-on-after-compile.js
new file mode 100644
index 0000000000..083aa48065
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile.js
@@ -0,0 +1,93 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Checks that inspector correctly process compiled scripts");
+
+function addScripts() {
+ // sourceURL in the same line
+ return addScript("function foo1(){}//# sourceURL=oneline.js\n")
+ // sourceURL without end line
+ .then(() => addScript("function foo2(){}//# sourceURL=oneline-without-nl.js"))
+ // other source urls
+ .then(() => addScript("function foo3(){}\n//# sourceURL=twoline.js\n"))
+ .then(() => addScript("function foo4(){}\n\n//# sourceURL=threeline.js\n"))
+
+ // sourceMappingURL in the same line
+ .then(() => addScript("function foo5(){}//# sourceMappingURL=oneline-map\n"))
+ // sourceMappingURL without end line
+ .then(() => addScript("function foo6(){}//# sourceMappingURL=oneline-without-nl-map"))
+ // other sourceMappingURLs
+ .then(() => addScript("function foo7(){}\n//# sourceMappingURL=twoline-map\n"))
+ .then(() => addScript("function foo8(){}\n\n//# sourceMappingURL=threeline-map\n"))
+
+ // sourceURL + sourceMappingURL
+ .then(() => addScript("function foo9(){}//# sourceMappingURL=source-mapping-url-map\n//# sourceURL=source-url.js"))
+ .then(() => addScript("function foo10(){}//# sourceURL=source-url.js\n//# sourceMappingURL=source-mapping-url-map"))
+
+ // non zero endLine and endColumn..
+ .then(() => addScript("function foo11(){}\n//# sourceURL=end1.js"))
+ // .. + 1 character
+ .then(() => addScript("function foo12(){}\n//# sourceURL=end2.js "))
+ // script without sourceURL
+ .then(() => addScript("function foo13(){}"))
+ // script in eval
+ .then(() => addScript("function foo15(){}; eval(\"function foo14(){}//# sourceURL=eval.js\")//# sourceURL=eval-wrapper.js"))
+ // sourceURL and sourceMappingURL works even for script with syntax error
+ .then(() => addScript("}//# sourceURL=failed.js\n//# sourceMappingURL=failed-map"))
+ // empty lines at end
+ .then(() => addScript("function foo16(){}\n"))
+ .then(() => addScript("function foo17(){}\n\n"))
+ .then(() => addScript("function foo18(){}\n\n\n"))
+ .then(() => addScript("function foo19(){}\n\n\n\n"));
+}
+
+Protocol.Debugger.onScriptParsed((message) => requestSourceAndDump(message, true));
+Protocol.Debugger.onScriptFailedToParse((message) => requestSourceAndDump(message, false));
+addScripts()
+ .then(() => Protocol.Debugger.enable())
+ .then(addScripts)
+ .then(() => Protocol.Debugger.disable())
+ .then(() => InspectorTest.log("Remove script references and re-enable debugger."))
+ .then(() => Protocol.Runtime.evaluate(
+ { expression: "for (let i = 1; i < 20; ++i) eval(`foo${i} = undefined`);" }))
+ .then(() => Protocol.HeapProfiler.collectGarbage())
+ .then(() => Protocol.Debugger.enable())
+ .then(InspectorTest.completeTest);
+
+function addScript(source) {
+ return Protocol.Runtime.evaluate({ expression: source });
+}
+
+function requestSourceAndDump(scriptParsedMessage, scriptParsed) {
+ Protocol.Debugger.getScriptSource({ scriptId: scriptParsedMessage.params.scriptId })
+ .then((sourceMessage) => dumpScriptParsed(scriptParsedMessage, sourceMessage, scriptParsed));
+}
+
+function dumpScriptParsed(scriptParsedMessage, sourceMessage, scriptParsed) {
+ var params = scriptParsedMessage.params;
+ var re = /[A-Z0-9]{40,40}/;
+ if (!params.hash || !matchExact(re, params.hash))
+ params.hash = "Invalid hash: " + params.hash;
+ else
+ params.hash = "<hash>";
+ if (params.executionContextId <= 0)
+ params.executionContextId = "Invalid executionContextId: " + params.executionContextId;
+ else
+ params.executionContextId = "<executionContextId>";
+ if (params.scriptId * 1 <= 0)
+ params.scriptId = "Invalid scriptId: " + params.scriptId;
+ else
+ params.scriptId = "<scriptId>";
+
+ var sourceResult = sourceMessage.result;
+ sourceResult.scriptSource = sourceResult.scriptSource.replace(/\n/g, "<nl>");
+ InspectorTest.log(scriptParsed ? "scriptParsed" : "scriptFailedToParse");
+ InspectorTest.logObject(sourceResult);
+ InspectorTest.logObject(params);
+}
+
+function matchExact(re, str) {
+ var match = str.match(re);
+ return match !== null && str === match[0];
+}
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
index 81685a2b8b..e4fdd95d5f 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
@@ -1,7 +1,6 @@
setBreakpointByUrl error: undefined
setBreakpoint error: {
"code": -32602,
- "message": "Invalid request",
+ "message": "Invalid parameters",
"data": "location: object expected"
}
-
diff --git a/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt
new file mode 100644
index 0000000000..da35fc77b4
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt
@@ -0,0 +1,11 @@
+-- call boo:
+(top)
+(top)
+boo
+(top)
+-- call foo:
+(top)
+(top)
+(top)
+foo
+(top)
diff --git a/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js
new file mode 100644
index 0000000000..7d6577b82a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+InspectorTest.addScript(`
+function boo() {}
+boo();
+function foo() {}
+//# sourceURL=foo.js`);
+
+Protocol.Debugger.onPaused((message) => {
+ InspectorTest.logMessage(message.params.callFrames[0].functionName || "(top)");
+ Protocol.Debugger.stepInto();
+});
+var scriptId;
+Protocol.Debugger.onScriptParsed(message => {
+ if (message.params.url === 'foo.js')
+ scriptId = message.params.scriptId;
+});
+Protocol.Debugger.enable()
+ .then(() => Protocol.Debugger.getPossibleBreakpoints({start: {scriptId, lineNumber:0,columnNumber:0}}))
+ .then(() => InspectorTest.log('-- call boo:'))
+ .then(() => Protocol.Runtime.evaluate({ expression: 'debugger; boo();'}))
+ .then(() => InspectorTest.log('-- call foo:'))
+ .then(() => Protocol.Runtime.evaluate({ expression: 'debugger; foo();'}))
+ .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
index 740634f68f..0a97e5dc35 100644
--- a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
+++ b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
@@ -103,8 +103,10 @@ var actions = [ "stepOut", "print", "stepOut", "print", "stepOut", "print",
function runAction(response)
{
var action = actions.shift();
- if (!action)
+ if (!action) {
InspectorTest.completeTest();
+ return;
+ }
if (action === "print") {
printCallFrames(response.params.callFrames);
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
new file mode 100644
index 0000000000..cbe4406932
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
@@ -0,0 +1,18 @@
+Running testFunction with generated WASM bytes...
+Paused on 'debugger;'
+Number of frames: 5
+ - [0] {"functionName":"call_debugger","function_lineNumber":1,"function_columnNumber":24,"lineNumber":2,"columnNumber":4}
+ - [1] {"functionName":"call_func","lineNumber":1,"columnNumber":1}
+ - [2] {"functionName":"main","lineNumber":2,"columnNumber":1}
+ - [3] {"functionName":"testFunction","function_lineNumber":0,"function_columnNumber":21,"lineNumber":14,"columnNumber":19}
+ - [4] {"functionName":"","function_lineNumber":0,"function_columnNumber":0,"lineNumber":0,"columnNumber":0}
+Getting v8-generated stack trace...
+Result of evaluate (string):
+Error: this is your stack trace:
+ -- skipped --
+ at call_debugger (<anonymous>:3:5)
+ at call_func (<WASM>[1]+1)
+ at main (<WASM>[2]+1)
+ at testFunction (<anonymous>:15:20)
+ at <anonymous>:1:1
+Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stack.js b/deps/v8/test/inspector/debugger/wasm-stack.js
new file mode 100644
index 0000000000..a30d37f320
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/wasm-stack.js
@@ -0,0 +1,94 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+var imported_idx = builder.addImport("func", kSig_v_v);
+
+var call_imported_idx = builder.addFunction("call_func", kSig_v_v)
+ .addBody([kExprCallFunction, imported_idx])
+ .index;
+
+builder.addFunction("main", kSig_v_v)
+ .addBody([kExprCallFunction, call_imported_idx])
+ .exportAs("main");
+
+var module_bytes = builder.toArray();
+
+function testFunction(bytes) {
+ function call_debugger() {
+ debugger;
+ }
+
+ var buffer = new ArrayBuffer(bytes.length);
+ var view = new Uint8Array(buffer);
+ for (var i = 0; i < bytes.length; i++) {
+ view[i] = bytes[i] | 0;
+ }
+
+ var module = new WebAssembly.Module(buffer);
+ var instance = new WebAssembly.Instance(module, {func: call_debugger});
+
+ instance.exports.main();
+}
+
+InspectorTest.addScript(testFunction.toString());
+
+Protocol.Debugger.enable();
+Protocol.Debugger.onPaused(handleDebuggerPaused);
+InspectorTest.log('Running testFunction with generated WASM bytes...');
+Protocol.Runtime.evaluate(
+ {'expression': 'testFunction(' + JSON.stringify(module_bytes) + ')'});
+
+function locationToString(callFrame) {
+ var res = {functionName: callFrame.functionName};
+ for (var attr in callFrame.functionLocation) {
+ if (attr == 'scriptId') continue;
+ res['function_'+attr] = callFrame.functionLocation[attr];
+ }
+ for (var attr in callFrame.location) {
+ if (attr == 'scriptId') continue;
+ res[attr] = callFrame.location[attr];
+ }
+ return JSON.stringify(res);
+}
+
+function logStackTrace(messageObject) {
+ var frames = messageObject.params.callFrames;
+ InspectorTest.log('Number of frames: ' + frames.length);
+ for (var i = 0; i < frames.length; ++i) {
+ InspectorTest.log(' - [' + i + '] ' + locationToString(frames[i]));
+ }
+}
+
+function handleDebuggerPaused(messageObject)
+{
+ InspectorTest.log('Paused on \'debugger;\'');
+ logStackTrace(messageObject);
+ InspectorTest.log('Getting v8-generated stack trace...');
+ var topFrameId = messageObject.params.callFrames[0].callFrameId;
+ Protocol.Debugger
+ .evaluateOnCallFrame({
+ callFrameId: topFrameId,
+ expression: '(new Error("this is your stack trace:")).stack'
+ })
+ .then(callbackEvaluate);
+}
+
+function callbackEvaluate(response)
+{
+ InspectorTest.log(
+ 'Result of evaluate (' + response.result.result.type + '):');
+ var result_lines = response.result.result.value.split('\n');
+ // Skip the second line, containing the 'evaluate' position.
+ result_lines[1] = ' -- skipped --';
+ InspectorTest.log(result_lines.join('\n'));
+ InspectorTest.log('Finished!');
+ InspectorTest.completeTest();
+}
diff --git a/deps/v8/test/inspector/inspector-impl.cc b/deps/v8/test/inspector/inspector-impl.cc
index 57499215b9..87b3c7b4a1 100644
--- a/deps/v8/test/inspector/inspector-impl.cc
+++ b/deps/v8/test/inspector/inspector-impl.cc
@@ -5,7 +5,8 @@
#include "test/inspector/inspector-impl.h"
#include "include/v8.h"
-#include "src/inspector/string-16.h"
+
+#include "src/vector.h"
namespace {
@@ -40,10 +41,11 @@ InspectorClientImpl* InspectorClientFromContext(
return inspector_client;
}
-v8_inspector::String16 ToString16(v8::Local<v8::String> str) {
- std::unique_ptr<uint16_t[]> buffer(new uint16_t[str->Length()]);
- str->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, str->Length());
- return v8_inspector::String16(buffer.get(), str->Length());
+v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
+ v8::internal::Vector<uint16_t> buffer =
+ v8::internal::Vector<uint16_t>::New(str->Length());
+ str->Write(buffer.start(), 0, str->Length());
+ return buffer;
}
void MessageHandler(v8::Local<v8::Message> message,
@@ -55,7 +57,8 @@ void MessageHandler(v8::Local<v8::Message> message,
InspectorClientImpl::InspectorFromContext(context);
v8::Local<v8::StackTrace> stack = message->GetStackTrace();
- int script_id = message->GetScriptOrigin().ScriptID()->Value();
+ int script_id =
+ static_cast<int>(message->GetScriptOrigin().ScriptID()->Value());
if (!stack.IsEmpty() && stack->GetFrameCount() > 0) {
int top_script_id = stack->GetFrame(0)->GetScriptId();
if (top_script_id == script_id) script_id = 0;
@@ -66,15 +69,15 @@ void MessageHandler(v8::Local<v8::Message> message,
column_number = message->GetStartColumn(context).FromJust() + 1;
v8_inspector::StringView detailed_message;
- v8_inspector::String16 message_text_string = ToString16(message->Get());
- v8_inspector::StringView message_text(message_text_string.characters16(),
+ v8::internal::Vector<uint16_t> message_text_string = ToVector(message->Get());
+ v8_inspector::StringView message_text(message_text_string.start(),
message_text_string.length());
- v8_inspector::String16 url_string;
+ v8::internal::Vector<uint16_t> url_string;
if (message->GetScriptOrigin().ResourceName()->IsString()) {
url_string =
- ToString16(message->GetScriptOrigin().ResourceName().As<v8::String>());
+ ToVector(message->GetScriptOrigin().ResourceName().As<v8::String>());
}
- v8_inspector::StringView url(url_string.characters16(), url_string.length());
+ v8_inspector::StringView url(url_string.start(), url_string.length());
inspector->exceptionThrown(context, message_text, exception, detailed_message,
url, line_number, column_number,
@@ -158,7 +161,8 @@ v8_inspector::V8InspectorSession* InspectorClientImpl::SessionFromContext(
class SendMessageToBackendTask : public TaskRunner::Task {
public:
- explicit SendMessageToBackendTask(const v8_inspector::String16& message)
+ explicit SendMessageToBackendTask(
+ const v8::internal::Vector<uint16_t>& message)
: message_(message) {}
bool is_inspector_task() final { return true; }
@@ -172,14 +176,12 @@ class SendMessageToBackendTask : public TaskRunner::Task {
session = InspectorClientImpl::SessionFromContext(context);
CHECK(session);
}
- v8_inspector::StringView message_view(
- reinterpret_cast<const uint16_t*>(message_.characters16()),
- message_.length());
+ v8_inspector::StringView message_view(message_.start(), message_.length());
session->dispatchProtocolMessage(message_view);
}
private:
- v8_inspector::String16 message_;
+ v8::internal::Vector<uint16_t> message_;
};
TaskRunner* SendMessageToBackendExtension::backend_task_runner_ = nullptr;
@@ -196,6 +198,5 @@ void SendMessageToBackendExtension::SendMessageToBackend(
CHECK(backend_task_runner_);
CHECK(args.Length() == 1 && args[0]->IsString());
v8::Local<v8::String> message = args[0].As<v8::String>();
- backend_task_runner_->Append(
- new SendMessageToBackendTask(ToString16(message)));
+ backend_task_runner_->Append(new SendMessageToBackendTask(ToVector(message)));
}
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 872d211c75..401aca0776 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -21,10 +21,28 @@
namespace {
+std::vector<TaskRunner*> task_runners;
+
+void Terminate() {
+ for (size_t i = 0; i < task_runners.size(); ++i) {
+ task_runners[i]->Terminate();
+ task_runners[i]->Join();
+ }
+ std::vector<TaskRunner*> empty;
+ task_runners.swap(empty);
+}
+
void Exit() {
fflush(stdout);
fflush(stderr);
- _exit(0);
+ Terminate();
+}
+
+v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
+ v8::internal::Vector<uint16_t> buffer =
+ v8::internal::Vector<uint16_t>::New(str->Length());
+ str->Write(buffer.start(), 0, str->Length());
+ return buffer;
}
class UtilsExtension : public v8::Extension {
@@ -33,7 +51,9 @@ class UtilsExtension : public v8::Extension {
: v8::Extension("v8_inspector/utils",
"native function print();"
"native function quit();"
- "native function setlocale();") {}
+ "native function setlocale();"
+ "native function load();"
+ "native function compileAndRunWithOrigin();") {}
virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name) {
v8::Local<v8::Context> context = isolate->GetCurrentContext();
@@ -54,11 +74,30 @@ class UtilsExtension : public v8::Extension {
.ToLocalChecked())
.FromJust()) {
return v8::FunctionTemplate::New(isolate, UtilsExtension::SetLocale);
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "load",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate, UtilsExtension::Load);
+ } else if (name->Equals(context, v8::String::NewFromUtf8(
+ isolate, "compileAndRunWithOrigin",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate,
+ UtilsExtension::CompileAndRunWithOrigin);
}
return v8::Local<v8::FunctionTemplate>();
}
+ static void set_backend_task_runner(TaskRunner* runner) {
+ backend_runner_ = runner;
+ }
+
private:
+ static TaskRunner* backend_runner_;
+
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
v8::HandleScope handle_scope(args.GetIsolate());
@@ -102,8 +141,48 @@ class UtilsExtension : public v8::Extension {
v8::String::Utf8Value str(args[0]);
setlocale(LC_NUMERIC, *str);
}
+
+ static void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsString()) {
+ fprintf(stderr, "Internal error: load gets one string argument.");
+ Exit();
+ }
+ v8::String::Utf8Value str(args[0]);
+ v8::Isolate* isolate = args.GetIsolate();
+ bool exists = false;
+ std::string filename(*str, str.length());
+ v8::internal::Vector<const char> chars =
+ v8::internal::ReadFile(filename.c_str(), &exists);
+ if (!exists) {
+ isolate->ThrowException(
+ v8::String::NewFromUtf8(isolate, "Error loading file",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked());
+ return;
+ }
+ ExecuteStringTask task(chars);
+ v8::Global<v8::Context> context(isolate, isolate->GetCurrentContext());
+ task.Run(isolate, context);
+ }
+
+ static void CompileAndRunWithOrigin(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 4 || !args[0]->IsString() || !args[1]->IsString() ||
+ !args[2]->IsInt32() || !args[3]->IsInt32()) {
+ fprintf(stderr,
+ "Internal error: compileAndRunWithOrigin(source, name, line, "
+ "column).");
+ Exit();
+ }
+
+ backend_runner_->Append(new ExecuteStringTask(
+ ToVector(args[0].As<v8::String>()), args[1].As<v8::String>(),
+ args[2].As<v8::Int32>(), args[3].As<v8::Int32>()));
+ }
};
+TaskRunner* UtilsExtension::backend_runner_ = nullptr;
+
class SetTimeoutTask : public TaskRunner::Task {
public:
SetTimeoutTask(v8::Isolate* isolate, v8::Local<v8::Function> function)
@@ -153,28 +232,84 @@ class SetTimeoutExtension : public v8::Extension {
"Internal error: only setTimeout(function, 0) is supported.");
Exit();
}
- v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
if (args[0]->IsFunction()) {
- TaskRunner::FromContext(context)->Append(new SetTimeoutTask(
- args.GetIsolate(), v8::Local<v8::Function>::Cast(args[0])));
+ TaskRunner::FromContext(context)->Append(
+ new SetTimeoutTask(isolate, v8::Local<v8::Function>::Cast(args[0])));
} else {
- v8::Local<v8::String> data = args[0].As<v8::String>();
- std::unique_ptr<uint16_t[]> buffer(new uint16_t[data->Length()]);
- data->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, data->Length());
- v8_inspector::String16 source =
- v8_inspector::String16(buffer.get(), data->Length());
- TaskRunner::FromContext(context)->Append(new ExecuteStringTask(source));
+ TaskRunner::FromContext(context)->Append(new ExecuteStringTask(
+ ToVector(args[0].As<v8::String>()), v8::String::Empty(isolate),
+ v8::Integer::New(isolate, 0), v8::Integer::New(isolate, 0)));
}
}
};
-v8_inspector::String16 ToString16(const v8_inspector::StringView& string) {
+class InspectorExtension : public v8::Extension {
+ public:
+ InspectorExtension()
+ : v8::Extension("v8_inspector/inspector",
+ "native function attachInspector();"
+ "native function detachInspector();") {}
+
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "attachInspector",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate, InspectorExtension::Attach);
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "detachInspector",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
+ return v8::FunctionTemplate::New(isolate, InspectorExtension::Detach);
+ }
+ return v8::Local<v8::FunctionTemplate>();
+ }
+
+ private:
+ static void Attach(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8_inspector::V8Inspector* inspector =
+ InspectorClientImpl::InspectorFromContext(context);
+ if (!inspector) {
+ fprintf(stderr, "Inspector client not found - cannot attach!");
+ Exit();
+ }
+ inspector->contextCreated(
+ v8_inspector::V8ContextInfo(context, 1, v8_inspector::StringView()));
+ }
+
+ static void Detach(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ v8_inspector::V8Inspector* inspector =
+ InspectorClientImpl::InspectorFromContext(context);
+ if (!inspector) {
+ fprintf(stderr, "Inspector client not found - cannot detach!");
+ Exit();
+ }
+ inspector->contextDestroyed(context);
+ }
+};
+
+v8::Local<v8::String> ToString(v8::Isolate* isolate,
+ const v8_inspector::StringView& string) {
if (string.is8Bit())
- return v8_inspector::String16(
- reinterpret_cast<const char*>(string.characters8()), string.length());
- return v8_inspector::String16(
- reinterpret_cast<const uint16_t*>(string.characters16()),
- string.length());
+ return v8::String::NewFromOneByte(isolate, string.characters8(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
+ else
+ return v8::String::NewFromTwoByte(isolate, string.characters16(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
}
class FrontendChannelImpl : public InspectorClientImpl::FrontendChannel {
@@ -184,11 +319,24 @@ class FrontendChannelImpl : public InspectorClientImpl::FrontendChannel {
virtual ~FrontendChannelImpl() {}
void SendMessageToFrontend(const v8_inspector::StringView& message) final {
- v8_inspector::String16Builder script;
- script.append("InspectorTest._dispatchMessage(");
- script.append(ToString16(message));
- script.append(")");
- frontend_task_runner_->Append(new ExecuteStringTask(script.toString()));
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+
+ v8::Local<v8::String> prefix =
+ v8::String::NewFromUtf8(isolate, "InspectorTest._dispatchMessage(",
+ v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+ v8::Local<v8::String> message_string = ToString(isolate, message);
+ v8::Local<v8::String> suffix =
+ v8::String::NewFromUtf8(isolate, ")", v8::NewStringType::kInternalized)
+ .ToLocalChecked();
+
+ v8::Local<v8::String> result = v8::String::Concat(prefix, message_string);
+ result = v8::String::Concat(result, suffix);
+
+ frontend_task_runner_->Append(new ExecuteStringTask(
+ ToVector(result), v8::String::Empty(isolate),
+ v8::Integer::New(isolate, 0), v8::Integer::New(isolate, 0)));
}
private:
@@ -201,12 +349,14 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
- v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::V8::Initialize();
SetTimeoutExtension set_timeout_extension;
v8::RegisterExtension(&set_timeout_extension);
+ InspectorExtension inspector_extension;
+ v8::RegisterExtension(&inspector_extension);
UtilsExtension utils_extension;
v8::RegisterExtension(&utils_extension);
SendMessageToBackendExtension send_message_to_backend_extension;
@@ -214,12 +364,14 @@ int main(int argc, char* argv[]) {
v8::base::Semaphore ready_semaphore(0);
- const char* backend_extensions[] = {"v8_inspector/setTimeout"};
+ const char* backend_extensions[] = {"v8_inspector/setTimeout",
+ "v8_inspector/inspector"};
v8::ExtensionConfiguration backend_configuration(
arraysize(backend_extensions), backend_extensions);
TaskRunner backend_runner(&backend_configuration, false, &ready_semaphore);
ready_semaphore.Wait();
SendMessageToBackendExtension::set_backend_task_runner(&backend_runner);
+ UtilsExtension::set_backend_task_runner(&backend_runner);
const char* frontend_extensions[] = {"v8_inspector/utils",
"v8_inspector/frontend"};
@@ -233,6 +385,9 @@ int main(int argc, char* argv[]) {
&ready_semaphore);
ready_semaphore.Wait();
+ task_runners.push_back(&frontend_runner);
+ task_runners.push_back(&backend_runner);
+
for (int i = 1; i < argc; ++i) {
if (argv[i][0] == '-') break;
@@ -244,11 +399,10 @@ int main(int argc, char* argv[]) {
argv[i]);
Exit();
}
- v8_inspector::String16 source =
- v8_inspector::String16::fromUTF8(chars.start(), chars.length());
- frontend_runner.Append(new ExecuteStringTask(source));
+ frontend_runner.Append(new ExecuteStringTask(chars));
}
frontend_runner.Join();
+ backend_runner.Join();
return 0;
}
diff --git a/deps/v8/test/inspector/inspector.gyp b/deps/v8/test/inspector/inspector.gyp
index 21a75f9eef..8c96ae5d75 100644
--- a/deps/v8/test/inspector/inspector.gyp
+++ b/deps/v8/test/inspector/inspector.gyp
@@ -5,13 +5,6 @@
{
'variables': {
'v8_code': 1,
- 'inspector_protocol_sources': [
- 'inspector-impl.cc',
- 'inspector-impl.h',
- 'inspector-test.cc',
- 'task-runner.cc',
- 'task-runner.h',
- ],
},
'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'targets': [
@@ -20,22 +13,38 @@
'type': 'executable',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_libbase',
+ '../../src/v8.gyp:v8',
],
'include_dirs': [
'../..',
],
'sources': [
- '<@(inspector_protocol_sources)',
- ],
- 'conditions': [
- ['component=="shared_library"', {
- # inspector-test can't be built against a shared library, so we
- # need to depend on the underlying static target in that case.
- 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
- }, {
- 'dependencies': ['../../src/v8.gyp:v8'],
- }],
+ 'inspector-impl.cc',
+ 'inspector-impl.h',
+ 'inspector-test.cc',
+ 'task-runner.cc',
+ 'task-runner.h',
],
},
],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'inspector-test_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'inspector-test',
+ ],
+ 'includes': [
+ '../../gypfiles/isolate.gypi',
+ ],
+ 'sources': [
+ 'inspector.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
}
diff --git a/deps/v8/test/inspector/inspector.isolate b/deps/v8/test/inspector/inspector.isolate
new file mode 100644
index 0000000000..ca45911da3
--- /dev/null
+++ b/deps/v8/test/inspector/inspector.isolate
@@ -0,0 +1,27 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['v8_enable_inspector==1', {
+ 'variables': {
+ 'files': [
+ './console/',
+ './cpu-profiler/',
+ './debugger/',
+ './inspector.status',
+ './json-parse-expected.txt',
+ './json-parse.js',
+ './protocol-test.js',
+ './runtime/',
+ './testcfg.py',
+ '<(PRODUCT_DIR)/inspector-test<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ '../../src/base.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+} \ No newline at end of file
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index 7eb822ae2e..9e18065201 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -31,12 +31,13 @@ Protocol = new Proxy({}, {
InspectorTest.log = print.bind(null);
-InspectorTest.logMessage = function(message)
+InspectorTest.logMessage = function(originalMessage)
{
+ var message = JSON.parse(JSON.stringify(originalMessage));
if (message.id)
message.id = "<messageId>";
- const nonStableFields = new Set(["objectId", "scriptId", "exceptionId", "timestamp", "executionContextId", "callFrameId"]);
+ const nonStableFields = new Set(["objectId", "scriptId", "exceptionId", "timestamp", "executionContextId", "callFrameId", "breakpointId"]);
var objects = [ message ];
while (objects.length) {
var object = objects.shift();
@@ -49,7 +50,7 @@ InspectorTest.logMessage = function(message)
}
InspectorTest.logObject(message);
- return message;
+ return originalMessage;
}
InspectorTest.logObject = function(object, title)
@@ -96,11 +97,14 @@ InspectorTest.logObject = function(object, title)
lines.push(prefix + "]");
}
- dumpValue(object, "", title);
+ dumpValue(object, "", title || "");
InspectorTest.log(lines.join("\n"));
}
-InspectorTest.completeTest = quit.bind(null);
+InspectorTest.completeTest = function()
+{
+ Protocol.Debugger.disable().then(() => quit());
+}
InspectorTest.completeTestAfterPendingTimeouts = function()
{
@@ -109,18 +113,7 @@ InspectorTest.completeTestAfterPendingTimeouts = function()
awaitPromise: true }).then(InspectorTest.completeTest);
}
-InspectorTest.addScript = function(string)
-{
- return InspectorTest._sendCommandPromise("Runtime.evaluate", { "expression": string }).then(dumpErrorIfNeeded);
-
- function dumpErrorIfNeeded(message)
- {
- if (message.error) {
- InspectorTest.log("Error while executing '" + string + "': " + message.error.message);
- InspectorTest.completeTest();
- }
- }
-};
+InspectorTest.addScript = (string) => compileAndRunWithOrigin(string, "", 0, 0);
InspectorTest.startDumpingProtocolMessages = function()
{
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index c78d23b415..51fd110203 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -63,8 +63,9 @@ void TaskRunner::Run() {
void TaskRunner::RunMessageLoop(bool only_protocol) {
int loop_number = ++nested_loop_count_;
- while (nested_loop_count_ == loop_number) {
+ while (nested_loop_count_ == loop_number && !is_terminated_.Value()) {
TaskRunner::Task* task = GetNext(only_protocol);
+ if (!task) return;
v8::Isolate::Scope isolate_scope(isolate_);
if (catch_exceptions_) {
v8::TryCatch try_catch(isolate_);
@@ -93,8 +94,14 @@ void TaskRunner::Append(Task* task) {
process_queue_semaphore_.Signal();
}
+void TaskRunner::Terminate() {
+ is_terminated_.Increment(1);
+ process_queue_semaphore_.Signal();
+}
+
TaskRunner::Task* TaskRunner::GetNext(bool only_protocol) {
for (;;) {
+ if (is_terminated_.Value()) return nullptr;
if (only_protocol) {
Task* task = nullptr;
if (queue_.Dequeue(&task)) {
@@ -108,7 +115,6 @@ TaskRunner::Task* TaskRunner::GetNext(bool only_protocol) {
}
process_queue_semaphore_.Wait();
}
- UNREACHABLE();
return nullptr;
}
@@ -117,8 +123,29 @@ TaskRunner* TaskRunner::FromContext(v8::Local<v8::Context> context) {
context->GetAlignedPointerFromEmbedderData(kTaskRunnerIndex));
}
-ExecuteStringTask::ExecuteStringTask(const v8_inspector::String16& expression)
- : expression_(expression) {}
+namespace {
+
+v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
+ v8::internal::Vector<uint16_t> buffer =
+ v8::internal::Vector<uint16_t>::New(str->Length());
+ str->Write(buffer.start(), 0, str->Length());
+ return buffer;
+}
+
+} // namespace
+
+ExecuteStringTask::ExecuteStringTask(
+ const v8::internal::Vector<uint16_t>& expression,
+ v8::Local<v8::String> name, v8::Local<v8::Integer> line_offset,
+ v8::Local<v8::Integer> column_offset)
+ : expression_(expression),
+ name_(ToVector(name)),
+ line_offset_(line_offset.As<v8::Int32>()->Value()),
+ column_offset_(column_offset.As<v8::Int32>()->Value()) {}
+
+ExecuteStringTask::ExecuteStringTask(
+ const v8::internal::Vector<const char>& expression)
+ : expression_utf8_(expression), line_offset_(0), column_offset_(0) {}
void ExecuteStringTask::Run(v8::Isolate* isolate,
const v8::Global<v8::Context>& context) {
@@ -128,12 +155,27 @@ void ExecuteStringTask::Run(v8::Isolate* isolate,
v8::Local<v8::Context> local_context = context.Get(isolate);
v8::Context::Scope context_scope(local_context);
- v8::ScriptOrigin origin(v8::String::Empty(isolate));
- v8::Local<v8::String> source =
- v8::String::NewFromTwoByte(isolate, expression_.characters16(),
- v8::NewStringType::kNormal,
- static_cast<int>(expression_.length()))
+ v8::Local<v8::String> name =
+ v8::String::NewFromTwoByte(isolate, name_.start(),
+ v8::NewStringType::kNormal, name_.length())
.ToLocalChecked();
+ v8::Local<v8::Integer> line_offset = v8::Integer::New(isolate, line_offset_);
+ v8::Local<v8::Integer> column_offset =
+ v8::Integer::New(isolate, column_offset_);
+
+ v8::ScriptOrigin origin(name, line_offset, column_offset);
+ v8::Local<v8::String> source;
+ if (expression_.length()) {
+ source = v8::String::NewFromTwoByte(isolate, expression_.start(),
+ v8::NewStringType::kNormal,
+ expression_.length())
+ .ToLocalChecked();
+ } else {
+ source = v8::String::NewFromUtf8(isolate, expression_utf8_.start(),
+ v8::NewStringType::kNormal,
+ expression_utf8_.length())
+ .ToLocalChecked();
+ }
v8::ScriptCompiler::Source scriptSource(source, origin);
v8::Local<v8::Script> script;
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index 88c36543d3..17385468be 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -8,10 +8,11 @@
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
#include "include/v8.h"
+#include "src/base/atomic-utils.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "src/inspector/string-16.h"
#include "src/locked-queue-inl.h"
+#include "src/vector.h"
class TaskRunner : public v8::base::Thread {
public:
@@ -39,6 +40,8 @@ class TaskRunner : public v8::base::Thread {
static TaskRunner* FromContext(v8::Local<v8::Context>);
+ void Terminate();
+
private:
void InitializeContext();
Task* GetNext(bool only_protocol);
@@ -51,28 +54,38 @@ class TaskRunner : public v8::base::Thread {
v8::Global<v8::Context> context_;
// deferred_queue_ combined with queue_ (in this order) have all tasks in the
- // correct order.
- // Sometimes we skip non-protocol tasks by moving them from queue_ to
- // deferred_queue_.
+ // correct order. Sometimes we skip non-protocol tasks by moving them from
+ // queue_ to deferred_queue_.
v8::internal::LockedQueue<Task*> queue_;
v8::internal::LockedQueue<Task*> deffered_queue_;
v8::base::Semaphore process_queue_semaphore_;
int nested_loop_count_;
+ v8::base::AtomicNumber<int> is_terminated_;
+
DISALLOW_COPY_AND_ASSIGN(TaskRunner);
};
class ExecuteStringTask : public TaskRunner::Task {
public:
- explicit ExecuteStringTask(const v8_inspector::String16& expression);
+ ExecuteStringTask(const v8::internal::Vector<uint16_t>& expression,
+ v8::Local<v8::String> name,
+ v8::Local<v8::Integer> line_offset,
+ v8::Local<v8::Integer> column_offset);
+ explicit ExecuteStringTask(
+ const v8::internal::Vector<const char>& expression);
bool is_inspector_task() override { return false; }
void Run(v8::Isolate* isolate,
const v8::Global<v8::Context>& context) override;
private:
- v8_inspector::String16 expression_;
+ v8::internal::Vector<uint16_t> expression_;
+ v8::internal::Vector<const char> expression_utf8_;
+ v8::internal::Vector<uint16_t> name_;
+ int32_t line_offset_;
+ int32_t column_offset_;
DISALLOW_COPY_AND_ASSIGN(ExecuteStringTask);
};
diff --git a/deps/v8/test/inspector_protocol_parser_test/BUILD.gn b/deps/v8/test/inspector_protocol_parser_test/BUILD.gn
deleted file mode 100644
index 52c3788717..0000000000
--- a/deps/v8/test/inspector_protocol_parser_test/BUILD.gn
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("../../gni/v8.gni")
-# TODO: bring tests back once inspector_protocol is stable and moves out of blink.
diff --git a/deps/v8/test/inspector_protocol_parser_test/DEPS b/deps/v8/test/inspector_protocol_parser_test/DEPS
deleted file mode 100644
index 06d0b24e46..0000000000
--- a/deps/v8/test/inspector_protocol_parser_test/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
- "+testing"
-]
diff --git a/deps/v8/test/inspector_protocol_parser_test/RunTests.cpp b/deps/v8/test/inspector_protocol_parser_test/RunTests.cpp
deleted file mode 100644
index 065b0dcd2b..0000000000
--- a/deps/v8/test/inspector_protocol_parser_test/RunTests.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace {
-
-class InspectorProtocolTestEnvironment final : public ::testing::Environment {};
-
-} // namespace
-
-int main(int argc, char** argv) {
- testing::InitGoogleMock(&argc, argv);
- testing::AddGlobalTestEnvironment(new InspectorProtocolTestEnvironment);
- return RUN_ALL_TESTS();
-}
diff --git a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp
deleted file mode 100644
index 8fe2da0fe2..0000000000
--- a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.gyp
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# TODO: bring tests back once inspector_protocol is stable and moves out of blink.
-{ 'variables': {
- },
- 'targets': [
- ],
- 'conditions': [
- ],
-}
diff --git a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate
deleted file mode 100644
index 66052bfc3a..0000000000
--- a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.isolate
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# TODO: bring tests back once inspector_protocol is stable and moves out of blink.
-{
- 'variables': {
- 'files': [
- ],
- },
- 'includes': [
- ],
-} \ No newline at end of file
diff --git a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status b/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status
deleted file mode 100644
index 9aa1649869..0000000000
--- a/deps/v8/test/inspector_protocol_parser_test/inspector_protocol_parser_test.status
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright 2016 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-[
-]
diff --git a/deps/v8/test/intl/general/case-mapping.js b/deps/v8/test/intl/general/case-mapping.js
index 17eb5bae57..8fb9e82933 100644
--- a/deps/v8/test/intl/general/case-mapping.js
+++ b/deps/v8/test/intl/general/case-mapping.js
@@ -84,20 +84,22 @@ assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase(["en", "tr"]));
assertEquals("abci\u0307", "aBcI\u0307".toLowerCase());
// Greek uppercasing: not covered by intl402/String/*, yet. Tonos (U+0301) and
-// other diacritic marks are dropped. This rule is based on the current CLDR's
-// el-Upper transformation, but Greek uppercasing rules are more sophisticated
-// than this. See http://bugs.icu-project.org/trac/ticket/10582 and
-// http://unicode.org/cldr/trac/ticket/7905 .
+// other diacritic marks are dropped. See
+// http://bugs.icu-project.org/trac/ticket/5456#comment:19 for more examples.
+// See also http://bugs.icu-project.org/trac/ticket/12845 .
assertEquals("Α", "α\u0301".toLocaleUpperCase("el"));
assertEquals("Α", "α\u0301".toLocaleUpperCase("el-GR"));
assertEquals("Α", "α\u0301".toLocaleUpperCase("el-Grek"));
assertEquals("Α", "α\u0301".toLocaleUpperCase("el-Grek-GR"));
assertEquals("Α", "ά".toLocaleUpperCase("el"));
-assertEquals("ΑΟΥΩ", "άόύώ".toLocaleUpperCase("el"));
-assertEquals("ΑΟΥΩ", "α\u0301ο\u0301υ\u0301ω\u0301".toLocaleUpperCase("el"));
-assertEquals("ΑΟΥΩ", "άόύώ".toLocaleUpperCase("el"));
+assertEquals("ΑΟΫΩ", "άόύώ".toLocaleUpperCase("el"));
+assertEquals("ΑΟΫΩ", "α\u0301ο\u0301υ\u0301ω\u0301".toLocaleUpperCase("el"));
+assertEquals("ΑΟΫΩ", "άόύώ".toLocaleUpperCase("el"));
assertEquals("ΟΕ", "Ό\u1f15".toLocaleUpperCase("el"));
assertEquals("ΟΕ", "Ο\u0301ε\u0314\u0301".toLocaleUpperCase("el"));
+assertEquals("ΡΩΜΕΪΚΑ", "ρωμέικα".toLocaleUpperCase("el"));
+assertEquals("ΜΑΪΟΥ, ΤΡΟΛΕΪ", "Μαΐου, τρόλεϊ".toLocaleUpperCase("el"));
+assertEquals("ΤΟ ΕΝΑ Ή ΤΟ ΑΛΛΟ.", "Το ένα ή το άλλο.".toLocaleUpperCase("el"));
// Input and output are identical.
assertEquals("αβγδε", "αβγδε".toLocaleLowerCase("el"));
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index a88746b10c..44b821f7b5 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -1,8 +1,8 @@
{
"name": "JSTests",
"run_count": 3,
- "run_count_android_arm": 1,
- "run_count_android_arm64": 1,
+ "run_count_arm": 1,
+ "run_count_arm64": 1,
"timeout": 120,
"units": "score",
"total": true,
@@ -119,6 +119,7 @@
"results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
"run_count": 1,
"timeout": 240,
+ "timeout_arm": 420,
"tests": [
{"name": "StringFunctions"}
]
@@ -162,12 +163,14 @@
"flags": ["--harmony"],
"resources": [
"assign.js",
- "values.js",
- "entries.js"
+ "create.js",
+ "entries.js",
+ "values.js"
],
"results_regexp": "^%s\\-Object\\(Score\\): (.+)$",
"tests": [
{"name": "Assign"},
+ {"name": "Create"},
{"name": "Entries"},
{"name": "EntriesMegamorphic"},
{"name": "Values"},
diff --git a/deps/v8/test/js-perf-test/SixSpeed.json b/deps/v8/test/js-perf-test/SixSpeed.json
new file mode 100644
index 0000000000..c72345efdb
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed.json
@@ -0,0 +1,27 @@
+{
+ "name": "SixSpeed",
+ "run_count": 3,
+ "run_count_arm": 1,
+ "run_count_arm64": 1,
+ "timeout": 120,
+ "units": "score",
+ "total": true,
+ "resources": ["base.js"],
+ "tests": [
+ {
+ "name": "Spread",
+ "path": ["SixSpeed/spread"],
+ "main": "run.js",
+ "resources": [
+ "run.js",
+ "spread.js"
+ ],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Spread-ES5"},
+ {"name": "Spread-Traceur"},
+ {"name": "Spread-ES6"}
+ ]
+ }
+ ]
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/LICENSE.sixspeed b/deps/v8/test/js-perf-test/SixSpeed/LICENSE.sixspeed
new file mode 100644
index 0000000000..2cc181ce74
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/LICENSE.sixspeed
@@ -0,0 +1,22 @@
+Tests included here are based on build output generated by the six-speed
+benchmark suite.
+
+Copyright (c) 2015 Kevin Decker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/deps/v8/test/js-perf-test/SixSpeed/spread/run.js b/deps/v8/test/js-perf-test/SixSpeed/spread/run.js
new file mode 100644
index 0000000000..c46b29be98
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/spread/run.js
@@ -0,0 +1,25 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../../base.js');
+load('spread.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/SixSpeed/spread/spread.js b/deps/v8/test/js-perf-test/SixSpeed/spread/spread.js
new file mode 100644
index 0000000000..7c5f40b427
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/spread/spread.js
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+
+new BenchmarkSuite('Spread-ES5', [1000], [
+ new Benchmark('ES5', false, false, 0, ES5),
+]);
+
+new BenchmarkSuite('Spread-Traceur', [1000], [
+ new Benchmark('Traceur', false, false, 0, Traceur),
+]);
+
+new BenchmarkSuite('Spread-ES6', [1000], [
+ new Benchmark('ES6', false, false, 0, ES6),
+]);
+
+// ----------------------------------------------------------------------------
+// Benchmark: ES5
+// ----------------------------------------------------------------------------
+
+function ES5() {
+ "use strict";
+ return Math.max.apply(Math, [1,2,3]);
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: Traceur
+// ----------------------------------------------------------------------------
+
+function checkObjectCoercible(v) {
+ "use strict";
+ if (v === null || v === undefined) {
+ throw new $TypeError('Value cannot be converted to an Object');
+ }
+ return v;
+}
+
+function spread() {
+ "use strict";
+ var rv = [],
+ j = 0,
+ iterResult;
+ for (var i = 0; i < arguments.length; i++) {
+ var valueToSpread = checkObjectCoercible(arguments[i]);
+ if (typeof valueToSpread[Symbol.iterator] !== 'function') {
+ throw new TypeError('Cannot spread non-iterable object.');
+ }
+ var iter = valueToSpread[Symbol.iterator]();
+ while (!(iterResult = iter.next()).done) {
+ rv[j++] = iterResult.value;
+ }
+ }
+ return rv;
+}
+
+function Traceur() {
+ "use strict";
+ var $__0;
+ return ($__0 = Math).max.apply($__0, spread([1, 2, 3]));
+}
+
+// ----------------------------------------------------------------------------
+// Benchmark: ES6
+// ----------------------------------------------------------------------------
+
+function ES6() {
+ "use strict";
+ return Math.max(...[1,2,3]);
+}
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index e4db83db09..c6c3487473 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -31,5 +31,15 @@
# escapes (we need to parse to distinguish octal escapes from valid
# back-references).
'strict-octal-regexp': [SKIP],
+
+ # Modules which are only meant to be imported from by other tests, not to be
+ # tested standalone.
+ 'modules-skip*': [SKIP],
}], # ALWAYS
+
+['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
+ # Ongoing implementation of modules.
+ # https://bugs.chromium.org/p/v8/issues/detail?id=1569
+ 'modules-*': [SKIP],
+}], # variant != ignition and variant != ignition_staging and variant != ignition_turbofan
]
diff --git a/deps/v8/test/mjsunit/modules-fail-cyclic-1.js b/deps/v8/test/message/modules-cycle1.js
index 5156a57cd9..e3497cace8 100644
--- a/deps/v8/test/mjsunit/modules-fail-cyclic-1.js
+++ b/deps/v8/test/message/modules-cycle1.js
@@ -4,5 +4,5 @@
//
// MODULE
-import {a} from "modules-fail-cyclic-1.js";
+import {a} from "modules-cycle1.js";
export {a};
diff --git a/deps/v8/test/message/modules-cycle1.out b/deps/v8/test/message/modules-cycle1.out
new file mode 100644
index 0000000000..3e6f31b1d4
--- /dev/null
+++ b/deps/v8/test/message/modules-cycle1.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
+import {a} from "modules-cycle1.js";
+ ^
+SyntaxError: Detected cycle while resolving name 'a'
+
diff --git a/deps/v8/test/mjsunit/modules-fail-cyclic-2.js b/deps/v8/test/message/modules-cycle2.js
index f6a7cecec1..1121c3098f 100644
--- a/deps/v8/test/mjsunit/modules-fail-cyclic-2.js
+++ b/deps/v8/test/message/modules-cycle2.js
@@ -4,5 +4,5 @@
//
// MODULE
-import {a} from "modules-skip-cyclic.js";
+import {a} from "modules-skip-cycle2.js";
export {a as b};
diff --git a/deps/v8/test/message/modules-cycle2.out b/deps/v8/test/message/modules-cycle2.out
new file mode 100644
index 0000000000..f3c19d20ed
--- /dev/null
+++ b/deps/v8/test/message/modules-cycle2.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
+import {a} from "modules-skip-cycle2.js";
+ ^
+SyntaxError: Detected cycle while resolving name 'a'
+
diff --git a/deps/v8/test/message/modules-cycle3.js b/deps/v8/test/message/modules-cycle3.js
new file mode 100644
index 0000000000..133d203886
--- /dev/null
+++ b/deps/v8/test/message/modules-cycle3.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export {a as x} from "modules-skip-cycle3.js";
+export {b as y} from "modules-skip-cycle3.js";
diff --git a/deps/v8/test/message/modules-cycle3.out b/deps/v8/test/message/modules-cycle3.out
new file mode 100644
index 0000000000..a5b10149f9
--- /dev/null
+++ b/deps/v8/test/message/modules-cycle3.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Detected cycle while resolving name 'a'
+export {a as x} from "modules-skip-cycle3.js";
+ ^^^^^^
+SyntaxError: Detected cycle while resolving name 'a'
+
diff --git a/deps/v8/test/mjsunit/modules-fail-1.js b/deps/v8/test/message/modules-cycle4.js
index 3e28647514..2e22757e54 100644
--- a/deps/v8/test/mjsunit/modules-fail-1.js
+++ b/deps/v8/test/message/modules-cycle4.js
@@ -4,4 +4,4 @@
//
// MODULE
-import {a} from "modules-fail-1.js";
+import {x} from "modules-cycle3.js";
diff --git a/deps/v8/test/message/modules-cycle4.out b/deps/v8/test/message/modules-cycle4.out
new file mode 100644
index 0000000000..74789e0ec9
--- /dev/null
+++ b/deps/v8/test/message/modules-cycle4.out
@@ -0,0 +1,5 @@
+*modules-cycle3.js:7: SyntaxError: Detected cycle while resolving name 'a'
+export {a as x} from "modules-skip-cycle3.js";
+ ^^^^^^
+SyntaxError: Detected cycle while resolving name 'a'
+
diff --git a/deps/v8/test/mjsunit/modules-fail-3.js b/deps/v8/test/message/modules-duplicate-export1.js
index d29d44476d..0ba421a0b4 100644
--- a/deps/v8/test/mjsunit/modules-fail-3.js
+++ b/deps/v8/test/message/modules-duplicate-export1.js
@@ -4,4 +4,5 @@
//
// MODULE
-import foo from "modules-fail-3.js";
+export {x};
+export let x = 42;
diff --git a/deps/v8/test/message/modules-duplicate-export1.out b/deps/v8/test/message/modules-duplicate-export1.out
new file mode 100644
index 0000000000..5b2478b3a2
--- /dev/null
+++ b/deps/v8/test/message/modules-duplicate-export1.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: Duplicate export of 'x'
+export let x = 42;
+ ^^^^^^^^^^^
+SyntaxError: Duplicate export of 'x'
+
diff --git a/deps/v8/test/mjsunit/modules-fail-8.js b/deps/v8/test/message/modules-duplicate-export2.js
index bc9c101301..3aec862341 100644
--- a/deps/v8/test/mjsunit/modules-fail-8.js
+++ b/deps/v8/test/message/modules-duplicate-export2.js
@@ -4,4 +4,5 @@
//
// MODULE
-import {a} from "modules-skip-7.js";
+export let x = 42;
+export {x};
diff --git a/deps/v8/test/message/modules-duplicate-export2.out b/deps/v8/test/message/modules-duplicate-export2.out
new file mode 100644
index 0000000000..17e831886c
--- /dev/null
+++ b/deps/v8/test/message/modules-duplicate-export2.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: Duplicate export of 'x'
+export {x};
+ ^
+SyntaxError: Duplicate export of 'x'
+
diff --git a/deps/v8/test/message/modules-duplicate-export3.js b/deps/v8/test/message/modules-duplicate-export3.js
new file mode 100644
index 0000000000..36fc27b6c0
--- /dev/null
+++ b/deps/v8/test/message/modules-duplicate-export3.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export let x = 42;
+let y;
+export {y as x};
diff --git a/deps/v8/test/message/modules-duplicate-export3.out b/deps/v8/test/message/modules-duplicate-export3.out
new file mode 100644
index 0000000000..3913a75c02
--- /dev/null
+++ b/deps/v8/test/message/modules-duplicate-export3.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: SyntaxError: Duplicate export of 'x'
+export {y as x};
+ ^^^^^^
+SyntaxError: Duplicate export of 'x'
+
diff --git a/deps/v8/test/message/modules-duplicate-export4.js b/deps/v8/test/message/modules-duplicate-export4.js
new file mode 100644
index 0000000000..1bc60dad60
--- /dev/null
+++ b/deps/v8/test/message/modules-duplicate-export4.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export let x = 42;
+let y;
+export {y as z, y as x, y};
diff --git a/deps/v8/test/message/modules-duplicate-export4.out b/deps/v8/test/message/modules-duplicate-export4.out
new file mode 100644
index 0000000000..73e0fdcc91
--- /dev/null
+++ b/deps/v8/test/message/modules-duplicate-export4.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: SyntaxError: Duplicate export of 'x'
+export {y as z, y as x, y};
+ ^^^^^^
+SyntaxError: Duplicate export of 'x'
+
diff --git a/deps/v8/test/message/modules-import-redeclare1.js b/deps/v8/test/message/modules-import-redeclare1.js
new file mode 100644
index 0000000000..22e1ce35a9
--- /dev/null
+++ b/deps/v8/test/message/modules-import-redeclare1.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export let x = 42;
+import {x} from "modules-import-redeclare1.js";
diff --git a/deps/v8/test/message/modules-import-redeclare1.out b/deps/v8/test/message/modules-import-redeclare1.out
new file mode 100644
index 0000000000..09b5e8b713
--- /dev/null
+++ b/deps/v8/test/message/modules-import-redeclare1.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
+import {x} from "modules-import-redeclare1.js";
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/message/modules-import-redeclare2.js b/deps/v8/test/message/modules-import-redeclare2.js
new file mode 100644
index 0000000000..af7ec2b4d2
--- /dev/null
+++ b/deps/v8/test/message/modules-import-redeclare2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export let x = 42;
+import {y, x, z} from "modules-import-redeclare1.js";
diff --git a/deps/v8/test/message/modules-import-redeclare2.out b/deps/v8/test/message/modules-import-redeclare2.out
new file mode 100644
index 0000000000..c972a382e5
--- /dev/null
+++ b/deps/v8/test/message/modules-import-redeclare2.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
+import {y, x, z} from "modules-import-redeclare1.js";
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/message/modules-import-redeclare3.js b/deps/v8/test/message/modules-import-redeclare3.js
new file mode 100644
index 0000000000..60ae6f20e5
--- /dev/null
+++ b/deps/v8/test/message/modules-import-redeclare3.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {y, x, z} from "modules-import-redeclare1.js";
+export let x = 42;
diff --git a/deps/v8/test/message/modules-import-redeclare3.out b/deps/v8/test/message/modules-import-redeclare3.out
new file mode 100644
index 0000000000..3a1080736d
--- /dev/null
+++ b/deps/v8/test/message/modules-import-redeclare3.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: Identifier 'x' has already been declared
+export let x = 42;
+ ^
+SyntaxError: Identifier 'x' has already been declared
+
diff --git a/deps/v8/test/mjsunit/modules-fail-2.js b/deps/v8/test/message/modules-import1.js
index e7dd683ced..fbfe907995 100644
--- a/deps/v8/test/mjsunit/modules-fail-2.js
+++ b/deps/v8/test/message/modules-import1.js
@@ -4,4 +4,4 @@
//
// MODULE
-import {a as b} from "modules-fail-2.js";
+import {a} from "modules-import1.js";
diff --git a/deps/v8/test/message/modules-import1.out b/deps/v8/test/message/modules-import1.out
new file mode 100644
index 0000000000..6facd0fa7c
--- /dev/null
+++ b/deps/v8/test/message/modules-import1.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: The requested module does not provide an export named 'a'
+import {a} from "modules-import1.js";
+ ^
+SyntaxError: The requested module does not provide an export named 'a'
+
diff --git a/deps/v8/test/message/modules-import2.js b/deps/v8/test/message/modules-import2.js
new file mode 100644
index 0000000000..8a719ace18
--- /dev/null
+++ b/deps/v8/test/message/modules-import2.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as b} from "modules-import2.js";
diff --git a/deps/v8/test/message/modules-import2.out b/deps/v8/test/message/modules-import2.out
new file mode 100644
index 0000000000..317399a6db
--- /dev/null
+++ b/deps/v8/test/message/modules-import2.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: The requested module does not provide an export named 'a'
+import {a as b} from "modules-import2.js";
+ ^
+SyntaxError: The requested module does not provide an export named 'a'
+
diff --git a/deps/v8/test/message/modules-import3.js b/deps/v8/test/message/modules-import3.js
new file mode 100644
index 0000000000..7e93ff3155
--- /dev/null
+++ b/deps/v8/test/message/modules-import3.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import foo from "modules-import3.js";
diff --git a/deps/v8/test/message/modules-import3.out b/deps/v8/test/message/modules-import3.out
new file mode 100644
index 0000000000..75abc74b50
--- /dev/null
+++ b/deps/v8/test/message/modules-import3.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: The requested module does not provide an export named 'default'
+import foo from "modules-import3.js";
+ ^^^
+SyntaxError: The requested module does not provide an export named 'default'
+
diff --git a/deps/v8/test/mjsunit/modules-fail-4.js b/deps/v8/test/message/modules-import4.js
index ec9edda909..0410e38ec8 100644
--- a/deps/v8/test/mjsunit/modules-fail-4.js
+++ b/deps/v8/test/message/modules-import4.js
@@ -4,5 +4,5 @@
//
// MODULE
-import {a as b} from "modules-fail-4.js";
-export {c as a} from "modules-fail-4.js";
+import {a as b} from "modules-import4.js";
+export {c as a} from "modules-import4.js";
diff --git a/deps/v8/test/message/modules-import4.out b/deps/v8/test/message/modules-import4.out
new file mode 100644
index 0000000000..bd406e4021
--- /dev/null
+++ b/deps/v8/test/message/modules-import4.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: The requested module does not provide an export named 'c'
+export {c as a} from "modules-import4.js";
+ ^^^^^^
+SyntaxError: The requested module does not provide an export named 'c'
+
diff --git a/deps/v8/test/message/modules-import5.js b/deps/v8/test/message/modules-import5.js
new file mode 100644
index 0000000000..d4cb6559bd
--- /dev/null
+++ b/deps/v8/test/message/modules-import5.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as b} from "modules-import5.js";
+export {c as a} from "modules-import5.js";
+import {c} from "modules-import5.js";
diff --git a/deps/v8/test/message/modules-import5.out b/deps/v8/test/message/modules-import5.out
new file mode 100644
index 0000000000..8828774c92
--- /dev/null
+++ b/deps/v8/test/message/modules-import5.out
@@ -0,0 +1,5 @@
+*%(basename)s:8: SyntaxError: The requested module does not provide an export named 'c'
+export {c as a} from "modules-import5.js";
+ ^^^^^^
+SyntaxError: The requested module does not provide an export named 'c'
+
diff --git a/deps/v8/test/message/modules-import6.js b/deps/v8/test/message/modules-import6.js
new file mode 100644
index 0000000000..f625a342b1
--- /dev/null
+++ b/deps/v8/test/message/modules-import6.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a as b} from "modules-import6.js";
+export {c as a};
+import {c} from "modules-import6.js";
diff --git a/deps/v8/test/message/modules-import6.out b/deps/v8/test/message/modules-import6.out
new file mode 100644
index 0000000000..9d7eeebe5d
--- /dev/null
+++ b/deps/v8/test/message/modules-import6.out
@@ -0,0 +1,5 @@
+*%(basename)s:9: SyntaxError: The requested module does not provide an export named 'c'
+import {c} from "modules-import6.js";
+ ^
+SyntaxError: The requested module does not provide an export named 'c'
+
diff --git a/deps/v8/test/mjsunit/modules-skip-cyclic.js b/deps/v8/test/message/modules-skip-cycle2.js
index ad5d80608e..8b5ea93468 100644
--- a/deps/v8/test/mjsunit/modules-skip-cyclic.js
+++ b/deps/v8/test/message/modules-skip-cycle2.js
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-export {b as a} from "modules-fail-cyclic-2.js";
+export {b as a} from "modules-cycle2.js";
diff --git a/deps/v8/test/message/modules-skip-cycle3.js b/deps/v8/test/message/modules-skip-cycle3.js
new file mode 100644
index 0000000000..a63a660c16
--- /dev/null
+++ b/deps/v8/test/message/modules-skip-cycle3.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export {y as a} from "modules-cycle3.js";
+export {x as b} from "modules-cycle3.js";
diff --git a/deps/v8/test/message/modules-star-conflict1.js b/deps/v8/test/message/modules-star-conflict1.js
new file mode 100644
index 0000000000..1bf3473f47
--- /dev/null
+++ b/deps/v8/test/message/modules-star-conflict1.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import {a} from "../mjsunit/modules-skip-7.js";
diff --git a/deps/v8/test/message/modules-star-conflict1.out b/deps/v8/test/message/modules-star-conflict1.out
new file mode 100644
index 0000000000..1a4986ad88
--- /dev/null
+++ b/deps/v8/test/message/modules-star-conflict1.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: The requested module contains conflicting star exports for name 'a'
+import {a} from "../mjsunit/modules-skip-7.js";
+ ^
+SyntaxError: The requested module contains conflicting star exports for name 'a'
+
diff --git a/deps/v8/test/message/modules-star-conflict2.js b/deps/v8/test/message/modules-star-conflict2.js
new file mode 100644
index 0000000000..84d23d8edc
--- /dev/null
+++ b/deps/v8/test/message/modules-star-conflict2.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export * from "../mjsunit/modules-skip-star-exports-conflict.js";
+export * from "../mjsunit/modules-skip-6.js";
+
+import {a} from "modules-star-conflict2.js";
diff --git a/deps/v8/test/message/modules-star-conflict2.out b/deps/v8/test/message/modules-star-conflict2.out
new file mode 100644
index 0000000000..9cbbfc4247
--- /dev/null
+++ b/deps/v8/test/message/modules-star-conflict2.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: The requested module contains conflicting star exports for name 'a'
+export * from "../mjsunit/modules-skip-star-exports-conflict.js";
+ ^
+SyntaxError: The requested module contains conflicting star exports for name 'a'
+
diff --git a/deps/v8/test/mjsunit/modules-fail-6.js b/deps/v8/test/message/modules-star-default.js
index 766cf43852..30bc8f271a 100644
--- a/deps/v8/test/mjsunit/modules-fail-6.js
+++ b/deps/v8/test/message/modules-star-default.js
@@ -5,4 +5,4 @@
// MODULE
// Star exports do not propagate a default export.
-import a from "modules-skip-4.js";
+import a from "modules-import4.js";
diff --git a/deps/v8/test/message/modules-star-default.out b/deps/v8/test/message/modules-star-default.out
new file mode 100644
index 0000000000..a3cd5a6107
--- /dev/null
+++ b/deps/v8/test/message/modules-star-default.out
@@ -0,0 +1,5 @@
+*modules-import4.js:8: SyntaxError: The requested module does not provide an export named 'c'
+export {c as a} from "modules-import4.js";
+ ^^^^^^
+SyntaxError: The requested module does not provide an export named 'c'
+
diff --git a/deps/v8/test/message/modules-undefined-export1.js b/deps/v8/test/message/modules-undefined-export1.js
new file mode 100644
index 0000000000..ddedbaaded
--- /dev/null
+++ b/deps/v8/test/message/modules-undefined-export1.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export {x};
diff --git a/deps/v8/test/message/modules-undefined-export1.out b/deps/v8/test/message/modules-undefined-export1.out
new file mode 100644
index 0000000000..66b862c167
--- /dev/null
+++ b/deps/v8/test/message/modules-undefined-export1.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Export 'x' is not defined in module
+export {x};
+ ^
+SyntaxError: Export 'x' is not defined in module
+
diff --git a/deps/v8/test/message/modules-undefined-export2.js b/deps/v8/test/message/modules-undefined-export2.js
new file mode 100644
index 0000000000..bf8c2a0c94
--- /dev/null
+++ b/deps/v8/test/message/modules-undefined-export2.js
@@ -0,0 +1,7 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export {x as y};
diff --git a/deps/v8/test/message/modules-undefined-export2.out b/deps/v8/test/message/modules-undefined-export2.out
new file mode 100644
index 0000000000..fd036f190a
--- /dev/null
+++ b/deps/v8/test/message/modules-undefined-export2.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Export 'x' is not defined in module
+export {x as y};
+ ^^^^^^
+SyntaxError: Export 'x' is not defined in module
+
diff --git a/deps/v8/test/message/redeclaration1.js b/deps/v8/test/message/redeclaration1.js
new file mode 100644
index 0000000000..56a891a2b0
--- /dev/null
+++ b/deps/v8/test/message/redeclaration1.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let foo = 1;
+let foo = 2;
diff --git a/deps/v8/test/message/redeclaration1.out b/deps/v8/test/message/redeclaration1.out
new file mode 100644
index 0000000000..89e074701a
--- /dev/null
+++ b/deps/v8/test/message/redeclaration1.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'foo' has already been declared
+let foo = 2;
+ ^
+SyntaxError: Identifier 'foo' has already been declared
+
diff --git a/deps/v8/test/message/redeclaration2.js b/deps/v8/test/message/redeclaration2.js
new file mode 100644
index 0000000000..d8637f1503
--- /dev/null
+++ b/deps/v8/test/message/redeclaration2.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let foo = 1;
+let {bar, foo} = {};
diff --git a/deps/v8/test/message/redeclaration2.out b/deps/v8/test/message/redeclaration2.out
new file mode 100644
index 0000000000..8689b6928a
--- /dev/null
+++ b/deps/v8/test/message/redeclaration2.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'foo' has already been declared
+let {bar, foo} = {};
+ ^
+SyntaxError: Identifier 'foo' has already been declared
+
diff --git a/deps/v8/test/message/redeclaration3.js b/deps/v8/test/message/redeclaration3.js
new file mode 100644
index 0000000000..8916609597
--- /dev/null
+++ b/deps/v8/test/message/redeclaration3.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const {bar, foo} = {};
+var [...foo] = [11];
diff --git a/deps/v8/test/message/redeclaration3.out b/deps/v8/test/message/redeclaration3.out
new file mode 100644
index 0000000000..7e691faac4
--- /dev/null
+++ b/deps/v8/test/message/redeclaration3.out
@@ -0,0 +1,5 @@
+*%(basename)s:6: SyntaxError: Identifier 'foo' has already been declared
+var [...foo] = [11];
+ ^
+SyntaxError: Identifier 'foo' has already been declared
+
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index cd9d96bcf4..1af02d8d2f 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc
-// Flags: --ignition-osr --turbo-from-bytecode
+// Flags: --allow-natives-syntax --expose-gc --ignition-osr
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
diff --git a/deps/v8/test/mjsunit/asm/asm-validation.js b/deps/v8/test/mjsunit/asm/asm-validation.js
index eae282ca57..06799dab67 100644
--- a/deps/v8/test/mjsunit/asm/asm-validation.js
+++ b/deps/v8/test/mjsunit/asm/asm-validation.js
@@ -8,6 +8,76 @@ function assertValidAsm(func) {
assertTrue(%IsAsmWasmCode(func));
}
+(function TestConst() {
+ function Module(s) {
+ "use asm";
+ var fround = s.Math.fround;
+ // Global constants. These are treated just like numeric literals.
+ const fConst = fround(-3.0);
+ const dConst = -3.0;
+ const iConst = -3;
+
+ // consts can be used to initialize other consts.
+ const fPrime = fConst;
+
+ // The following methods verify that return statements with global constants
+ // do not need type annotations.
+ function f() {
+ return fPrime;
+ }
+ function d() {
+ return dConst;
+ }
+ function i() {
+ return iConst;
+ }
+
+ // The following methods verify that locals initialized with global
+ // constants do not need type annotations.
+ function fVar() {
+ var v = fPrime;
+ return fround(v);
+ }
+ function iVar() {
+ var v = iConst;
+ return v|0;
+ }
+ function dVar() {
+ var v = dConst;
+ return +v;
+ }
+
+ return {
+ f: f, d: d, i: i,
+ fVar: fVar, dVar: dVar, iVar: iVar,
+ };
+ }
+
+ function DisallowAssignToConstGlobal() {
+ const constant = 0;
+ function invalid(i) {
+ i = i|0;
+ constant = i;
+ return constant;
+ }
+ return invalid;
+ }
+
+ var m = Module(this);
+ assertValidAsm(Module);
+
+ assertEquals(-3, m.i());
+ assertEquals(-3.0, m.d());
+ assertEquals(Math.fround(-3.0), m.f());
+
+ assertEquals(-3, m.iVar());
+ assertEquals(-3.0, m.dVar());
+ assertEquals(Math.fround(-3.0), m.fVar());
+
+ var m = DisallowAssignToConstGlobal();
+ assertTrue(%IsNotAsmWasmCode(DisallowAssignToConstGlobal));
+})();
+
(function TestModuleArgs() {
function Module1(stdlib) {
"use asm";
@@ -213,3 +283,15 @@ function assertValidAsm(func) {
assertValidAsm(Module);
assertEquals(123, m.foo());
})();
+
+(function TestBadConstUnsignedReturn() {
+ function Module() {
+ "use asm";
+ const i = 0xffffffff;
+ function foo() { return i; }
+ return { foo: foo };
+ }
+ var m = Module();
+ assertTrue(%IsNotAsmWasmCode(Module));
+ assertEquals(0xffffffff, m.foo());
+})();
diff --git a/deps/v8/test/mjsunit/asm/b5528-comma.js b/deps/v8/test/mjsunit/asm/b5528-comma.js
new file mode 100644
index 0000000000..a6eab6d2ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/b5528-comma.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, env, heap) {
+ "use asm";
+
+ var a = new stdlib.Int32Array(heap);
+ var b = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ var value = env.value|0;
+
+ function foo() {
+ var x = fround(0.0);
+ x = (a[0]=value|0,fround(b[0]));
+ return fround(x);
+ }
+
+ return { foo: foo };
+}
+
+var buffer = new ArrayBuffer(32);
+assertEquals(0.0, Module(this, {value: 0x00000000}, buffer).foo());
+assertEquals(-0.0, Module(this, {value: 0x80000000}, buffer).foo());
+assertEquals(5.0, Module(this, {value: 0x40a00000}, buffer).foo());
+assertEquals(-5.0, Module(this, {value: 0xc0a00000}, buffer).foo());
+assertEquals(129.375, Module(this, {value: 0x43016000}, buffer).foo());
+assertEquals(-129.375, Module(this, {value: 0xc3016000}, buffer).foo());
+assertEquals(Infinity, Module(this, {value: 0x7f800000}, buffer).foo());
+assertEquals(-Infinity, Module(this, {value: 0xff800000}, buffer).foo());
+assertEquals(NaN, Module(this, {value: 0x7fffffff}, buffer).foo());
diff --git a/deps/v8/test/mjsunit/asm/regress-660813.js b/deps/v8/test/mjsunit/asm/regress-660813.js
new file mode 100644
index 0000000000..e9bf5797c7
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-660813.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module() {
+ "use asm";
+ const i = 0xffffffff;
+ function foo() {
+ return i;
+ }
+}
+Module();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js b/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
new file mode 100644
index 0000000000..51faef0952
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
@@ -0,0 +1,155 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(x, y) { return x << y; }
+
+ foo(1.1, 0.1);
+ %BaselineFunctionOnNextCall(foo);
+ foo(0.1, 1.1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(undefined, 1.1);
+ assertOptimized(foo);
+ foo(1.1, undefined);
+ assertOptimized(foo);
+ foo(null, 1.1);
+ assertOptimized(foo);
+ foo(1.1, null);
+ assertOptimized(foo);
+ foo(true, 1.1);
+ assertOptimized(foo);
+ foo(1.1, true);
+ assertOptimized(foo);
+ foo(false, 1.1);
+ assertOptimized(foo);
+ foo(1.1, false);
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x, y) { return x >> y; }
+
+ foo(1.1, 0.1);
+ %BaselineFunctionOnNextCall(foo);
+ foo(0.1, 1.1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(undefined, 1.1);
+ assertOptimized(foo);
+ foo(1.1, undefined);
+ assertOptimized(foo);
+ foo(null, 1.1);
+ assertOptimized(foo);
+ foo(1.1, null);
+ assertOptimized(foo);
+ foo(true, 1.1);
+ assertOptimized(foo);
+ foo(1.1, true);
+ assertOptimized(foo);
+ foo(false, 1.1);
+ assertOptimized(foo);
+ foo(1.1, false);
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x, y) { return x >>> y; }
+
+ foo(1.1, 0.1);
+ %BaselineFunctionOnNextCall(foo);
+ foo(0.1, 1.1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(undefined, 1.1);
+ assertOptimized(foo);
+ foo(1.1, undefined);
+ assertOptimized(foo);
+ foo(null, 1.1);
+ assertOptimized(foo);
+ foo(1.1, null);
+ assertOptimized(foo);
+ foo(true, 1.1);
+ assertOptimized(foo);
+ foo(1.1, true);
+ assertOptimized(foo);
+ foo(false, 1.1);
+ assertOptimized(foo);
+ foo(1.1, false);
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x, y) { return x ^ y; }
+
+ foo(1.1, 0.1);
+ %BaselineFunctionOnNextCall(foo);
+ foo(0.1, 1.1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(undefined, 1.1);
+ assertOptimized(foo);
+ foo(1.1, undefined);
+ assertOptimized(foo);
+ foo(null, 1.1);
+ assertOptimized(foo);
+ foo(1.1, null);
+ assertOptimized(foo);
+ foo(true, 1.1);
+ assertOptimized(foo);
+ foo(1.1, true);
+ assertOptimized(foo);
+ foo(false, 1.1);
+ assertOptimized(foo);
+ foo(1.1, false);
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x, y) { return x | y; }
+
+ foo(1.1, 0.1);
+ %BaselineFunctionOnNextCall(foo);
+ foo(0.1, 1.1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(undefined, 1.1);
+ assertOptimized(foo);
+ foo(1.1, undefined);
+ assertOptimized(foo);
+ foo(null, 1.1);
+ assertOptimized(foo);
+ foo(1.1, null);
+ assertOptimized(foo);
+ foo(true, 1.1);
+ assertOptimized(foo);
+ foo(1.1, true);
+ assertOptimized(foo);
+ foo(false, 1.1);
+ assertOptimized(foo);
+ foo(1.1, false);
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(x, y) { return x & y; }
+
+ foo(1.1, 0.1);
+ %BaselineFunctionOnNextCall(foo);
+ foo(0.1, 1.1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(undefined, 1.1);
+ assertOptimized(foo);
+ foo(1.1, undefined);
+ assertOptimized(foo);
+ foo(null, 1.1);
+ assertOptimized(foo);
+ foo(1.1, null);
+ assertOptimized(foo);
+ foo(true, 1.1);
+ assertOptimized(foo);
+ foo(1.1, true);
+ assertOptimized(foo);
+ foo(false, 1.1);
+ assertOptimized(foo);
+ foo(1.1, false);
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
new file mode 100644
index 0000000000..2a9f6a1c38
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var s = "12345";
+
+(function() {
+ function foo() { return s[5]; }
+
+ foo();
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ %OptimizeFunctionOnNextCall(foo);
+ foo();
+ assertOptimized(foo);
+})();
+
+(function() {
+ function foo(i) { return s[i]; }
+
+ foo(0);
+ foo(1);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(5);
+ assertOptimized(foo);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
index b2b63988ba..66245ef073 100644
--- a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
+++ b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
@@ -53,9 +53,15 @@ f(g(1));
assertUnoptimized(f);
assertUnoptimized(g);
+%BaselineFunctionOnNextCall(f);
+%BaselineFunctionOnNextCall(g);
+f(g(2));
+assertUnoptimized(f);
+assertUnoptimized(g);
+
%OptimizeFunctionOnNextCall(f, "concurrent");
%OptimizeFunctionOnNextCall(g, "concurrent");
-f(g(2)); // Kick off recompilation.
+f(g(3)); // Kick off recompilation.
assertUnoptimized(f, "no sync"); // Not yet optimized since recompilation
assertUnoptimized(g, "no sync"); // is still blocked.
diff --git a/deps/v8/test/mjsunit/compiler/regress-5538.js b/deps/v8/test/mjsunit/compiler/regress-5538.js
new file mode 100644
index 0000000000..7e4c25d3bc
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-5538.js
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ return Number.parseInt(x + 1);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
+})();
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ return Number.parseInt(x + 1, 0);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
+})();
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ return Number.parseInt(x + 1, 10);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
+})();
+
+(function() {
+ function foo(x) {
+ x = x | 0;
+ return Number.parseInt(x + 1, undefined);
+ }
+
+ assertEquals(1, foo(0));
+ assertEquals(2, foo(1));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(Math.pow(2, 31), foo(Math.pow(2, 31) - 1));
+})();
diff --git a/deps/v8/test/mjsunit/compiler/regress-664490.js b/deps/v8/test/mjsunit/compiler/regress-664490.js
new file mode 100644
index 0000000000..94094c7362
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-664490.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var foo = function(msg) {};
+
+foo = function (value) {
+ assertEquals(false, value);
+}
+
+function f() {
+ foo(undefined == 0);
+}
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-665680.js b/deps/v8/test/mjsunit/compiler/regress-665680.js
new file mode 100644
index 0000000000..b014bee9ac
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-665680.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {}
+
+var invalidAsmFunction = (function() {
+ "use asm";
+ return function() {
+ with (foo) foo();
+ }
+})();
+
+invalidAsmFunction();
+%OptimizeFunctionOnNextCall(invalidAsmFunction);
+invalidAsmFunction();
diff --git a/deps/v8/test/mjsunit/compiler/regress-673244.js b/deps/v8/test/mjsunit/compiler/regress-673244.js
new file mode 100644
index 0000000000..b18d47b8dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-673244.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var accumulator = false;
+ for (var i = 0; i < 4; i++) {
+ accumulator = accumulator.hasOwnProperty(3);
+ if (i === 1) %OptimizeOsr();
+ }
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-v8-5573.js b/deps/v8/test/mjsunit/compiler/regress-v8-5573.js
new file mode 100644
index 0000000000..216b791a71
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-v8-5573.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var global = true;
+global = false;
+
+function f() {
+ return !global;
+}
+
+%OptimizeFunctionOnNextCall(f);
+assertTrue(f());
diff --git a/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js b/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
new file mode 100644
index 0000000000..66274d54d1
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function foo(a, v) {
+ a[0] = v & 0xff;
+ }
+
+ var a = new Uint8ClampedArray(4);
+ foo(a, 1);
+ foo(a, 2);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(a, 256);
+ assertOptimized(foo);
+ assertEquals(0, a[0]);
+})();
+
+(function() {
+ function foo(a, v) {
+ a[0] = v >>> 0;
+ }
+
+ var a = new Uint8ClampedArray(4);
+ foo(a, 1);
+ foo(a, 2);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(a, 256);
+ assertOptimized(foo);
+ assertEquals(255, a[0]);
+})();
+
+(function() {
+ function foo(a, v) {
+ a[0] = v | 0;
+ }
+
+ var a = new Uint8ClampedArray(4);
+ foo(a, 1);
+ foo(a, 2);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(a, 256);
+ assertOptimized(foo);
+ assertEquals(255, a[0]);
+ foo(a, -1);
+ assertOptimized(foo);
+ assertEquals(0, a[0]);
+})();
+
+(function() {
+ function foo(a, v) {
+ a[0] = v;
+ }
+
+ var a = new Uint8ClampedArray(4);
+ foo(a, 1);
+ foo(a, 2);
+ %OptimizeFunctionOnNextCall(foo);
+ foo(a, Infinity);
+ assertOptimized(foo);
+ assertEquals(255, a[0]);
+ foo(a, -Infinity);
+ assertOptimized(foo);
+ assertEquals(0, a[0]);
+ foo(a, 0.5);
+ assertOptimized(foo);
+ assertEquals(0, a[0]);
+ foo(a, 1.5);
+ assertOptimized(foo);
+ assertEquals(2, a[0]);
+})();
diff --git a/deps/v8/test/mjsunit/debug-evaluate-arguments.js b/deps/v8/test/mjsunit/debug-evaluate-arguments.js
deleted file mode 100644
index 92b745f1da..0000000000
--- a/deps/v8/test/mjsunit/debug-evaluate-arguments.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-listenerComplete = false;
-exception = false;
-
-function checkArguments(frame, names, values) {
- var argc = Math.max(names.length, values.length);
- assertEquals(argc, frame.argumentCount());
- for (var i = 0; i < argc; i++) {
- if (i < names.length) {
- assertEquals(names[i], frame.argumentName(i));
- } else {
- assertEquals(void 0, frame.argumentName(i));
- }
-
- if (i < values.length) {
- assertEquals(values[i], frame.argumentValue(i).value());
- } else {
- assertEquals(void 0, frame.argumentValue(i).value());
- }
- }
-}
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break)
- {
- // Frame 0 - called with less parameters than arguments.
- checkArguments(exec_state.frame(0), ['x', 'y'], [1]);
-
- // Frame 1 - called with more parameters than arguments.
- checkArguments(exec_state.frame(1), ['x', 'y'], [1, 2, 3]);
-
- // Frame 2 - called with same number of parameters than arguments.
- checkArguments(exec_state.frame(2), ['x', 'y', 'z'], [1, 2, 3]);
-
- // Indicate that all was processed.
- listenerComplete = true;
- }
- } catch (e) {
- exception = e
- };
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-function h(x, y) {
- debugger; // Breakpoint.
-};
-
-function g(x, y) {
- h(x);
-};
-
-function f(x, y, z) {
- g.apply(null, [x, y, z]);
-};
-
-f(1, 2, 3);
-
-// Make sure that the debug event listener vas invoked.
-assertTrue(listenerComplete);
-assertFalse(exception, "exception in listener")
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 3bedb74a35..386e11606e 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -80,9 +80,9 @@ assertTrue(extension_count == 2 || extension_count == 3);
assertTrue(normal_count == 2 || normal_count == 3);
// Test a builtins script.
-var math_script = Debug.findScript('native math.js');
-assertEquals('native math.js', math_script.name);
-assertEquals(Debug.ScriptType.Native, math_script.type);
+var array_script = Debug.findScript('native array.js');
+assertEquals('native array.js', array_script.name);
+assertEquals(Debug.ScriptType.Native, array_script.type);
// Test a debugger script.
var debug_delay_script = Debug.findScript('native debug.js');
diff --git a/deps/v8/test/mjsunit/debug-set-variable-value.js b/deps/v8/test/mjsunit/debug-set-variable-value.js
index 65434289d0..6f872dfecf 100644
--- a/deps/v8/test/mjsunit/debug-set-variable-value.js
+++ b/deps/v8/test/mjsunit/debug-set-variable-value.js
@@ -63,7 +63,7 @@ function RunPauseTest(scope_number, expected_old_result, variable_name,
// Add the debug event listener.
Debug.setListener(listener);
- var actual_new_value;
+ var actual_new_result;
try {
actual_new_result = fun();
} finally {
@@ -78,7 +78,7 @@ function RunPauseTest(scope_number, expected_old_result, variable_name,
assertEquals(expected_new_result, actual_new_result);
}
-// Accepts a closure 'fun' that returns a variable from it's outer scope.
+// Accepts a closure 'fun' that returns a variable from its outer scope.
// The test changes the value of variable via the handle to function and checks
// that the return value changed accordingly.
function RunClosureTest(scope_number, expected_old_result, variable_name,
@@ -307,3 +307,32 @@ assertSame(Number, DebugCommandProcessor.resolveValue_(
{handle: Debug.MakeMirror(Number).handle()}));
assertSame(RunClosureTest, DebugCommandProcessor.resolveValue_(
{handle: Debug.MakeMirror(RunClosureTest).handle()}));
+
+
+// Test script-scope variable.
+let abc = 12;
+{
+ let exception;
+ function listener(event, exec_state) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ let scope_count = exec_state.frame().scopeCount();
+ let script_scope = exec_state.frame().scope(scope_count - 2);
+ assertTrue(script_scope.isScope());
+ assertEquals(debug.ScopeType.Script, script_scope.scopeType());
+ script_scope.setVariableValue('abc', 42);
+ }
+ } catch(e) { exception = e }
+ }
+
+ Debug.setListener(listener);
+ assertEquals(12, abc);
+ debugger;
+ assertEquals(42, abc);
+
+ if (exception != null) {
+ assertUnreachable("Exception in listener\n" + exception.stack);
+ }
+}
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-step-2.js b/deps/v8/test/mjsunit/debug-step-2.js
deleted file mode 100644
index 502b426ee2..0000000000
--- a/deps/v8/test/mjsunit/debug-step-2.js
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-
-// This test tests that full code compiled without debug break slots
-// is recompiled with debug break slots when debugging is started.
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var bp;
-var done = false;
-var step_count = 0;
-
-// Debug event listener which steps until the global variable done is true.
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break) {
- if (!done) exec_state.prepareStep(Debug.StepAction.StepNext);
- step_count++;
- }
-};
-
-// Set the global variables state to prpare the stepping test.
-function prepare_step_test() {
- done = false;
- step_count = 0;
-}
-
-// Test function to step through.
-function f() {
- var i = 1;
- var j = 2;
- done = true;
-};
-
-prepare_step_test();
-f();
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-bp = Debug.setBreakPoint(f, 1);
-
-prepare_step_test();
-f();
-assertEquals(4, step_count);
-Debug.clearBreakPoint(bp);
-
-// Set a breakpoint on the first var statement (line 1).
-bp = Debug.setBreakPoint(f, 1);
-
-// Step through the function ensuring that the var statements are hit as well.
-prepare_step_test();
-f();
-assertEquals(4, step_count);
-
-// Clear the breakpoint and check that no stepping happens.
-Debug.clearBreakPoint(bp);
-prepare_step_test();
-f();
-assertEquals(0, step_count);
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-step-3.js b/deps/v8/test/mjsunit/debug-step-3.js
deleted file mode 100644
index 9cac0f5619..0000000000
--- a/deps/v8/test/mjsunit/debug-step-3.js
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-
-// This test tests that full code compiled without debug break slots
-// is recompiled with debug break slots when debugging is started.
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-var bp;
-var done = false;
-var step_count = 0;
-var set_bp = false
-
-// Debug event listener which steps until the global variable done is true.
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break) {
- if (!done) exec_state.prepareStep(Debug.StepAction.StepNext);
- step_count++;
- }
-};
-
-// Set the global variables state to prpare the stepping test.
-function prepare_step_test() {
- done = false;
- step_count = 0;
-}
-
-// Test function to step through.
-function f() {
- var a = 0;
- if (set_bp) { bp = Debug.setBreakPoint(f, 3); }
- var i = 1;
- var j = 2;
- done = true;
-};
-
-prepare_step_test();
-f();
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-// Make f set a breakpoint with an activation on the stack.
-prepare_step_test();
-set_bp = true;
-f();
-// TODO(1782): Fix issue to bring back this assert.
-//assertEquals(4, step_count);
-Debug.clearBreakPoint(bp);
-
-// Set a breakpoint on the first var statement (line 1).
-set_bp = false;
-bp = Debug.setBreakPoint(f, 3);
-
-// Step through the function ensuring that the var statements are hit as well.
-prepare_step_test();
-f();
-assertEquals(4, step_count);
-
-// Clear the breakpoint and check that no stepping happens.
-Debug.clearBreakPoint(bp);
-prepare_step_test();
-f();
-assertEquals(0, step_count);
-
-// Get rid of the debug event listener.
-Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-step-4.js b/deps/v8/test/mjsunit/debug-step-4.js
deleted file mode 100644
index 3992f622a6..0000000000
--- a/deps/v8/test/mjsunit/debug-step-4.js
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// Tests how debugger can step over not necessarily in the top frame.
-
-// Simple 3 functions, that protocol their execution state in global
-// variable state.
-var state;
-
-function f() {
- var a = 1978;
- for (state[2] = 0; state[2] < 3; state[2]++) {
- void String(a);
- }
-}
-function g() {
- for (state[1] = 0; state[1] < 3; state[1]++) {
- f();
- }
-}
-function h() {
- state = [-1, -1, -1];
- for (state[0] = 0; state[0] < 3; state[0]++) {
- g();
- }
-}
-
-function TestCase(expected_final_state) {
- var listener_exception = null;
- var state_snapshot;
- var listener_state;
- var bp;
-
- function listener(event, exec_state, event_data, data) {
- print("Here ("+event+"/"+listener_state+"): " +
- exec_state.frame(0).sourceLineText());
- try {
- if (event == Debug.DebugEvent.Break) {
- if (listener_state == 0) {
- Debug.clearBreakPoint(bp);
- exec_state.prepareStep(Debug.StepAction.StepNext);
- listener_state = 1;
- } else if (listener_state == 1) {
- state_snapshot = String(state);
- print("State: " + state_snapshot);
- Debug.setListener(null);
- listener_state = 2;
- }
- }
- } catch (e) {
- listener_exception = e;
- }
- }
-
-
- // Add the debug event listener.
- listener_state = 0;
- Debug.setListener(listener);
- bp = Debug.setBreakPoint(f, 1);
-
- h();
- Debug.setListener(null);
- if (listener_exception !== null) {
- print("Exception caught: " + listener_exception);
- assertUnreachable();
- }
-
- assertEquals(expected_final_state, state_snapshot);
-}
-
-
-// Warm-up -- make sure all is compiled and ready for breakpoint.
-h();
-
-TestCase("0,0,-1");
diff --git a/deps/v8/test/mjsunit/debug-step.js b/deps/v8/test/mjsunit/debug-step.js
deleted file mode 100644
index 6f5c8f410c..0000000000
--- a/deps/v8/test/mjsunit/debug-step.js
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// Simple debug event handler which first time hit will perform 1000 steps and
-// second time hit will evaluate and store the value of "i". If requires that
-// the global property "state" is initially zero.
-
-var bp1, bp2;
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break) {
- if (step_count > 0) {
- exec_state.prepareStep(Debug.StepAction.StepIn);
- step_count--;
- } else {
- result = exec_state.frame().evaluate("i").value();
- // Clear the break point on line 2 if set.
- if (bp2) {
- Debug.clearBreakPoint(bp2);
- }
- }
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-// Test debug event for break point.
-function f() {
- var i; // Line 1.
- for (i = 0; i < 1000; i++) { // Line 2.
- x = 1; // Line 3.
- }
-};
-
-// Set a breakpoint on the for statement (line 1).
-bp1 = Debug.setBreakPoint(f, 1);
-
-// Check that performing 1000 steps will make i 333.
-var step_count = 1000;
-result = -1;
-f();
-assertEquals(333, result);
-Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-version.js b/deps/v8/test/mjsunit/debug-version.js
deleted file mode 100644
index b1bc1e8d82..0000000000
--- a/deps/v8/test/mjsunit/debug-version.js
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// Simple function which stores the last debug event.
-listenerComplete = false;
-exception = false;
-
-var base_version_request = '"seq":0,"type":"request","command":"version"'
-
-function safeEval(code) {
- try {
- return eval('(' + code + ')');
- } catch (e) {
- assertEquals(void 0, e);
- return undefined;
- }
-}
-
-function testArguments(exec_state) {
- // Get the debug command processor in running state.
- var dcp = exec_state.debugCommandProcessor(true);
-
- assertTrue(dcp.isRunning());
-
- var version_request = '{' + base_version_request + '}'
- var version_response = safeEval(dcp.processDebugJSONRequest(version_request));
-
- assertTrue(version_response.success);
-
- var version_string = version_response.body.V8Version;
-
- assertTrue(!!version_string, version_request + ' -> expected version string');
-
- var version_pattern = /^\d*\.\d*\.\d*/;
-
- assertTrue(!!(version_string.match(version_pattern)), "unexpected format of version: " + version_string);
-}
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
-
- // Test simple suspend request.
- testArguments(exec_state);
-
- // Indicate that all was processed.
- listenerComplete = true;
- }
- } catch (e) {
- exception = e
- };
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-// Stop debugger and check that suspend command changes running flag.
-debugger;
-
-assertFalse(exception, "exception in listener")
-// Make sure that the debug event listener vas invoked.
-assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
new file mode 100644
index 0000000000..c1b8d32072
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
@@ -0,0 +1,243 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo --turbo-escape --allow-natives-syntax
+
+"use strict";
+
+const kDeoptimized = 2;
+const kTurbofanned = 7;
+const kInterpreted = 8;
+
+function GetOptimizationStatus(fn) {
+ let status = %GetOptimizationStatus(fn);
+ switch (status) {
+ case kInterpreted: // Treat interpreted frames as unoptimized
+ status = kDeoptimized;
+ break;
+ }
+
+ return status;
+}
+
+let global = this;
+let tests = {
+ FastElementsKind() {
+ let runners = {
+ FAST_SMI_ELEMENTS(array) {
+ let sum = 0;
+ for (let x of array) sum += x;
+ return sum;
+ },
+
+ FAST_HOLEY_SMI_ELEMENTS(array) {
+ let sum = 0;
+ for (let x of array) {
+ if (x) sum += x;
+ }
+ return sum;
+ },
+
+ FAST_ELEMENTS(array) {
+ let ret = "";
+ for (let str of array) ret += `> ${str}`;
+ return ret;
+ },
+
+ FAST_HOLEY_ELEMENTS(array) {
+ let ret = "";
+ for (let str of array) ret += `> ${str}`;
+ return ret;
+ },
+
+ FAST_DOUBLE_ELEMENTS(array) {
+ let sum = 0.0;
+ for (let x of array) sum += x;
+ return sum;
+ },
+
+ FAST_HOLEY_DOUBLE_ELEMENTS(array) {
+ let sum = 0.0;
+ for (let x of array) {
+ if (x) sum += x;
+ }
+ return sum;
+ }
+ };
+
+ let tests = {
+ FAST_SMI_ELEMENTS: {
+ array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ expected: 55,
+ array2: [1, 2, 3],
+ expected2: 6
+ },
+ FAST_HOLEY_SMI_ELEMENTS: {
+ array: [1, , 3, , 5, , 7, , 9, ,],
+ expected: 25,
+ array2: [1, , 3],
+ expected2: 4
+ },
+ FAST_ELEMENTS: {
+ array: ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
+ expected: "> a> b> c> d> e> f> g> h> i> j",
+ array2: ["a", "b", "c"],
+ expected2: "> a> b> c"
+ },
+ FAST_HOLEY_ELEMENTS: {
+ array: ["a", , "c", , "e", , "g", , "i", ,],
+ expected: "> a> undefined> c> undefined> e> undefined> g" +
+ "> undefined> i> undefined",
+ array2: ["a", , "c"],
+ expected2: "> a> undefined> c"
+ },
+ FAST_DOUBLE_ELEMENTS: {
+ array: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
+ expected: 5.5,
+ array2: [0.6, 0.4, 0.2],
+ expected2: 1.2
+ },
+ FAST_HOLEY_DOUBLE_ELEMENTS: {
+ array: [0.1, , 0.3, , 0.5, , 0.7, , 0.9, ,],
+ expected: 2.5,
+ array2: [0.1, , 0.3],
+ expected2: 0.4
+ }
+ };
+
+ for (let key of Object.keys(runners)) {
+ let fn = runners[key];
+ let { array, expected, array2, expected2 } = tests[key];
+
+ // Warmup:
+ fn(array);
+ fn(array);
+ %OptimizeFunctionOnNextCall(fn);
+ fn(array);
+
+ // TODO(bmeurer): FAST_HOLEY_DOUBLE_ELEMENTS maps generally deopt when
+ // a hole is encountered. Test should be fixed once that is corrected.
+ let status = /HOLEY_DOUBLE/.test(key) ? kDeoptimized : kTurbofanned;
+
+ assertEquals(status, GetOptimizationStatus(fn), key);
+ assertEquals(expected, fn(array), key);
+ assertEquals(status, GetOptimizationStatus(fn), key);
+
+ // Check no deopt when another array with the same map is used
+ assertTrue(%HaveSameMap(array, array2), key);
+ assertEquals(status, GetOptimizationStatus(fn), key);
+ assertEquals(expected2, fn(array2), key);
+
+ // CheckMaps bailout
+ let newArray = Object.defineProperty(
+ [1, 2, 3], 2, { enumerable: false, configurable: false,
+ get() { return 7; } });
+ fn(newArray);
+ assertEquals(kDeoptimized, GetOptimizationStatus(fn), key);
+ }
+ },
+
+ TypedArrays() {
+ let tests = {
+ Uint8Array: {
+ array: new Uint8Array([1, 2, 3, 4, 5, 6, 7, 8, -1, 256]),
+ expected: 291,
+ array2: new Uint8Array([1, 2, 3]),
+ expected2: 6
+ },
+
+ Int8Array: {
+ array: new Int8Array([1, 2, 3, 4, 5, 6, 7, 8, -129, 128]),
+ expected: 35,
+ array2: new Int8Array([1, 2, 3]),
+ expected2: 6
+ },
+
+ Uint16Array: {
+ array: new Uint16Array([1, 2, 3, 4, 5, 6, 7, 8, -1, 0x10000]),
+ expected: 65571,
+ array2: new Uint16Array([1, 2, 3]),
+ expected2: 6
+ },
+
+ Int16Array: {
+ array: new Int16Array([1, 2, 3, 4, 5, 6, 7, 8, -32769, 0x7FFF]),
+ expected: 65570,
+ array2: new Int16Array([1, 2, 3]),
+ expected2: 6
+ },
+
+ Uint32Array: {
+ array: new Uint32Array([1, 2, 3, 4, 5, 6, 7, 8, -1, 0x100000000]),
+ expected: 4294967331,
+ array2: new Uint32Array([1, 2, 3]),
+ expected2: 6
+ },
+
+ Int32Array: {
+ array: new Int32Array([1, 2, 3, 4, 5, 6, 7, 8,
+ -2147483649, 0x7FFFFFFF]),
+ expected: 4294967330,
+ array2: new Int32Array([1, 2, 3]),
+ expected2: 6
+ },
+
+ Float32Array: {
+ array: new Float32Array([9.5, 8.0, 7.0, 7.0, 5.0, 4.0, 3.0, 2.0]),
+ expected: 45.5,
+ array2: new Float32Array([10.5, 5.5, 1.5]),
+ expected2: 17.5
+ },
+
+ Float64Array: {
+ array: new Float64Array([9.5, 8.0, 7.0, 7.0, 5.0, 4.0, 3.0, 2.0]),
+ expected: 45.5,
+ array2: new Float64Array([10.5, 5.5, 1.5]),
+ expected2: 17.5
+ },
+
+ Uint8ClampedArray: {
+ array: new Uint8ClampedArray([4.3, 7.45632, 3.14, 4.61, 5.0004, 6.493,
+ 7.12, 8, 1.7, 3.6]),
+ expected: 51,
+ array2: new Uint8ClampedArray([1, 2, 3]),
+ expected2: 6
+ }
+ };
+
+ for (let key of Object.keys(tests)) {
+ let test = tests[key];
+ let { array, expected, array2, expected2 } = test;
+
+ let sum = function(array) {
+ let ret = 0;
+ for (let x of array) ret += x;
+ return ret;
+ };
+
+ // Warmup
+ sum(array);
+ sum(array);
+ %OptimizeFunctionOnNextCall(sum);
+ assertEquals(expected, sum(array), key);
+
+ assertEquals(kTurbofanned, GetOptimizationStatus(sum), key);
+
+ // Not deoptimized when called on typed array of same type / map
+ assertTrue(%HaveSameMap(array, array2));
+ assertEquals(expected2, sum(array2), key);
+ assertEquals(kTurbofanned, GetOptimizationStatus(sum), key);
+
+ // Throw when detached
+ let clone = new array.constructor(array);
+ %ArrayBufferNeuter(clone.buffer);
+ assertThrows(() => sum(clone), TypeError);
+ }
+ }
+};
+
+for (let name of Object.keys(tests)) {
+ let test = tests[name];
+ test();
+}
diff --git a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
index dca514c294..23cb6c06df 100644
--- a/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
+++ b/deps/v8/test/mjsunit/es6/classes-subclass-builtins.js
@@ -920,7 +920,6 @@ function TestMapSetSubclassing(container, is_map) {
assertEquals(["match", "tostring"], log);
// TODO(littledan): Is the RegExp constructor correct to create
// the internal slots and do these type checks this way?
- assertEquals("biep", %_RegExpSource(o));
assertThrows(() => Object.getOwnPropertyDescriptor(RegExp.prototype,
'source').get(o),
TypeError);
diff --git a/deps/v8/test/mjsunit/es6/classes.js b/deps/v8/test/mjsunit/es6/classes.js
index fb77dbb8e4..816d113263 100644
--- a/deps/v8/test/mjsunit/es6/classes.js
+++ b/deps/v8/test/mjsunit/es6/classes.js
@@ -164,14 +164,15 @@
SyntaxError);
var D = class extends function() {
- arguments.caller;
+ this.args = arguments;
} {};
assertThrows(function() {
Object.getPrototypeOf(D).arguments;
}, TypeError);
- assertThrows(function() {
- new D;
- }, TypeError);
+ var e = new D();
+ assertThrows(() => e.args.callee, TypeError);
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(e.args, 'caller'));
+ assertFalse('caller' in e.args);
})();
diff --git a/deps/v8/test/mjsunit/es6/completion.js b/deps/v8/test/mjsunit/es6/completion.js
index 988e9709bb..6bc7d47a02 100644
--- a/deps/v8/test/mjsunit/es6/completion.js
+++ b/deps/v8/test/mjsunit/es6/completion.js
@@ -146,3 +146,10 @@ assertUndef(eval(
assertUndef(eval("1; try{2; throwOnReturn();} catch(e){}"));
assertUndef(eval("1; twoFunc();"));
assertEquals(2, eval("1; with ( { a: 0 } ) { 2; }"));
+
+assertUndef(eval('a: while(true) { do { 0 } while(false); switch(1) { case 0: 1; case 1: break a; }; 0 }'));
+assertUndef(eval('a: while(true) { do { 0 } while(false); try {} finally { break a }; 0 }'));
+assertUndef(eval('a: while(true) { b: while(true) { 0; break b; }; switch(1) { case 1: break a; }; 2 }'));
+assertUndef(eval('a: while(true) { b: while(true) { 0; break b; }; while (true) { break a; }; 2 }'));
+assertUndef(eval('while (true) { 20; a:{ break a; } with ({}) break; 30; }'));
+assertEquals(42, eval('a: while(true) { switch(0) { case 0: 42; case 1: break a; }; 33 }'));
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
index e541f0f4b4..9e3bd172c8 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
@@ -7,18 +7,12 @@
Debug = debug.Debug
var exception = null;
var break_count = 0;
-var expected_breaks = -1;
+const expected_breaks = 9;
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
- if (!break_count) {
- // Count number of expected breakpoints in this source file.
- var source_text = exec_state.frame(0).func().script().source();
- expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
- print("Expected breaks: " + expected_breaks);
- }
var source = exec_state.frame(0).sourceLineText();
print("paused at: " + source);
assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
diff --git a/deps/v8/test/mjsunit/es6/default-parameters-debug.js b/deps/v8/test/mjsunit/es6/default-parameters-debug.js
index 30e19c42af..49adfa4610 100644
--- a/deps/v8/test/mjsunit/es6/default-parameters-debug.js
+++ b/deps/v8/test/mjsunit/es6/default-parameters-debug.js
@@ -9,36 +9,31 @@ Debug = debug.Debug
listenerComplete = false;
breakPointCount = 0;
+exception = null;
function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
breakPointCount++;
if (breakPointCount == 1) {
// Break point in initializer for parameter `a`, invoked by
// initializer for parameter `b`
- assertEquals('default', exec_state.frame(1).evaluate('mode').value());
-
- // initializer for `b` can't refer to `b`
- assertThrows(function() {
- exec_state.frame(1).evaluate('b').value();
- }, ReferenceError);
-
- assertThrows(function() {
- exec_state.frame(1).evaluate('c');
- }, ReferenceError);
+ assertEquals('default', exec_state.frame(0).evaluate('mode').value());
+ assertTrue(exec_state.frame(1).evaluate('b').isUndefined());
+ assertTrue(exec_state.frame(1).evaluate('c').isUndefined());
} else if (breakPointCount == 2) {
// Break point in IIFE initializer for parameter `c`
assertEquals('modeFn', exec_state.frame(1).evaluate('a.name').value());
assertEquals('default', exec_state.frame(1).evaluate('b').value());
- assertThrows(function() {
- exec_state.frame(1).evaluate('c');
- }, ReferenceError);
+ assertTrue(exec_state.frame(1).evaluate('c').isUndefined());
} else if (breakPointCount == 3) {
// Break point in function body --- `c` parameter is shadowed
assertEquals('modeFn', exec_state.frame(0).evaluate('a.name').value());
assertEquals('default', exec_state.frame(0).evaluate('b').value());
assertEquals('local', exec_state.frame(0).evaluate('d').value());
}
+ } catch (e) {
+ exception = e;
}
};
@@ -56,3 +51,4 @@ f();
// Make sure that the debug event listener vas invoked.
assertEquals(3, breakPointCount);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/es6/reflect.js b/deps/v8/test/mjsunit/es6/reflect.js
index d597a78901..9e21717a99 100644
--- a/deps/v8/test/mjsunit/es6/reflect.js
+++ b/deps/v8/test/mjsunit/es6/reflect.js
@@ -273,6 +273,14 @@ function prepare(target) {
})();
+(function testReflectSetArrayLength() {
+ var y = [];
+ Object.defineProperty(y, 0, {value: 42, configurable: false});
+ assertFalse(Reflect.set(y, 'length', 0));
+ assertTrue(Reflect.set(y, 'length', 2));
+})();
+
+
////////////////////////////////////////////////////////////////////////////////
// Reflect.has
@@ -352,6 +360,14 @@ function prepare(target) {
})();
+(function testReflectDefinePropertyArrayLength() {
+ var y = [];
+ Object.defineProperty(y, 0, {value: 42, configurable: false});
+ assertFalse(Reflect.defineProperty(y, 'length', {value: 0}));
+ assertTrue(Reflect.defineProperty(y, 'length', {value: 2}));
+})();
+
+
// See reflect-define-property.js for further tests.
diff --git a/deps/v8/test/mjsunit/es6/regexp-flags.js b/deps/v8/test/mjsunit/es6/regexp-flags.js
index 2bcccfa760..9760cf8d5d 100644
--- a/deps/v8/test/mjsunit/es6/regexp-flags.js
+++ b/deps/v8/test/mjsunit/es6/regexp-flags.js
@@ -45,8 +45,12 @@ assertEquals(4, get_count);
// Overridden flag getters affect string.replace
// TODO(adamk): Add more tests here once we've switched
// to use [[OriginalFlags]] in more cases.
-assertEquals(expected, string.replace(r3, "X"));
-assertEquals(5, get_count);
+// TODO(jgruber): This exact case actually causes an infinite loop in the spec
+// (@@replace sees global = true while BuiltinExec sees global = false).
+// Comment the test for now and remove / fix once this has been resolved on
+// the spec side.
+//assertEquals(expected, string.replace(r3, "X"));
+//assertEquals(5, get_count);
function testName(name) {
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-468661.js b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
index 4a58a71d30..db71352f74 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-468661.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
@@ -7,6 +7,7 @@
Debug = debug.Debug
var exception = null;
var break_count = 0;
+const expected_breaks = 9;
var expected_values =
[ReferenceError, undefined, 0, 0, 0, 0, 1,
@@ -16,12 +17,6 @@ function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
- // Count number of expected breakpoints in this source file.
- if (!break_count) {
- var source_text = exec_state.frame(0).func().script().source();
- expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
- print("Expected breaks: " + expected_breaks);
- }
var frameMirror = exec_state.frame(0);
var v = null;;
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js b/deps/v8/test/mjsunit/es6/regress/regress-5598.js
index 0bfefae4b8..600a866470 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases2.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-5598.js
@@ -2,7 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
-// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
+// Flags: --turbo --turbo-escape --allow-natives-syntax
-runPart(2);
+function fn(a) {
+ var [b] = a;
+ return b;
+}
+
+fn('a');
+fn('a');
+%OptimizeFunctionOnNextCall(fn);
+
+fn('a');
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-660925.js b/deps/v8/test/mjsunit/es6/regress/regress-660925.js
new file mode 100644
index 0000000000..12c370cb6c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-660925.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let array = new Array(0xFFFFFFFF);
+let it = array.keys();
+assertEquals({ value: 0, done: false }, it.next());
+
+it = array.entries();
+assertEquals({ value: [0, undefined], done: false }, it.next());
+
+it = array[Symbol.iterator]();
+assertEquals({ value: undefined, done: false }, it.next());
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-666622.js b/deps/v8/test/mjsunit/es6/regress/regress-666622.js
new file mode 100644
index 0000000000..56731ab28e
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-666622.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function iterateArray() {
+ var array = new Array();
+ var it = array.entries();
+ it.next();
+}
+
+function iterateTypedArray() {
+ var array = new Uint8Array();
+ var it = array.entries();
+ it.next();
+}
+
+function testArray() {
+ iterateArray();
+ try {
+ } catch (e) {
+ }
+}
+testArray();
+testArray();
+%OptimizeFunctionOnNextCall(testArray);
+testArray();
+
+function testTypedArray() {
+ iterateTypedArray();
+ try {
+ } catch (e) {
+ }
+}
+testTypedArray();
+testTypedArray();
+%OptimizeFunctionOnNextCall(testTypedArray);
+testTypedArray();
diff --git a/deps/v8/test/mjsunit/es6/rest-params.js b/deps/v8/test/mjsunit/es6/rest-params.js
index 9afe9b409e..2f122daffd 100644
--- a/deps/v8/test/mjsunit/es6/rest-params.js
+++ b/deps/v8/test/mjsunit/es6/rest-params.js
@@ -136,11 +136,9 @@ var O = {
(function testUnmappedArguments() {
// Strict/Unmapped arguments should always be used for functions with rest
// parameters
- assertThrows(function(...rest) { return arguments.caller; }, TypeError);
assertThrows(function(...rest) { return arguments.callee; }, TypeError);
// TODO(caitp): figure out why this doesn't throw sometimes, even though the
// getter always does =)
- // assertThrows(function(...rest) { arguments.caller = 1; }, TypeError);
// assertThrows(function(...rest) { arguments.callee = 1; }, TypeError);
})();
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js b/deps/v8/test/mjsunit/es6/sloppy-restrictive-block-function.js
index 07ce64b9ac..65734acd03 100644
--- a/deps/v8/test/mjsunit/harmony/sloppy-restrictive-block-function.js
+++ b/deps/v8/test/mjsunit/es6/sloppy-restrictive-block-function.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-restrictive-declarations
-
// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
// Annex B 3.4 FunctionDeclarations in IfStatement Statement Clauses
// In sloppy mode, function declarations in if statements act like
diff --git a/deps/v8/test/mjsunit/es6/spread-call.js b/deps/v8/test/mjsunit/es6/spread-call.js
index 05e17741d6..de38f129cd 100644
--- a/deps/v8/test/mjsunit/es6/spread-call.js
+++ b/deps/v8/test/mjsunit/es6/spread-call.js
@@ -24,6 +24,13 @@
assertEquals(5, countArgs(...[1, 2, 3, 4, 5]));
assertEquals(6, countArgs(...[1, 2, 3, 4, 5, 6]));
+ assertEquals(1, countArgs(...[1.1]));
+ assertEquals(2, countArgs(...[1.1, 2.2]));
+ assertEquals(3, countArgs(...[1.1, 2.2, 3.3]));
+ assertEquals(4, countArgs(...[1.1, 2.2, 3.3, 4.4]));
+ assertEquals(5, countArgs(...[1.1, 2.2, 3.3, 4.4, 5.5]));
+ assertEquals(6, countArgs(...[1.1, 2.2, 3.3, 4.4, 5.5, 6.6]));
+
assertEquals(1, countArgs(...new Set([1])));
assertEquals(2, countArgs(...new Set([1, 2])));
assertEquals(3, countArgs(...new Set([1, 2, 3])));
@@ -346,6 +353,91 @@
assertEquals("ABXYC1C2DEBXYC1C2", log);
})();
+(function testArrayPrototypeHoleGetterModifiesIteratorPrototypeNext() {
+ function sum() {
+ var sum = arguments[0];
+ for (var i = 1; i < arguments.length; ++i) {
+ sum += arguments[i];
+ }
+ return sum;
+ }
+ var a = [1, 2];
+ a[3] = 4;
+ var called = 0;
+
+ Object.defineProperty(Array.prototype, 2, {
+ get: function() {
+ var ai = a[Symbol.iterator]();
+ var original_next = ai.__proto__["next"];
+ Object.defineProperty(ai.__proto__, "next", {
+ get: function() {
+ called++;
+ return original_next;
+ }
+ });
+ return 3;
+ },
+ configurable: true
+ });
+
+ assertEquals(10, sum(...a));
+ assertEquals(2, called);
+
+ Object.defineProperty(Array.prototype, 2, {});
+})();
+
+(function testArrayHasOtherPrototype() {
+ function countArgs() { return arguments.length; }
+ var a = [1, 2, 3];
+ var b = {};
+ Object.defineProperty(b, Symbol.iterator, {
+ value: function*() {
+ yield 4;
+ },
+ configurable: true
+ });
+
+ Object.setPrototypeOf(a, b);
+
+ assertEquals(1, countArgs(...a));
+})();
+
+(function testArrayIteratorPrototypeGetter() {
+ function countArgs() { return arguments.length; }
+ var a = [1, 2, 3];
+ var ai = a[Symbol.iterator]();
+ var called = 0;
+
+ var original_next = ai.__proto__["next"];
+
+ Object.defineProperty(ai.__proto__, "next", {
+ get: function() {
+ called++;
+ return original_next;
+ }
+ });
+
+ countArgs(...a);
+
+ // should be called 4 times; 3 for the values, 1 for the final
+ // {value: undefined, done: true} pair
+ assertEquals(4, called);
+})();
+
+(function testArrayIteratorPrototypeModified() {
+ function countArgs() { return arguments.length; }
+ var a = [1,2,3];
+ var ai = a[Symbol.iterator]();
+ Object.defineProperty(ai.__proto__, "next", {
+ value: function() {
+ return {value: undefined, done: true};
+ },
+ configurable: true
+ });
+
+ assertEquals(0, countArgs(...a));
+
+})();
(function testCustomArrayPrototypeIterator() {
var origIterator =
@@ -370,3 +462,29 @@
Object.defineProperty(Array.prototype, Symbol.iterator, origIterator);
})();
+
+(function testGetPropertyIteratorCalledExactlyOnce() {
+ function countArgs() { return arguments.length; }
+ var a = [1, 2, 3];
+ var called = 0;
+
+ Object.defineProperty(Array.prototype, Symbol.iterator, {
+ value: function*() {
+ yield 1;
+ yield 2;
+ },
+ configurable: true
+ });
+
+ var it = a[Symbol.iterator];
+ Object.defineProperty(a, Symbol.iterator, {
+ get: function() {
+ called++;
+ return it;
+ }
+ });
+
+ countArgs(...a);
+
+ assertEquals(1, called);
+})();
diff --git a/deps/v8/test/mjsunit/es6/string-endswith.js b/deps/v8/test/mjsunit/es6/string-endswith.js
index 4246f166a4..b776ccc4ba 100644
--- a/deps/v8/test/mjsunit/es6/string-endswith.js
+++ b/deps/v8/test/mjsunit/es6/string-endswith.js
@@ -34,6 +34,7 @@ assertFalse(testString.endsWith("world"));
assertFalse(testString.endsWith("Hello World!"));
assertFalse(testString.endsWith(null));
assertFalse(testString.endsWith(undefined));
+assertFalse(testString.endsWith());
assertTrue("null".endsWith(null));
assertTrue("undefined".endsWith(undefined));
diff --git a/deps/v8/test/mjsunit/es6/string-includes.js b/deps/v8/test/mjsunit/es6/string-includes.js
index c825ffdc68..60a7ddd685 100644
--- a/deps/v8/test/mjsunit/es6/string-includes.js
+++ b/deps/v8/test/mjsunit/es6/string-includes.js
@@ -27,6 +27,11 @@
assertEquals(1, String.prototype.includes.length);
+var s = 'a';
+assertFalse(s.includes(null));
+assertFalse(s.includes(undefined));
+assertFalse(s.includes());
+
var reString = "asdf[a-z]+(asdf)?";
assertTrue(reString.includes("[a-z]+"));
assertTrue(reString.includes("(asdf)?"));
diff --git a/deps/v8/test/mjsunit/es6/string-startswith.js b/deps/v8/test/mjsunit/es6/string-startswith.js
index f38f7b9cb8..449d07a56d 100644
--- a/deps/v8/test/mjsunit/es6/string-startswith.js
+++ b/deps/v8/test/mjsunit/es6/string-startswith.js
@@ -34,6 +34,7 @@ assertFalse(testString.startsWith("hello"));
assertFalse(testString.startsWith("Hello World!"));
assertFalse(testString.startsWith(null));
assertFalse(testString.startsWith(undefined));
+assertFalse(testString.startsWith());
assertTrue("null".startsWith(null));
assertTrue("undefined".startsWith(undefined));
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index a21afb3770..d83e2174ec 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -353,7 +353,7 @@ function TestKeyDelete(obj) {
}
-var objs = [{}, [], Object.create(null), Object(1), new Map, function(){}]
+var objs = [{}, [], Object.create({}), Object(1), new Map, function(){}]
for (var i in objs) {
var obj = objs[i]
diff --git a/deps/v8/test/mjsunit/harmony/object-entries.js b/deps/v8/test/mjsunit/es8/object-entries.js
index 101988a052..71f121b896 100644
--- a/deps/v8/test/mjsunit/harmony/object-entries.js
+++ b/deps/v8/test/mjsunit/es8/object-entries.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-values-entries
// Flags: --allow-natives-syntax
function TestMeta() {
diff --git a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js b/deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js
index c71b20a226..f88840dba4 100644
--- a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
+++ b/deps/v8/test/mjsunit/es8/object-get-own-property-descriptors.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-own-property-descriptors
// Flags: --allow-natives-syntax
function DataDescriptor(value) {
diff --git a/deps/v8/test/mjsunit/harmony/object-values.js b/deps/v8/test/mjsunit/es8/object-values.js
index 141070db9c..ed200373a9 100644
--- a/deps/v8/test/mjsunit/harmony/object-values.js
+++ b/deps/v8/test/mjsunit/es8/object-values.js
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object-values-entries
// Flags: --allow-natives-syntax
function TestMeta() {
diff --git a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
index bd64e3d168..6afc07a488 100644
--- a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
+++ b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length 1 --allow-natives-syntax
+// Flags: --min-preparse-length=1 --allow-natives-syntax --lazy-inner-functions
// Test that the information on which variables to allocate in context doesn't
// change when recompiling.
@@ -50,6 +50,24 @@
}
})();
+(function TestConstInInnerFunction() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ const a = 0;
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
(function TestInnerFunctionParameter() {
var a = 1;
var b = 2;
@@ -67,6 +85,23 @@
}
})();
+(function TestInnerFunctionRestParameter() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner(...a) {
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
(function TestInnerInnerFunctionParameter() {
var a = 1;
var b = 2;
@@ -84,6 +119,23 @@
}
})();
+(function TestInnerInnerFunctionRestParameter() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ function innerinner(...a) { a; b; c; }
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
(function TestInnerArrowFunctionParameter() {
var a = 1;
var b = 2;
@@ -101,6 +153,23 @@
}
})();
+(function TestInnerArrowFunctionRestParameter() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ var f = (...a) => a + b + c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
(function TestInnerFunctionInnerFunction() {
var a = 1;
var b = 2;
@@ -252,9 +321,171 @@
}
})();
+(function TestInnerFunctionDestructuring1() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ var [a, a2] = [1, 2];
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring2() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ let [a, a2] = [1, 2];
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring3() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ const [a, a2] = [1, 2];
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring4() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ var [a2, ...a] = [1, 2];
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring5() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ let [a2, ...a] = [1, 2];
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring6() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ const [a2, ...a] = [1, 2];
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring7() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ var {a, a2} = {a: 1, a2: 2};
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring8() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ let {a, a2} = {a: 1, a2: 2};
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
+(function TestInnerFunctionDestructuring9() {
+ var a = 1;
+ var b = 2;
+ var c = 3;
+ function inner() {
+ const {a, a2} = {a: 1, a2: 2};
+ a; b; c;
+ }
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ assertEquals(1, a);
+ assertEquals(2, b);
+ assertEquals(3, c);
+ }
+})();
+
// A cluster of similar tests where the inner function only declares a variable
// whose name clashes with an outer function variable name, but doesn't use it.
-(function TestRegress650969_1() {
+(function TestRegress650969_1_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -266,67 +497,67 @@
}
})();
-(function TestRegress650969_2() {
+(function TestRegress650969_1_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
}
var a;
function inner() {
- var a = 6;
+ let a;
}
}
})();
-(function TestRegress650969_3() {
+(function TestRegress650969_2_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
}
var a;
function inner() {
- var a, b;
+ var a = 6;
}
}
})();
-(function TestRegress650969_4() {
+(function TestRegress650969_2_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
}
var a;
function inner() {
- var a = 6, b;
+ let a = 6;
}
}
})();
-(function TestRegress650969_5() {
+(function TestRegress650969_2_const() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
}
var a;
function inner() {
- let a;
+ const a = 6;
}
}
})();
-(function TestRegress650969_6() {
+(function TestRegress650969_3_var() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
}
var a;
function inner() {
- let a = 6;
+ var a, b;
}
}
})();
-(function TestRegress650969_7() {
+(function TestRegress650969_3_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -338,7 +569,19 @@
}
})();
-(function TestRegress650969_8() {
+(function TestRegress650969_4_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var a = 6, b;
+ }
+ }
+})();
+
+(function TestRegress650969_4_let() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
%OptimizeOsr();
@@ -350,6 +593,18 @@
}
})();
+(function TestRegress650969_4_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const a = 0, b = 0;
+ }
+ }
+})();
+
(function TestRegress650969_9() {
for (var i = 0; i < 3; ++i) {
if (i == 1) {
@@ -360,3 +615,307 @@
}
}
})();
+
+(function TestRegress650969_10() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner(...a) {
+ }
+ }
+})();
+
+(function TestRegress650969_11_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var [a, b] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_11_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let [a, b] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_11_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const [a, b] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_12_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var [b, a] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_12_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let [b, a] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_12_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const [b, a] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_13_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var [b, ...a] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_13_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let [b, ...a] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_13_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const [b, ...a] = [1, 2];
+ }
+ }
+})();
+
+(function TestRegress650969_14_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var {a, b} = {a: 1, b: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_14_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let {a, b} = {a: 1, b: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_14_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const {a, b} = {a: 1, b: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_15_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var {b: {a}, c} = {b: {a: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_15_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let {b: {a}, c} = {b: {a: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_15_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const {b: {a}, c} = {b: {a: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_16_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ var {a: {b}, c} = {a: {b: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_16_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ let {a: {b}, c} = {a: {b: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_16_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ const {a: {b}, c} = {a: {b: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_17_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ for (var a = 0; 0 == 1; ) { }
+ }
+ }
+})();
+
+(function TestRegress650969_17_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ for (let a = 0; 0 == 1; ) { }
+ }
+ }
+})();
+
+(function TestRegress650969_17_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a;
+ function inner() {
+ for (const a = 0; 0 == 1; ) { }
+ }
+ }
+})();
+
+// Regression tests for an intermediate stage where unresolved references were
+// discarded too aggressively.
+(function TestRegress650969_sidetrack_var() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a = 0;
+ function inner() {
+ return a;
+ var {b: {a}, c} = {b: {a: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_sidetrack_let() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a = 0;
+ function inner() {
+ return a;
+ let {b: {a}, c} = {b: {a: 1}, c: 2};
+ }
+ }
+})();
+
+(function TestRegress650969_sidetrack_const() {
+ for (var i = 0; i < 3; ++i) {
+ if (i == 1) {
+ %OptimizeOsr();
+ }
+ var a = 0;
+ function inner() {
+ return a;
+ const {b: {a}, c} = {b: {a: 1}, c: 2};
+ }
+ }
+})();
diff --git a/deps/v8/test/mjsunit/for-in.js b/deps/v8/test/mjsunit/for-in.js
index 29d7445351..94b39cfd57 100644
--- a/deps/v8/test/mjsunit/for-in.js
+++ b/deps/v8/test/mjsunit/for-in.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --noharmony-for-in
-
function props(x) {
var array = [];
for (var p in x) array.push(p);
@@ -141,6 +139,13 @@ function props(x) {
(function forInInitialize() {
for (var hest = 'hest' in {}) { }
assertEquals('hest', hest, "empty-no-override");
+
+ // Lexical variables are disallowed
+ assertThrows("for (const x = 0 in {});", SyntaxError);
+ assertThrows("for (let x = 0 in {});", SyntaxError);
+
+ // In strict mode, var is disallowed
+ assertThrows("'use strict'; for (var x = 0 in {});", SyntaxError);
})();
(function forInObjects() {
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js b/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js
deleted file mode 100644
index dfafa5af26..0000000000
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases1.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
-// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
-
-runPart(1);
diff --git a/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js b/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js
index 3d72549d2a..b9b92841e7 100644
--- a/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js
+++ b/deps/v8/test/mjsunit/harmony/async-function-debug-scopes.js
@@ -6,8 +6,6 @@
var Debug = debug.Debug;
-var AsyncFunction = (async function() {}).constructor;
-
async function thrower() { throw 'Exception'; }
async function test(name, func, args, handler, continuation) {
diff --git a/deps/v8/test/mjsunit/harmony/for-in.js b/deps/v8/test/mjsunit/harmony/for-in.js
deleted file mode 100644
index 58e343b903..0000000000
--- a/deps/v8/test/mjsunit/harmony/for-in.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-for-in
-
-assertThrows("for (var x = 0 in {});", SyntaxError);
-assertThrows("for (const x = 0 in {});", SyntaxError);
-assertThrows("for (let x = 0 in {});", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/generators-turbo.js b/deps/v8/test/mjsunit/harmony/generators-turbo.js
deleted file mode 100644
index 61334b93f8..0000000000
--- a/deps/v8/test/mjsunit/harmony/generators-turbo.js
+++ /dev/null
@@ -1,667 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --ignition --harmony-do-expressions
-// Flags: --allow-natives-syntax --turbo --turbo-from-bytecode
-
-
-// This file is identical to mjsunit/harmony/generators.js, except for its Flags
-// lines. The purpose is to explicitly mention --turbo-from-bytecode such that
-// Clusterfuzz can thoroughly test the new generators implementation.
-
-
-function MaybeOptimizeOrDeoptimize(f) {
- let x = Math.random(); // --random-seed makes this deterministic
- if (x <= 0.33) {
- %OptimizeFunctionOnNextCall(f);
- } else if (x <= 0.66) {
- %DeoptimizeFunction(f);
- }
-}
-
-function Next(generator, ...args) {
- MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
- return generator.next(...args);
-}
-
-function Return(generator, ...args) {
- MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
- return generator.return(...args);
-}
-
-function Throw(generator, ...args) {
- MaybeOptimizeOrDeoptimize(%GeneratorGetFunction(generator));
- return generator.throw(...args);
-}
-
-
-{ // yield in try-catch
-
- let g = function*() {
- try {yield 1} catch (error) {assertEquals("caught", error)}
- };
-
- assertThrowsEquals(() => Throw(g(), "not caught"), "not caught");
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Throw(x, "caught"));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x));
- assertThrowsEquals(() => Throw(x, "not caught"), "not caught");
- }
-}
-
-
-{ // return that doesn't close
- let g = function*() { try {return 42} finally {yield 43} };
-
- {
- let x = g();
- assertEquals({value: 43, done: false}, Next(x));
- assertEquals({value: 42, done: true}, Next(x));
- }
-}
-
-
-{ // return that doesn't close
- let x;
- let g = function*() { try {return 42} finally {Throw(x, 666)} };
-
- {
- x = g();
- assertThrows(() => Next(x), TypeError); // still executing
- }
-}
-
-
-{ // yield in try-finally, finally clause performs return
-
- let g = function*() { try {yield 42} finally {return 13} };
-
- { // "return" closes at suspendedStart
- let x = g();
- assertEquals({value: 666, done: true}, Return(x, 666));
- assertEquals({value: undefined, done: true}, Next(x, 42));
- assertThrowsEquals(() => Throw(x, 43), 43);
- assertEquals({value: 42, done: true}, Return(x, 42));
- }
-
- { // "throw" closes at suspendedStart
- let x = g();
- assertThrowsEquals(() => Throw(x, 666), 666);
- assertEquals({value: undefined, done: true}, Next(x, 42));
- assertEquals({value: 43, done: true}, Return(x, 43));
- assertThrowsEquals(() => Throw(x, 44), 44);
- }
-
- { // "next" closes at suspendedYield
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 13, done: true}, Next(x, 666));
- assertEquals({value: undefined, done: true}, Next(x, 666));
- assertThrowsEquals(() => Throw(x, 666), 666);
- }
-
- { // "return" closes at suspendedYield
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 13, done: true}, Return(x, 666));
- assertEquals({value: undefined, done: true}, Next(x, 666));
- assertEquals({value: 666, done: true}, Return(x, 666));
- }
-
- { // "throw" closes at suspendedYield
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 13, done: true}, Throw(x, 666));
- assertThrowsEquals(() => Throw(x, 666), 666);
- assertEquals({value: undefined, done: true}, Next(x, 666));
- }
-}
-
-
-{ // yield in try-finally, finally clause doesn't perform return
-
- let g = function*() { try {yield 42} finally {13} };
-
- { // "return" closes at suspendedStart
- let x = g();
- assertEquals({value: 666, done: true}, Return(x, 666));
- assertEquals({value: undefined, done: true}, Next(x, 42));
- assertThrowsEquals(() => Throw(x, 43), 43);
- assertEquals({value: 42, done: true}, Return(x, 42));
- }
-
- { // "throw" closes at suspendedStart
- let x = g();
- assertThrowsEquals(() => Throw(x, 666), 666);
- assertEquals({value: undefined, done: true}, Next(x, 42));
- assertEquals({value: 43, done: true}, Return(x, 43));
- assertThrowsEquals(() => Throw(x, 44), 44);
- }
-
- { // "next" closes at suspendedYield
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x, 666));
- assertEquals({value: undefined, done: true}, Next(x, 666));
- assertThrowsEquals(() => Throw(x, 666), 666);
- assertEquals({value: 42, done: true}, Return(x, 42));
- }
-
- { // "return" closes at suspendedYield
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 666, done: true}, Return(x, 666));
- assertEquals({value: undefined, done: true}, Next(x, 666));
- assertThrowsEquals(() => Throw(x, 44), 44);
- assertEquals({value: 42, done: true}, Return(x, 42));
- }
-
- { // "throw" closes at suspendedYield
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertThrowsEquals(() => Throw(x, 666), 666);
- assertEquals({value: undefined, done: true}, Next(x, 666));
- assertThrowsEquals(() => Throw(x, 666), 666);
- assertEquals({value: 42, done: true}, Return(x, 42));
- }
-}
-
-
-{ // yield in try-finally, finally clause yields and performs return
-
- let g = function*() { try {yield 42} finally {yield 43; return 13} };
-
- {
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Return(x, 666));
- assertEquals({value: 13, done: true}, Next(x));
- assertEquals({value: 666, done: true}, Return(x, 666));
- }
-
- {
- let x = g();
- assertEquals({value: 666, done: true}, Return(x, 666));
- assertEquals({value: undefined, done: true}, Next(x));
- assertEquals({value: 666, done: true}, Return(x, 666));
- }
-}
-
-
-{ // yield in try-finally, finally clause yields and doesn't perform return
-
- let g = function*() { try {yield 42} finally {yield 43; 13} };
-
- {
- let x = g();
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Return(x, 666));
- assertEquals({value: 666, done: true}, Next(x));
- assertEquals({value: 5, done: true}, Return(x, 5));
- }
-
- {
- let x = g();
- assertEquals({value: 666, done: true}, Return(x, 666));
- assertEquals({value: undefined, done: true}, Next(x));
- assertEquals({value: 666, done: true}, Return(x, 666));
- }
-}
-
-
-{ // yield*, finally clause performs return
-
- let h = function*() { try {yield 42} finally {yield 43; return 13} };
- let g = function*() { yield 1; yield yield* h(); };
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Next(x, 666));
- assertEquals({value: 13, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Return(x, 666));
- assertEquals({value: 13, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Throw(x, 666));
- assertEquals({value: 13, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x));
- }
-}
-
-
-{ // yield*, finally clause does not perform return
-
- let h = function*() { try {yield 42} finally {yield 43; 13} };
- let g = function*() { yield 1; yield yield* h(); };
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Next(x, 666));
- assertEquals({value: undefined, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Return(x, 44));
- assertEquals({value: 44, done: false}, Next(x));
- assertEquals({value: undefined, done: true}, Next(x));
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 42, done: false}, Next(x));
- assertEquals({value: 43, done: false}, Throw(x, 666));
- assertThrowsEquals(() => Next(x), 666);
- }
-}
-
-
-{ // yield*, .return argument is final result
-
- function* inner() {
- yield 2;
- }
-
- function* g() {
- yield 1;
- return yield* inner();
- }
-
- {
- let x = g();
- assertEquals({value: 1, done: false}, Next(x));
- assertEquals({value: 2, done: false}, Next(x));
- assertEquals({value: 42, done: true}, Return(x, 42));
- }
-}
-
-
-// More or less random tests from here on.
-
-
-{
- function* foo() { }
- let g = foo();
- assertEquals({value: undefined, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() { return new.target }
- let g = foo();
- assertEquals({value: undefined, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() { throw 666; return 42}
- let g = foo();
- assertThrowsEquals(() => Next(g), 666);
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo(a) { return a; }
- let g = foo(42);
- assertEquals({value: 42, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo(a) { a.iwashere = true; return a; }
- let x = {};
- let g = foo(x);
- assertEquals({value: {iwashere: true}, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- let a = 42;
- function* foo() { return a; }
- let g = foo();
- assertEquals({value: 42, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- let a = 40;
- function* foo(b) { return a + b; }
- let g = foo(2);
- assertEquals({value: 42, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- let a = 40;
- function* foo(b) { a--; b++; return a + b; }
- let g = foo(2);
- assertEquals({value: 42, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- let g;
- function* foo() { Next(g) }
- g = foo();
- assertThrows(() => Next(g), TypeError);
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() { yield 2; yield 3; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-
-{
- function* foo() { yield 2; if (true) { yield 3 }; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() { yield 2; if (true) { yield 3; yield 4 } }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() { yield 2; if (false) { yield 3 }; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() { yield 2; while (true) { yield 3 }; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
-}
-
-{
- function* foo() { yield 2; (yield 3) + 42; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
-}
-
-{
- function* foo() { yield 2; (do {yield 3}) + 42; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
-}
-
-{
- function* foo() { yield 2; return (yield 3) + 42; yield 4 }
- g = foo();
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 42, done: true}, Next(g, 0));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- let x = 42;
- function* foo() {
- yield x;
- for (let x in {a: 1, b: 2}) {
- let i = 2;
- yield x;
- yield i;
- do {
- yield i;
- } while (i-- > 0);
- }
- yield x;
- return 5;
- }
- g = foo();
- assertEquals({value: 42, done: false}, Next(g));
- assertEquals({value: 'a', done: false}, Next(g));
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 1, done: false}, Next(g));
- assertEquals({value: 0, done: false}, Next(g));
- assertEquals({value: 'b', done: false}, Next(g));
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 1, done: false}, Next(g));
- assertEquals({value: 0, done: false}, Next(g));
- assertEquals({value: 42, done: false}, Next(g));
- assertEquals({value: 5, done: true}, Next(g));
-}
-
-{
- let a = 3;
- function* foo() {
- let b = 4;
- yield 1;
- { let c = 5; yield 2; yield a; yield b; yield c; }
- }
- g = foo();
- assertEquals({value: 1, done: false}, Next(g));
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 3, done: false}, Next(g));
- assertEquals({value: 4, done: false}, Next(g));
- assertEquals({value: 5, done: false}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() {
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- yield 42;
- }
- g = foo();
- for (let i = 0; i < 100; ++i) {
- assertEquals({value: 42, done: false}, i%25 === 0 ? Next(g) : g.next());
- }
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- function* foo() {
- for (let i = 0; i < 3; ++i) {
- let j = 0
- yield i;
- do {
- yield (i + 10);
- } while (++j < 2);
- }
- }
- g = foo();
- assertEquals({value: 0, done: false}, Next(g));
- assertEquals({value: 10, done: false}, Next(g));
- assertEquals({value: 10, done: false}, Next(g));
- assertEquals({value: 1, done: false}, Next(g));
- assertEquals({value: 11, done: false}, Next(g));
- assertEquals({value: 11, done: false}, Next(g));
- assertEquals({value: 2, done: false}, Next(g));
- assertEquals({value: 12, done: false}, Next(g));
- assertEquals({value: 12, done: false}, Next(g));
- assertEquals({value: undefined, done: true}, Next(g));
-}
-
-{
- let foo = function*() {
- while (true) {
- if (true || false) yield 42;
- continue;
- }
- }
- g = foo();
- assertEquals({value: 42, done: false}, Next(g));
- assertEquals({value: 42, done: false}, Next(g));
- assertEquals({value: 42, done: false}, Next(g));
-}
-
-{
- let foo = function*() {
- yield* (function*() { yield 42; }());
- assertUnreachable();
- }
- g = foo();
- assertEquals({value: 42, done: false}, Next(g));
- assertEquals({value: 23, done: true}, Return(g, 23));
-}
-
-{
- let iterable = {
- [Symbol.iterator]() {
- return { next() { return {} } };
- }
- };
- let foo = function*() { yield* iterable };
- g = foo();
- g.next();
- assertThrows(() => Throw(g), TypeError);
-}
diff --git a/deps/v8/test/mjsunit/harmony/private.js b/deps/v8/test/mjsunit/harmony/private.js
index 7d34db40a8..cd65af1c70 100644
--- a/deps/v8/test/mjsunit/harmony/private.js
+++ b/deps/v8/test/mjsunit/harmony/private.js
@@ -295,7 +295,7 @@ function TestKeyDelete(obj) {
}
-var objs = [{}, [], Object.create(null), Object(1), new Map, function(){}]
+var objs = [{}, [], Object.create({}), Object(1), new Map, function(){}]
for (var i in objs) {
var obj = objs[i]
diff --git a/deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js b/deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js
deleted file mode 100644
index 0702320864..0000000000
--- a/deps/v8/test/mjsunit/harmony/sloppy-implicit-block-function.js
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-restrictive-declarations
-
-// ES#sec-functiondeclarations-in-ifstatement-statement-clauses
-// Annex B 3.4 FunctionDeclarations in IfStatement Statement Clauses
-// In sloppy mode, function declarations in if statements act like
-// they have a block around them. Prohibited in strict mode.
-(function() {
- assertEquals(undefined, f);
- if (false) function f() { };
- assertEquals(undefined, f);
-})();
-
-(function() {
- assertEquals(undefined, f);
- if (true) function f() { };
- assertEquals('function', typeof f);
-})();
-
-(function() {
- assertEquals(undefined, f);
- if (true) {} else function f() { };
- assertEquals(undefined, f);
-})();
-
-(function() {
- assertEquals(undefined, f);
- if (false) {} else function f() { };
- assertEquals('function', typeof f);
-})();
-
-// For legacy reasons, we also support these types of semantics as
-// the body of a for or with statement.
-(function() {
- for (;false;) function f() { };
- assertEquals(undefined, f);
-})();
-
-(function() {
- for (var x in {}) function f() { };
- assertEquals(undefined, f);
-})();
-
-(function() {
- var x;
- for (x in {}) function f() { };
- assertEquals(undefined, f);
-})();
-
-(function() {
- for (var i = 0; i < 1; i++) function f() { };
- assertEquals('function', typeof f);
-})();
-
-(function() {
- for (var x in {a: 1}) function f() { };
- assertEquals('function', typeof f);
-})();
-
-(function() {
- var x;
- for (x in {a: 1}) function f() { };
- assertEquals('function', typeof f);
-})();
-
-(function() {
- with ({}) function f() { };
- assertEquals('function', typeof f);
-})();
-
-(function() {
- do function f() {} while (0);
- assertEquals('function', typeof f);
-})();
-
-// Labeled function declarations undergo the same hoisting/FiB semantics as if
-// they were unalbeled.
-(function() {
- function bar() {
- return f;
- x: function f() {}
- }
- assertEquals('function', typeof bar());
-})();
-
-(function() {
- function bar() {
- return f;
- {
- x: function f() {}
- }
- }
- assertEquals(undefined, bar());
-})();
diff --git a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
index d4f40bad79..8f9c1be3af 100644
--- a/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
+++ b/deps/v8/test/mjsunit/ignition/osr-from-bytecode.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --ignition --ignition-osr --turbo-from-bytecode
+// Flags: --allow-natives-syntax --ignition --ignition-osr
function f() {
for (var i = 0; i < 10; i++) {
diff --git a/deps/v8/test/mjsunit/ignition/regress-662418.js b/deps/v8/test/mjsunit/ignition/regress-662418.js
new file mode 100644
index 0000000000..5ac3ff1952
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-662418.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var valueof_calls = 0;
+
+var v = {
+ toString: function() {
+ var z = w++;
+ }
+};
+var w = {
+ valueOf: function() {
+ valueof_calls++;
+ }
+};
+var x = { [v]: 'B' };
+assertTrue(valueof_calls == 1);
diff --git a/deps/v8/test/mjsunit/ignition/regress-664146.js b/deps/v8/test/mjsunit/ignition/regress-664146.js
new file mode 100644
index 0000000000..415713a92f
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-664146.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var foo_call_count = 0;
+function foo() { foo_call_count++; }
+
+// These || and && combinations shouldn't call foo().
+(true || foo()) ? 1 : 2;
+assertTrue(foo_call_count == 0);
+(false && foo()) ? 1 : 2;
+assertTrue(foo_call_count == 0);
+
+// These || and && combinations should all call foo().
+(foo() || true) ? 1 : 2;
+assertTrue(foo_call_count == 1);
+(false || foo()) ? 1 : 2;
+assertTrue(foo_call_count == 2);
+(foo() || false) ? 1 : 2;
+assertTrue(foo_call_count == 3);
+
+(true && foo()) ? 1 : 2;
+assertTrue(foo_call_count == 4);
+(foo() && true) ? 1 : 2;
+assertTrue(foo_call_count == 5);
+(foo() && false) ? 1 : 2;
+assertTrue(foo_call_count == 6);
diff --git a/deps/v8/test/mjsunit/ignition/regress-672027.js b/deps/v8/test/mjsunit/ignition/regress-672027.js
new file mode 100644
index 0000000000..b0182675cf
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/regress-672027.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --max-old-space-size=100
+
+(function() {
+ var source = "[]"
+ for (var i = 0; i < 300; i++) {
+ source += ".concat([";
+ for (var j = 0; j < 1000; j++) {
+ source += "0,";
+ }
+ source += "0])"
+ }
+ eval(source);
+})();
diff --git a/deps/v8/test/mjsunit/invalid-lhs.js b/deps/v8/test/mjsunit/invalid-lhs.js
index d28dc9ccf8..92929b68e5 100644
--- a/deps/v8/test/mjsunit/invalid-lhs.js
+++ b/deps/v8/test/mjsunit/invalid-lhs.js
@@ -33,7 +33,7 @@ assertThrows("12 = 12", ReferenceError);
assertThrows("x++ = 12", ReferenceError);
assertThrows("eval('var x') = 12", ReferenceError);
assertThrows("if (false) 12 = 12", ReferenceError);
-assertDoesNotThrow("if (false) eval('var x') = 12", ReferenceError);
+assertDoesNotThrow("if (false) eval('var x') = 12");
// Pre- and post-fix operations:
assertThrows("12++", ReferenceError);
@@ -46,20 +46,20 @@ assertThrows("if (false) 12++", ReferenceError);
assertThrows("if (false) 12--", ReferenceError);
assertThrows("if (false) ++12", ReferenceError);
assertThrows("if (false) --12", ReferenceError);
-assertDoesNotThrow("if (false) ++(eval('12'))", ReferenceError);
-assertDoesNotThrow("if (false) (eval('12'))++", ReferenceError);
+assertDoesNotThrow("if (false) ++(eval('12'))");
+assertDoesNotThrow("if (false) (eval('12'))++");
// For in:
assertThrows("for (12 in [1]) print(12);", SyntaxError);
assertThrows("for (eval('var x') in [1]) print(12);", ReferenceError);
assertThrows("if (false) for (12 in [1]) print(12);", SyntaxError);
-assertDoesNotThrow("if (false) for (eval('0') in [1]) print(12);", ReferenceError);
+assertDoesNotThrow("if (false) for (eval('0') in [1]) print(12);");
// For:
assertThrows("for (12 = 1;;) print(12);", ReferenceError);
assertThrows("for (eval('var x') = 1;;) print(12);", ReferenceError);
assertThrows("if (false) for (12 = 1;;) print(12);", ReferenceError);
-assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);", ReferenceError);
+assertDoesNotThrow("if (false) for (eval('var x') = 1;;) print(12);");
// Assignments to 'this'.
assertThrows("this = 42", ReferenceError);
diff --git a/deps/v8/test/mjsunit/lazy-inner-functions.js b/deps/v8/test/mjsunit/lazy-inner-functions.js
index 127d349b1b..dfcd6db853 100644
--- a/deps/v8/test/mjsunit/lazy-inner-functions.js
+++ b/deps/v8/test/mjsunit/lazy-inner-functions.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length 1
+// Flags: --min-preparse-length=1
(function TestLazyInnerFunctionCallsEval() {
var i = (function eager_outer() {
diff --git a/deps/v8/test/mjsunit/math-floor-part2.js b/deps/v8/test/mjsunit/math-floor-part2.js
index eab3ab3a2b..658979841a 100644
--- a/deps/v8/test/mjsunit/math-floor-part2.js
+++ b/deps/v8/test/mjsunit/math-floor-part2.js
@@ -109,6 +109,7 @@ function test() {
// Test in a loop to cover the custom IC and GC-related issues.
-for (var i = 0; i < 100; i++) {
+for (var i = 0; i < 10; i++) {
test();
+ new Array(i * 10000);
}
diff --git a/deps/v8/test/mjsunit/math-floor-part3.js b/deps/v8/test/mjsunit/math-floor-part3.js
index 6a536657ac..4ce645ea0d 100644
--- a/deps/v8/test/mjsunit/math-floor-part3.js
+++ b/deps/v8/test/mjsunit/math-floor-part3.js
@@ -111,6 +111,7 @@ function test() {
// Test in a loop to cover the custom IC and GC-related issues.
-for (var i = 0; i < 100; i++) {
+for (var i = 0; i < 10; i++) {
test();
+ new Array(i * 10000);
}
diff --git a/deps/v8/test/mjsunit/math-floor-part4.js b/deps/v8/test/mjsunit/math-floor-part4.js
index 9ae83d87c6..499200288e 100644
--- a/deps/v8/test/mjsunit/math-floor-part4.js
+++ b/deps/v8/test/mjsunit/math-floor-part4.js
@@ -93,11 +93,11 @@ function test() {
// Test in a loop to cover the custom IC and GC-related issues.
-for (var i = 0; i < 100; i++) {
+for (var i = 0; i < 10; i++) {
test();
+ new Array(i * 10000);
}
-
// Regression test for a bug where a negative zero coming from Math.floor
// was not properly handled by other operations.
function floorsum(i, n) {
diff --git a/deps/v8/test/mjsunit/mirror-script.js b/deps/v8/test/mjsunit/mirror-script.js
index 635c658ac3..74f37f80bb 100644
--- a/deps/v8/test/mjsunit/mirror-script.js
+++ b/deps/v8/test/mjsunit/mirror-script.js
@@ -83,7 +83,6 @@ function testScriptMirror(f, file_name, file_lines, type, compilation_type,
// Test the script mirror for different functions.
-testScriptMirror(function(){}, 'mirror-script.js', 90, 2, 0);
-testScriptMirror(Math.random, 'native math.js', -1, 0, 0);
-testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
-testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 88);
+testScriptMirror(function(){}, 'mirror-script.js', 89, 2, 0);
+testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 86);
+testScriptMirror(eval('(function(){\n })'), null, 2, 2, 1, '(function(){\n })', 87);
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 6a7c2da9e4..d4759a281f 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -117,6 +117,9 @@ var assertUnoptimized;
// Assert that a string contains another expected substring.
var assertContains;
+// Assert that a string matches a given regex.
+var assertMatches;
+
(function () { // Scope for utility functions.
@@ -201,6 +204,11 @@ var assertContains;
}
+ function failWithMessage(message) {
+ throw new MjsUnitAssertionError(message);
+ }
+
+
function fail(expectedText, found, name_opt) {
var message = "Fail" + "ure";
if (name_opt) {
@@ -208,8 +216,12 @@ var assertContains;
message += " (" + name_opt + ")";
}
- message += ": expected <" + expectedText +
- "> found <" + PrettyPrint(found) + ">";
+ var foundText = PrettyPrint(found);
+ if (expectedText.length <= 40 && foundText.length <= 40) {
+ message += ": expected <" + expectedText + "> found <" + foundText + ">";
+ } else {
+ message += ":\nexpected:\n" + expectedText + "\nfound:\n" + foundText;
+ }
throw new MjsUnitAssertionError(message);
}
@@ -361,7 +373,7 @@ var assertContains;
if (typeof type_opt === 'function') {
assertInstanceof(e, type_opt);
} else if (type_opt !== void 0) {
- fail("invalid use of assertThrows, maybe you want assertThrowsEquals");
+ failWithMessage("invalid use of assertThrows, maybe you want assertThrowsEquals");
}
if (arguments.length >= 3) {
assertEquals(e.type, cause_opt);
@@ -369,7 +381,7 @@ var assertContains;
// Success.
return;
}
- throw new MjsUnitAssertionError("Did not throw exception");
+ failWithMessage("Did not throw exception");
};
@@ -380,7 +392,7 @@ var assertContains;
assertEquals(val, e);
return;
}
- throw new MjsUnitAssertionError("Did not throw exception");
+ failWithMessage("Did not throw exception");
};
@@ -391,9 +403,9 @@ var assertContains;
if (typeof actualConstructor === "function") {
actualTypeName = actualConstructor.name || String(actualConstructor);
}
- fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" +
+ failWithMessage("Object <" + PrettyPrint(obj) + "> is not an instance of <" +
(type.name || type) + ">" +
- (actualTypeName ? " but of < " + actualTypeName + ">" : ""));
+ (actualTypeName ? " but of <" + actualTypeName + ">" : ""));
}
};
@@ -406,7 +418,7 @@ var assertContains;
eval(code);
}
} catch (e) {
- fail("threw an exception: ", e.message || e, name_opt);
+ failWithMessage("threw an exception: " + (e.message || e));
}
};
@@ -416,7 +428,7 @@ var assertContains;
if (name_opt) {
message += " - " + name_opt;
}
- throw new MjsUnitAssertionError(message);
+ failWithMessage(message);
};
assertContains = function(sub, value, name_opt) {
@@ -425,6 +437,15 @@ var assertContains;
}
};
+ assertMatches = function(regexp, str, name_opt) {
+ if (!(regexp instanceof RegExp)) {
+ regexp = new RegExp(regexp);
+ }
+ if (!str.match(regexp)) {
+ fail("should match '" + regexp + "'", str, name_opt);
+ }
+ };
+
var OptimizationStatusImpl = undefined;
var OptimizationStatus = function(fun, sync_opt) {
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index aa653e73a5..983bfe5ffd 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -27,6 +27,10 @@
[
[ALWAYS, {
+ # Modules which are only meant to be imported from by other tests, not to be
+ # tested standalone.
+ 'modules-skip*': [SKIP],
+
# All tests in the bug directory are expected to fail.
'bugs/*': [FAIL],
@@ -55,9 +59,9 @@
'es6/debug-promises/reject-with-undefined-reject': [FAIL],
'es6/debug-promises/reject-with-invalid-reject': [FAIL],
- # Issue 3660: Replacing activated TurboFan frames by unoptimized code does
- # not work, but we expect it to not crash.
- 'debug-step-turbofan': [PASS, FAIL],
+ # Issue 5587: The eval'ed code is piped through Ignition and fails when being
+ # live edited. This needs investigation.
+ 'debug-liveedit-double-call': [SKIP],
##############################################################################
# Too slow in debug mode with --stress-opt mode.
@@ -154,22 +158,9 @@
'numops-fuzz-part*': [PASS, ['mode == debug', SLOW]],
'readonly': [PASS, SLOW],
'regress/regress-1200351': [PASS, SLOW],
- 'regress/regress-crbug-474297': [PASS, ['mode == debug', SLOW]],
+ 'regress/regress-crbug-474297': [PASS, ['gc_stress == False', SKIP]],
'es6/tail-call-megatest*': [PASS, SLOW, FAST_VARIANTS, ['tsan', SKIP]],
- # TODO(titzer): ASM->WASM tests on these platforms
- 'wasm/asm-wasm': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
- # TODO(branelson): Figure out why ignition + asm-wasm-stdlib fails.
- # TODO(branelson): The old status entry was misconfigured as it added
- # "or ignition == True". This was deprecated at some point and was never
- # true. Essentially the next lines skip the tests for a bunch of
- # architectures.
- 'wasm/asm-wasm-stdlib': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
- 'wasm/asm-wasm-literals': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
- 'wasm/asm-wasm-copy': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
- 'wasm/asm-wasm-deopt': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
- 'wasm/asm-wasm-switch': [PASS, ['arch in [arm, arm64, mips, mipsel, mips64, mips64el]', SKIP]],
-
# case-insensitive unicode regexp relies on case mapping provided by ICU.
'es6/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
'es6/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
@@ -177,13 +168,12 @@
# desugaring regexp property class relies on ICU.
'harmony/regexp-property-*': [PASS, ['no_i18n == True', FAIL]],
+ # Allocates a large array buffer, which TSAN sometimes cannot handle.
+ 'regress/regress-599717': [PASS, ['tsan', SKIP]],
+
# TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
'unicodelctest': [PASS, NO_VARIANTS],
'unicodelctest-no-optimization': [PASS, NO_VARIANTS],
-
- # TODO(rmcilroy,5038): Crashes in Deoptimizer::PatchCodeForDeoptimization on
- # nosnap builds when --stress-opt and --turbo-from-bytecode is enabled.
- 'harmony/generators-turbo': [PASS, FAST_VARIANTS],
}], # ALWAYS
['novfp3 == True', {
@@ -215,7 +205,6 @@
'regress/regress-4121': [SKIP],
'compare-known-objects-slow': [SKIP],
# Tests taking too long
- 'debug-stepout-scope-part8': [SKIP],
'mirror-object': [SKIP],
'packed-elements': [SKIP],
'regress/regress-1122': [SKIP],
@@ -230,10 +219,6 @@
# Async function tests taking too long
# https://bugs.chromium.org/p/v8/issues/detail?id=5411
'harmony/async-function-debug-scopes': [SKIP],
- 'harmony/async-debug-caught-exception-cases0': [SKIP],
- 'harmony/async-debug-caught-exception-cases1': [SKIP],
- 'harmony/async-debug-caught-exception-cases2': [SKIP],
- 'harmony/async-debug-caught-exception-cases3': [SKIP],
# TODO(mstarzinger): Takes too long with TF.
'array-sort': [PASS, NO_VARIANTS],
@@ -250,9 +235,6 @@
# Too slow for gc stress.
'asm/embenchen/box2d': [SKIP],
- # Issue 3723.
- 'regress/regress-3717': [SKIP],
-
# BUG(v8:4237)
'regress/regress-3976': [SKIP],
@@ -268,8 +250,8 @@
# BUG(v8:4779): Crashes flakily with stress mode on arm64.
'array-splice': [PASS, SLOW, ['arch == arm64', FAST_VARIANTS]],
- # BUG(v8:5053).
- 'wasm/embenchen/fasta': [PASS, FAST_VARIANTS],
+ # BUG(chromium:664490)
+ 'debug-scopes': [SKIP],
}], # 'gc_stress == True'
##############################################################################
@@ -576,12 +558,21 @@
# Too slow.
'es6/tail-call-megatest*': [SKIP],
+ # Forced optimisation path tests.
+ 'shared-function-tier-up-default': [SKIP],
+ 'shared-function-tier-up-ignition': [SKIP],
+ 'shared-function-tier-up-turbo': [SKIP],
+
# Ongoing implementation of modules.
# https://bugs.chromium.org/p/v8/issues/detail?id=1569
# The deopt fuzzer currently does not respect the 'variant != ignition' rule
# further down in this file, so we have to duplicate this here.
# https://bugs.chromium.org/p/v8/issues/detail?id=5374
'modules-*': [SKIP],
+
+ # Fails deopt_fuzzer due to --deopt_every_n_times or
+ # deopt_every_n_garbage_collections
+ 'es6/array-iterator-turbo': [SKIP]
}], # 'deopt_fuzzer == True'
##############################################################################
@@ -608,30 +599,25 @@
}], # 'arch == ppc64'
##############################################################################
+['variant == nocrankshaft', {
+ 'es6/array-iterator-turbo': [SKIP],
+}], # variant == nocranshaft
+
+##############################################################################
['variant == stress', {
- 'debug-evaluate-locals-optimized': [FAIL],
- 'debug-evaluate-locals-optimized-double': [FAIL],
+ 'es6/array-iterator-turbo': [SKIP],
+
'ignition/regress-599001-verifyheap': [SKIP],
'unicode-test': [SKIP],
}], # variant == stress
##############################################################################
-['variant == turbofan', {
-
- # Assumptions about optimization need investigation in TurboFan.
- 'regress-sync-optimized-lists': [FAIL],
-
-}], # variant == turbofan
-
-##############################################################################
['variant == turbofan_opt', {
+ 'es6/array-iterator-turbo': [SKIP],
# TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
- 'debug-evaluate-closure': [FAIL],
'debug-evaluate-locals': [FAIL],
'debug-set-variable-value': [FAIL],
- 'debug-evaluate-locals-optimized': [FAIL],
- 'debug-evaluate-locals-optimized-double': [FAIL],
'debug-liveedit-double-call': [FAIL],
'es6/debug-evaluate-blockscopes': [FAIL],
@@ -646,99 +632,20 @@
}], # variant == turbofan_opt
##############################################################################
-['variant == ignition', {
+['variant == ignition or variant == ignition_staging', {
+ # TODO(5587): fails to liveedit evaled code.
'debug-liveedit-double-call': [FAIL],
- 'regress-sync-optimized-lists': [FAIL],
-
- # Might trigger stack overflow.
- 'unicode-test': [SKIP],
-
- # TODO(4680): Test doesn't know about three tier compiler pipeline.
- 'assert-opt-and-deopt': [SKIP],
-
- # Fails because concurrent compilation is not triggered on bytecode.
- # Check in Runtime_OptimizeFunctionOnNextCall.
- 'compiler/manual-concurrent-recompile': [FAIL],
- 'regress/regress-embedded-cons-string': [FAIL],
- 'regress/regress-prepare-break-while-recompile': [FAIL],
-
- # BUG(v8:5451): Flaky crashes.
- 'wasm/asm-wasm': [PASS, ['gc_stress', SKIP]],
}], # variant == ignition
-['variant == ignition and arch == arm64', {
- # TODO(rmcilroy,4680): Arm64 specific timeouts.
- 'asm/construct-double': [SKIP],
- 'compiler/osr-one': [SKIP],
- 'compiler/osr-two': [SKIP],
- 'wasm/asm-wasm-i32': [SKIP],
- 'wasm/asm-wasm-u32': [SKIP],
-}], # variant == ignition and arch == arm64
-
-['variant == ignition and arch == arm', {
- # TODO(rmcilroy,4680): Arm specific timeouts.
- 'compiler/osr-one': [SKIP],
- 'compiler/osr-two': [SKIP],
- 'regress/regress-1257': [SKIP],
-}], # variant == ignition and arch == arm
-
-['variant == ignition and msan', {
- # TODO(mythria,4680): All of these tests have large loops and hence slow
- # and timeout.
- 'compiler/osr-big': [SKIP],
- 'compiler/osr-nested': [SKIP],
- 'regress/regress-298269': [SKIP],
- 'regress/regress-crbug-319860': [SKIP],
- 'regress/regress-deep-proto': [SKIP],
- 'try': [SKIP],
- # Too slow for interpreter and msan.
- 'es6/tail-call-megatest*': [SKIP],
-}], # variant == ignition and msan
-
-##############################################################################
-['variant == ignition_staging', {
- 'assert-opt-and-deopt': [SKIP],
- 'debug-liveedit-double-call': [FAIL],
- 'regress-sync-optimized-lists': [FAIL],
-
- # Fails because concurrent compilation is not triggered on bytecode.
- # Check in Runtime_OptimizeFunctionOnNextCall.
- 'compiler/manual-concurrent-recompile': [FAIL],
- 'regress/regress-embedded-cons-string': [FAIL],
- 'regress/regress-prepare-break-while-recompile': [FAIL],
-
- # Flaky.
- 'asm/int32div': [SKIP],
- 'asm/int32mod': [SKIP],
- 'compiler/uint32': [SKIP],
- 'regress/regress-454725': [SKIP],
-
- # Might trigger stack overflow.
- 'unicode-test': [SKIP],
-
- # BUG(v8:5451): Flaky crashes.
- 'wasm/asm-wasm': [PASS, ['gc_stress', SKIP]],
-}], # variant == ignition_staging
-
##############################################################################
['variant == ignition_turbofan', {
+ # TODO(5587): fails to liveedit evaled code.
'debug-liveedit-double-call': [FAIL],
- # Might trigger stack overflow.
- 'unicode-test': [SKIP],
-
- 'wasm/asm-wasm-f32': [PASS, ['arch in [arm64]', SKIP]],
- 'wasm/asm-wasm-f64': [PASS, ['arch in [arm64]', SKIP]],
-
# TODO(rmcilroy,titzer): Times out after
# https://codereview.chromium.org/1951013002 .
'regress/regress-599717': [PASS, ['tsan', SKIP]],
- # TODO(rmcilroy,5038): Crashes in Deoptimizer::PatchCodeForDeoptimization on
- # nosnap builds when --stress-opt and --turbo-from-bytecode is enabled.
- 'harmony/generators-turbo': [PASS, FAST_VARIANTS],
- 'regress/regress-crbug-352058': [SKIP],
-
# TODO(jarin): No truncations on CheckFloat64Hole.
'getters-on-elements': [SKIP],
@@ -746,47 +653,7 @@
'unicodelctest-no-optimization': [SKIP],
}], # variant == ignition_turbofan
-['variant == ignition_turbofan and arch == arm64', {
- # TODO(rmcilroy,4680): Arm64 specific timeouts.
- 'asm/construct-double': [SKIP],
- 'compiler/osr-one': [SKIP],
- 'compiler/osr-two': [SKIP],
- 'wasm/asm-wasm-i32': [SKIP],
- 'wasm/asm-wasm-u32': [SKIP],
-}], # variant == ignition_turbofan and arch == arm64
-
-['variant == ignition_turbofan and arch == arm', {
- # TODO(rmcilroy,4680): Arm specific timeouts.
- 'compiler/osr-one': [SKIP],
- 'compiler/osr-two': [SKIP],
- 'regress/regress-1257': [SKIP],
-}], # variant == ignition_turbofan and arch == arm
-
-['variant == ignition_turbofan and msan', {
- # TODO(mythria,4680): All of these tests have large loops and hence slow
- # and timeout.
- 'compiler/osr-big': [SKIP],
- 'compiler/osr-nested': [SKIP],
- 'regress/regress-298269': [SKIP],
- 'regress/regress-crbug-319860': [SKIP],
- 'regress/regress-deep-proto': [SKIP],
- 'try': [SKIP],
- # Too slow for interpreter and msan.
- 'es6/tail-call-megatest*': [SKIP],
-}], # variant == ignition_turbofan and msan
-
##############################################################################
-['variant == ignition or variant == ignition_staging or variant == ignition_turbofan', {
- # Modules for which execution must fail (e.g. because of unresolved imports).
- # Eventually we should test for the precise error message, but for now we only
- # ensure that there is an error.
- 'modules-fail*': [FAIL],
-
- # Modules which are only meant to be imported from by other tests, not to be
- # tested standalone.
- 'modules-skip*': [SKIP],
-}], # variant == ignition or variant == ignition_staging or variant == ignition_turbofan
-
['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
# Ongoing implementation of modules.
# https://bugs.chromium.org/p/v8/issues/detail?id=1569
@@ -803,6 +670,12 @@
}], # 'gcov_coverage'
##############################################################################
+['variant == stress or variant == turbofan_opt', {
+ # BUG(v8:5553).
+ 'wasm/grow-memory': [SKIP],
+}], # variant == stress or variant == turbofan_opt
+
+##############################################################################
['variant == asm_wasm', {
# Skip stuff uninteresting for asm.js
'bugs/*': [SKIP],
@@ -821,13 +694,4 @@
'whitespaces': [SKIP],
}], # variant == asm_wasm
-##############################################################################
-# This test allocates a 2G block of memory and if there are multiple
-# varients this leads kills by the OOM killer, crashes or messages
-# indicating the OS cannot allocate memory, exclude for Node.js runs
-# re-evalute when we move up to v8 5.1
-[ALWAYS, {
-'regress/regress-crbug-514081': [PASS, NO_VARIANTS],
-}], # ALWAYS
-
]
diff --git a/deps/v8/test/mjsunit/modules-debug-scopes1.js b/deps/v8/test/mjsunit/modules-debug-scopes1.js
new file mode 100644
index 0000000000..02de75510e
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-debug-scopes1.js
@@ -0,0 +1,897 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// These tests are copied from mjsunit/debug-scopes.js and adapted for modules.
+
+
+var Debug = debug.Debug;
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+
+// Debug event listener which delegates.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ break_count++;
+ listener_called = true;
+ listener_delegate(exec_state);
+ }
+ } catch (e) {
+ exception = e;
+ }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Initialize for a new test.
+function BeginTest(name) {
+ test_name = name;
+ listener_delegate = null;
+ listener_called = false;
+ exception = null;
+ begin_test_count++;
+}
+
+
+// Check result of a test.
+function EndTest() {
+ assertTrue(listener_called, "listener not called for " + test_name);
+ assertNull(exception, test_name + " / " + exception);
+ end_test_count++;
+}
+
+
+// Check that two scope are the same.
+function assertScopeMirrorEquals(scope1, scope2) {
+ assertEquals(scope1.scopeType(), scope2.scopeType());
+ assertEquals(scope1.frameIndex(), scope2.frameIndex());
+ assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
+ assertPropertiesEqual(scope1.scopeObject().value(), scope2.scopeObject().value());
+}
+
+function CheckFastAllScopes(scopes, exec_state)
+{
+ var fast_all_scopes = exec_state.frame().allScopes(true);
+ var length = fast_all_scopes.length;
+ assertTrue(scopes.length >= length);
+ for (var i = 0; i < scopes.length && i < length; i++) {
+ var scope = fast_all_scopes[length - i - 1];
+ assertTrue(scope.isScope());
+ assertEquals(scopes[scopes.length - i - 1], scope.scopeType());
+ }
+}
+
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
+ assertEquals(scopes.length, exec_state.frame().scopeCount());
+ assertEquals(scopes.length, all_scopes.length, "FrameMirror.allScopes length");
+ for (var i = 0; i < scopes.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ assertEquals(scopes[i], scope.scopeType());
+ assertScopeMirrorEquals(all_scopes[i], scope);
+ }
+ CheckFastAllScopes(scopes, exec_state);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Send a scopes request and check the result.
+ var json;
+ var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scopes.length, response.body.scopes.length);
+ for (var i = 0; i < scopes.length; i++) {
+ assertEquals(i, response.body.scopes[i].index);
+ assertEquals(scopes[i], response.body.scopes[i].type);
+ if (scopes[i] == debug.ScopeType.Local ||
+ scopes[i] == debug.ScopeType.Script ||
+ scopes[i] == debug.ScopeType.Closure) {
+ assertTrue(response.body.scopes[i].object.ref < 0);
+ } else {
+ assertTrue(response.body.scopes[i].object.ref >= 0);
+ }
+ var found = false;
+ for (var j = 0; j < response.refs.length && !found; j++) {
+ found = response.refs[j].handle == response.body.scopes[i].object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+ }
+}
+
+
+// Check that the scope chain contains the expected names of scopes.
+function CheckScopeChainNames(names, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
+ assertEquals(names.length, all_scopes.length, "FrameMirror.allScopes length");
+ for (var i = 0; i < names.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ assertEquals(names[i], scope.details().name())
+ }
+}
+
+
+// Check that the scope contains at least minimum_content. For functions just
+// check that there is a function.
+function CheckScopeContent(minimum_content, number, exec_state) {
+ var scope = exec_state.frame().scope(number);
+ var minimum_count = 0;
+ for (var p in minimum_content) {
+ var property_mirror = scope.scopeObject().property(p);
+ assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+ if (typeof(minimum_content[p]) === 'function') {
+ assertTrue(property_mirror.value().isFunction());
+ } else {
+ assertEquals(minimum_content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+ }
+ minimum_count++;
+ }
+
+ // 'arguments' and might be exposed in the local and closure scope. Just
+ // ignore this.
+ var scope_size = scope.scopeObject().properties().length;
+ if (!scope.scopeObject().property('arguments').isUndefined()) {
+ scope_size--;
+ }
+ // Ditto for 'this'.
+ if (!scope.scopeObject().property('this').isUndefined()) {
+ scope_size--;
+ }
+ // Temporary variables introduced by the parser have not been materialized.
+ assertTrue(scope.scopeObject().property('').isUndefined());
+
+ if (scope_size < minimum_count) {
+ print('Names found in scope:');
+ var names = scope.scopeObject().propertyNames();
+ for (var i = 0; i < names.length; i++) {
+ print(names[i]);
+ }
+ }
+ assertTrue(scope_size >= minimum_count);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Send a scope request for information on a single scope and check the
+ // result.
+ var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
+ request_json += scope.scopeIndex();
+ request_json += '}}';
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scope.scopeType(), response.body.type);
+ assertEquals(number, response.body.index);
+ if (scope.scopeType() == debug.ScopeType.Local ||
+ scope.scopeType() == debug.ScopeType.Script ||
+ scope.scopeType() == debug.ScopeType.Closure) {
+ assertTrue(response.body.object.ref < 0);
+ } else {
+ assertTrue(response.body.object.ref >= 0);
+ }
+ var found = false;
+ for (var i = 0; i < response.refs.length && !found; i++) {
+ found = response.refs[i].handle == response.body.object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
+
+// Check that the scopes have positions as expected.
+function CheckScopeChainPositions(positions, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
+ assertTrue(positions.length <= all_scopes.length, "FrameMirror.allScopes length");
+ for (var i = 0; i < positions.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ var position = positions[i];
+ if (!position)
+ continue;
+
+ print(`Checking position.start = ${position.start}, .end = ${position.end}`);
+ assertEquals(position.start, scope.details().startPosition())
+ assertEquals(position.end, scope.details().endPosition())
+ }
+}
+
+// Simple empty local scope.
+BeginTest("Local 1");
+
+function local_1() {
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+};
+local_1();
+EndTest();
+
+
+// Local scope with a parameter.
+BeginTest("Local 2");
+
+function local_2(a) {
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 0, exec_state);
+};
+local_2(1);
+EndTest();
+
+
+// Local scope with a parameter and a local variable.
+BeginTest("Local 3");
+
+function local_3(a) {
+ var x = 3;
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,x:3}, 0, exec_state);
+};
+local_3(1);
+EndTest();
+
+
+// Local scope with parameters and local variables.
+BeginTest("Local 4");
+
+function local_4(a, b) {
+ var x = 3;
+ var y = 4;
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+};
+local_4(1, 2);
+EndTest();
+
+
+// Empty local scope with use of eval.
+BeginTest("Local 5");
+
+function local_5() {
+ eval('');
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+};
+local_5();
+EndTest();
+
+
+// Local introducing local variable using eval.
+BeginTest("Local 6");
+
+function local_6() {
+ eval('var i = 5');
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+};
+local_6();
+EndTest();
+
+
+// Local scope with parameters and local variables.
+BeginTest("Local 7");
+
+function local_7(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ debugger;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+};
+local_7(1, 2);
+EndTest();
+
+
+// Simple closure formed by returning an inner function referering the outer
+// functions arguments.
+BeginTest("Closure 1");
+
+function closure_1(a) {
+ function f() {
+ debugger;
+ return a;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 1, exec_state);
+ CheckScopeChainNames(["f", "closure_1", undefined, undefined, undefined], exec_state)
+};
+closure_1(1)();
+EndTest();
+
+
+// Simple closure formed by returning an inner function referering the outer
+// functions arguments. Due to VM optimizations parts of the actual closure is
+// missing from the debugger information.
+BeginTest("Closure 2");
+
+function closure_2(a, b) {
+ var x = a + 2;
+ var y = b + 2;
+ function f() {
+ debugger;
+ return a + x;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,x:3}, 1, exec_state);
+ CheckScopeChainNames(["f", "closure_2", undefined, undefined, undefined], exec_state)
+};
+closure_2(1, 2)();
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring the outer
+// function's arguments. Using all arguments and locals from the outer function
+// in the inner function makes these part of the debugger information on the
+// closure.
+BeginTest("Closure 3");
+
+function closure_3(a, b) {
+ var x = a + 2;
+ var y = b + 2;
+ function f() {
+ debugger;
+ return a + b + x + y;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
+ CheckScopeChainNames(["f", "closure_3", undefined, undefined, undefined], exec_state)
+};
+closure_3(1, 2)();
+EndTest();
+
+
+
+// Simple closure formed by returning an inner function referring the outer
+// function's arguments. Using all arguments and locals from the outer function
+// in the inner function makes these part of the debugger information on the
+// closure. Use the inner function as well...
+BeginTest("Closure 4");
+
+function closure_4(a, b) {
+ var x = a + 2;
+ var y = b + 2;
+ function f() {
+ debugger;
+ if (f) {
+ return a + b + x + y;
+ }
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
+ CheckScopeChainNames(["f", "closure_4", undefined, undefined, undefined], exec_state)
+};
+closure_4(1, 2)();
+EndTest();
+
+
+
+// Simple closure formed by returning an inner function referring the outer
+// function's arguments. In the presence of eval all arguments and locals
+// (including the inner function itself) from the outer function become part of
+// the debugger information on the closure.
+BeginTest("Closure 5");
+
+function closure_5(a, b) {
+ var x = 3;
+ var y = 4;
+ function f() {
+ eval('');
+ debugger;
+ return 1;
+ };
+ return f;
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
+ CheckScopeChainNames(["f", "closure_5", undefined, undefined, undefined], exec_state)
+};
+closure_5(1, 2)();
+EndTest();
+
+
+// Two closures. Due to optimizations only the parts actually used are provided
+// through the debugger information.
+BeginTest("Closure 6");
+let some_global;
+function closure_6(a, b) {
+ function f(a, b) {
+ var x = 3;
+ var y = 4;
+ return function() {
+ var x = 3;
+ var y = 4;
+ debugger;
+ some_global = a;
+ return f;
+ };
+ }
+ return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({a:1}, 1, exec_state);
+ CheckScopeContent({f:function(){}}, 2, exec_state);
+ CheckScopeChainNames([undefined, "f", "closure_6", undefined, undefined, undefined], exec_state)
+};
+closure_6(1, 2)();
+EndTest();
+
+
+// Two closures. In the presence of eval all information is provided as the
+// compiler cannot determine which parts are used.
+BeginTest("Closure 7");
+function closure_7(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ function f(a, b) {
+ var x = 3;
+ var y = 4;
+ eval('var i = 5');
+ eval('var j = 6');
+ return function() {
+ debugger;
+ some_global = a;
+ return f;
+ };
+ }
+ return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 0, exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
+ CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 2, exec_state);
+ CheckScopeChainNames([undefined, "f", "closure_7", undefined, undefined, undefined], exec_state)
+};
+closure_7(1, 2)();
+EndTest();
+
+
+// Closure that may be optimized out.
+BeginTest("Closure 8");
+function closure_8() {
+ (function inner(x) {
+ debugger;
+ })(2);
+}
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({x: 2}, 0, exec_state);
+ CheckScopeChainNames(["inner", undefined, undefined, undefined], exec_state)
+};
+closure_8();
+EndTest();
+
+
+BeginTest("Closure 9");
+let closure_9 = Function(' \
+ eval("var y = 1;"); \
+ eval("var z = 1;"); \
+ (function inner(x) { \
+ y++; \
+ z++; \
+ debugger; \
+ })(2); \
+')
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Closure,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainNames(["inner", undefined, undefined, undefined], exec_state)
+};
+closure_9();
+EndTest();
+
+
+// Test global scope.
+BeginTest("Global");
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Module, debug.ScopeType.Script, debug.ScopeType.Global], exec_state);
+ CheckScopeChainNames([undefined, undefined, undefined], exec_state)
+};
+debugger;
+EndTest();
+
+
+BeginTest("Catch block 1");
+function catch_block_1() {
+ try {
+ throw 'Exception';
+ } catch (e) {
+ debugger;
+ }
+};
+
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ CheckScopeChainNames(["catch_block_1", "catch_block_1", undefined, undefined, undefined], exec_state)
+};
+catch_block_1();
+EndTest();
+
+
+BeginTest("Catch block 3");
+function catch_block_3() {
+ eval("var y = 78;");
+ try {
+ throw 'Exception';
+ } catch (e) {
+ debugger;
+ }
+};
+
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ CheckScopeContent({}, 1, exec_state);
+ CheckScopeChainNames(["catch_block_3", "catch_block_3", undefined, undefined, undefined], exec_state)
+};
+catch_block_3();
+EndTest();
+
+
+// Test catch in global scope.
+BeginTest("Catch block 5");
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ CheckScopeChainNames([undefined, undefined, undefined, undefined], exec_state)
+};
+
+try {
+ throw 'Exception';
+} catch (e) {
+ debugger;
+}
+
+EndTest();
+
+
+// Closure inside catch in global code.
+BeginTest("Catch block 6");
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Catch,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({x: 2}, 0, exec_state);
+ CheckScopeContent({e:'Exception'}, 1, exec_state);
+ CheckScopeChainNames([undefined, undefined, undefined, undefined, undefined], exec_state)
+};
+
+try {
+ throw 'Exception';
+} catch (e) {
+ (function(x) {
+ debugger;
+ })(2);
+}
+EndTest();
+
+
+// Catch block in function that is marked for optimization while being executed.
+BeginTest("Catch block 7");
+function catch_block_7() {
+ %OptimizeFunctionOnNextCall(catch_block_7);
+ try {
+ throw 'Exception';
+ } catch (e) {
+ debugger;
+ }
+};
+
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Catch,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({e:'Exception'}, 0, exec_state);
+ CheckScopeChainNames(["catch_block_7", "catch_block_7", undefined, undefined, undefined], exec_state)
+};
+catch_block_7();
+EndTest();
+
+
+BeginTest("Classes and methods 1");
+
+listener_delegate = function(exec_state) {
+ "use strict"
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({}, 1, exec_state);
+ CheckScopeChainNames(["m", undefined, undefined, undefined], exec_state)
+};
+
+(function() {
+ "use strict";
+ class C1 {
+ m() {
+ debugger;
+ }
+ }
+ new C1().m();
+})();
+
+EndTest();
+
+BeginTest("Scope positions");
+var code1 = "function f() { \n" +
+ " var a = 1; \n" +
+ " function b() { \n" +
+ " debugger; \n" +
+ " return a + 1; \n" +
+ " } \n" +
+ " b(); \n" +
+ "} \n" +
+ "f(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChainPositions([{start: 58, end: 118}, {start: 10, end: 162}], exec_state);
+}
+eval(code1);
+EndTest();
+
+
+BeginTest("Scope positions in for statement");
+var code3 = "function for_statement() { \n" +
+ " for (let i = 0; i < 1; i++) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 52, end: 111}, {start: 22, end: 145}], exec_state);
+}
+eval(code3);
+EndTest();
+
+BeginTest("Scope positions in for statement with lexical block");
+var code4 = "function for_statement() { \n" +
+ " for (let i = 0; i < 1; i++) { \n" +
+ " let j; \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 66, end: 147}, {start: 52, end: 147}, {start: 22, end: 181}], exec_state);
+}
+eval(code4);
+EndTest();
+
+BeginTest("Scope positions in lexical for each statement");
+var code5 = "function for_each_statement() { \n" +
+ " for (let i of [0]) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 55, end: 111}, {start: 27, end: 145}], exec_state);
+}
+eval(code5);
+EndTest();
+
+BeginTest("Scope positions in lexical for each statement with lexical block");
+var code6 = "function for_each_statement() { \n" +
+ " for (let i of [0]) { \n" +
+ " let j; \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 57, end: 147}, {start: 55, end: 147}, {start: 27, end: 181}], exec_state);
+}
+eval(code6);
+EndTest();
+
+BeginTest("Scope positions in non-lexical for each statement");
+var code7 = "function for_each_statement() { \n" +
+ " var i; \n" +
+ " for (i of [0]) { \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 27, end: 181}], exec_state);
+}
+eval(code7);
+EndTest();
+
+BeginTest("Scope positions in non-lexical for each statement with lexical block");
+var code8 = "function for_each_statement() { \n" +
+ " var i; \n" +
+ " for (i of [0]) { \n" +
+ " let j; \n" +
+ " debugger; \n" +
+ " } \n" +
+ "} \n" +
+ "for_each_statement(); \n";
+
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Block,
+ debug.ScopeType.Local,
+ debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeChainPositions([{start: 89, end: 183}, {start: 27, end: 217}], exec_state);
+}
+eval(code8);
+EndTest();
+
+assertEquals(begin_test_count, break_count,
+ 'one or more tests did not enter the debugger');
+assertEquals(begin_test_count, end_test_count,
+ 'one or more tests did not have its result checked');
diff --git a/deps/v8/test/mjsunit/modules-debug-scopes2.js b/deps/v8/test/mjsunit/modules-debug-scopes2.js
new file mode 100644
index 0000000000..ffe712f5d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-debug-scopes2.js
@@ -0,0 +1,239 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --expose-debug-as debug
+
+
+var Debug = debug.Debug;
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ break_count++;
+ listener_called = true;
+ listener_delegate(exec_state);
+ }
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+
+function BeginTest(name) {
+ test_name = name;
+ listener_delegate = null;
+ listener_called = false;
+ exception = null;
+ begin_test_count++;
+}
+
+function EndTest() {
+ assertTrue(listener_called, "listener not called for " + test_name);
+ assertNull(exception, test_name + " / " + exception);
+ end_test_count++;
+}
+
+
+// Check that two scopes are the same.
+function assertScopeMirrorEquals(scope1, scope2) {
+ assertEquals(scope1.scopeType(), scope2.scopeType());
+ assertEquals(scope1.frameIndex(), scope2.frameIndex());
+ assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
+ assertPropertiesEqual(scope1.scopeObject().value(), scope2.scopeObject().value());
+}
+
+function CheckFastAllScopes(scopes, exec_state)
+{
+ var fast_all_scopes = exec_state.frame().allScopes(true);
+ var length = fast_all_scopes.length;
+ assertTrue(scopes.length >= length);
+ for (var i = 0; i < scopes.length && i < length; i++) {
+ var scope = fast_all_scopes[length - i - 1];
+ assertTrue(scope.isScope());
+ assertEquals(scopes[scopes.length - i - 1], scope.scopeType());
+ }
+}
+
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+ var all_scopes = exec_state.frame().allScopes();
+ assertEquals(scopes.length, exec_state.frame().scopeCount());
+ assertEquals(scopes.length, all_scopes.length, "FrameMirror.allScopes length");
+ for (var i = 0; i < scopes.length; i++) {
+ var scope = exec_state.frame().scope(i);
+ assertTrue(scope.isScope());
+ assertEquals(scopes[i], scope.scopeType());
+ assertScopeMirrorEquals(all_scopes[i], scope);
+ }
+ CheckFastAllScopes(scopes, exec_state);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Send a scopes request and check the result.
+ var json;
+ var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scopes.length, response.body.scopes.length);
+ for (var i = 0; i < scopes.length; i++) {
+ assertEquals(i, response.body.scopes[i].index);
+ assertEquals(scopes[i], response.body.scopes[i].type);
+ if (scopes[i] == debug.ScopeType.Local ||
+ scopes[i] == debug.ScopeType.Script ||
+ scopes[i] == debug.ScopeType.Closure) {
+ assertTrue(response.body.scopes[i].object.ref < 0);
+ } else {
+ assertTrue(response.body.scopes[i].object.ref >= 0);
+ }
+ var found = false;
+ for (var j = 0; j < response.refs.length && !found; j++) {
+ found = response.refs[j].handle == response.body.scopes[i].object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+ }
+}
+
+
+function CheckScopeDoesNotHave(properties, number, exec_state) {
+ var scope = exec_state.frame().scope(number);
+ for (var p of properties) {
+ var property_mirror = scope.scopeObject().property(p);
+ assertTrue(property_mirror.isUndefined(), 'property ' + p + ' found in scope');
+ }
+}
+
+
+// Check that the scope contains at least minimum_content. For functions just
+// check that there is a function.
+function CheckScopeContent(minimum_content, number, exec_state) {
+ var scope = exec_state.frame().scope(number);
+ var minimum_count = 0;
+ for (var p in minimum_content) {
+ var property_mirror = scope.scopeObject().property(p);
+ assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+ if (typeof(minimum_content[p]) === 'function') {
+ assertTrue(property_mirror.value().isFunction());
+ } else {
+ assertEquals(minimum_content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+ }
+ minimum_count++;
+ }
+
+  // 'arguments' might be exposed in the local and closure scope. Just
+ // ignore this.
+ var scope_size = scope.scopeObject().properties().length;
+ if (!scope.scopeObject().property('arguments').isUndefined()) {
+ scope_size--;
+ }
+ // Ditto for 'this'.
+ if (!scope.scopeObject().property('this').isUndefined()) {
+ scope_size--;
+ }
+ // Temporary variables introduced by the parser have not been materialized.
+ assertTrue(scope.scopeObject().property('').isUndefined());
+
+ if (scope_size < minimum_count) {
+ print('Names found in scope:');
+ var names = scope.scopeObject().propertyNames();
+ for (var i = 0; i < names.length; i++) {
+ print(names[i]);
+ }
+ }
+ assertTrue(scope_size >= minimum_count);
+
+ // Get the debug command processor.
+ var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+ // Send a scope request for information on a single scope and check the
+ // result.
+ var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
+ request_json += scope.scopeIndex();
+ request_json += '}}';
+ var response_json = dcp.processDebugJSONRequest(request_json);
+ var response = JSON.parse(response_json);
+ assertEquals(scope.scopeType(), response.body.type);
+ assertEquals(number, response.body.index);
+ if (scope.scopeType() == debug.ScopeType.Local ||
+ scope.scopeType() == debug.ScopeType.Script ||
+ scope.scopeType() == debug.ScopeType.Closure) {
+ assertTrue(response.body.object.ref < 0);
+ } else {
+ assertTrue(response.body.object.ref >= 0);
+ }
+ var found = false;
+ for (var i = 0; i < response.refs.length && !found; i++) {
+ found = response.refs[i].handle == response.body.object.ref;
+ }
+ assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Actual tests.
+////////////////////////////////////////////////////////////////////////////////
+
+
+BeginTest();
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent(
+ {local_var: undefined, exported_var: undefined, imported_var: undefined},
+ 0, exec_state);
+ CheckScopeDoesNotHave(
+ ["doesnotexist", "local_let", "exported_let", "imported_let"],
+ 0, exec_state);
+};
+debugger;
+EndTest();
+
+let local_let = 1;
+var local_var = 2;
+export let exported_let = 3;
+export var exported_var = 4;
+import {exported_let as imported_let} from "modules-debug-scopes2.js";
+import {exported_var as imported_var} from "modules-debug-scopes2.js";
+
+BeginTest();
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent(
+ {local_let: 1, local_var: 2, exported_let: 3, exported_var: 4,
+ imported_let: 3, imported_var: 4}, 0, exec_state);
+};
+debugger;
+EndTest();
+
+local_let += 10;
+local_var += 10;
+exported_let += 10;
+exported_var += 10;
+
+BeginTest();
+listener_delegate = function(exec_state) {
+ CheckScopeChain([debug.ScopeType.Module,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent(
+ {local_let: 11, local_var: 12, exported_let: 13, exported_var: 14,
+ imported_let: 13, imported_var: 14}, 0, exec_state);
+};
+debugger;
+EndTest();
diff --git a/deps/v8/test/mjsunit/modules-fail-5.js b/deps/v8/test/mjsunit/modules-fail-5.js
deleted file mode 100644
index 046275b2d3..0000000000
--- a/deps/v8/test/mjsunit/modules-fail-5.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-import {a as b} from "modules-fail-5.js";
-export {c as a} from "modules-fail-5.js";
-import {c} from "modules-fail-5.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-7.js b/deps/v8/test/mjsunit/modules-fail-7.js
deleted file mode 100644
index 766cf43852..0000000000
--- a/deps/v8/test/mjsunit/modules-fail-7.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-// Star exports do not propagate a default export.
-import a from "modules-skip-4.js";
diff --git a/deps/v8/test/mjsunit/modules-fail-cyclic-3.js b/deps/v8/test/mjsunit/modules-fail-cyclic-3.js
deleted file mode 100644
index a216569fff..0000000000
--- a/deps/v8/test/mjsunit/modules-fail-cyclic-3.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// MODULE
-
-export {a as x} from "modules-skip-cyclic-3.js";
-export {b as y} from "modules-skip-cyclic-3.js";
diff --git a/deps/v8/test/mjsunit/modules-init3.js b/deps/v8/test/mjsunit/modules-init3.js
index 36ff1e8ffa..e8b46106f7 100644
--- a/deps/v8/test/mjsunit/modules-init3.js
+++ b/deps/v8/test/mjsunit/modules-init3.js
@@ -11,6 +11,11 @@ assertThrows(() => x, ReferenceError);
assertThrows(() => y, ReferenceError);
assertThrows(() => z, ReferenceError);
+assertEquals(23, w = 23);
+assertThrows(() => x = 666, ReferenceError);
+assertThrows(() => y = 666, ReferenceError);
+assertThrows(() => z = 666, TypeError);
+
export function* v() { return 40 }
export var w = 41;
export let x = 42;
diff --git a/deps/v8/test/mjsunit/modules-namespace1.js b/deps/v8/test/mjsunit/modules-namespace1.js
new file mode 100644
index 0000000000..227b9c8a4c
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-namespace1.js
@@ -0,0 +1,107 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+let ja = 42;
+export {ja as yo};
+export const bla = "blaa";
+export {foo as foo_again};
+// See further below for the actual star import that declares "foo".
+
+// The object itself.
+assertEquals("object", typeof foo);
+assertThrows(() => foo = 666, TypeError);
+assertFalse(Reflect.isExtensible(foo));
+assertTrue(Reflect.preventExtensions(foo));
+assertThrows(() => Reflect.apply(foo, {}, []));
+assertThrows(() => Reflect.construct(foo, {}, []));
+assertSame(null, Reflect.getPrototypeOf(foo));
+// TODO(neis): The next one should be False.
+assertTrue(Reflect.setPrototypeOf(foo, null));
+assertFalse(Reflect.setPrototypeOf(foo, {}));
+assertSame(null, Reflect.getPrototypeOf(foo));
+assertEquals(
+ ["bla", "foo_again", "yo", Symbol.toStringTag, Symbol.iterator],
+ Reflect.ownKeys(foo));
+
+// Its "yo" property.
+assertEquals(
+ {value: 42, enumerable: true, configurable: false, writable: true},
+ Reflect.getOwnPropertyDescriptor(foo, "yo"));
+assertFalse(Reflect.deleteProperty(foo, "yo"));
+assertTrue(Reflect.has(foo, "yo"));
+assertFalse(Reflect.set(foo, "yo", true));
+// TODO(neis): The next two should be False.
+assertTrue(Reflect.defineProperty(foo, "yo",
+ Reflect.getOwnPropertyDescriptor(foo, "yo")));
+assertTrue(Reflect.defineProperty(foo, "yo", {}));
+assertFalse(Reflect.defineProperty(foo, "yo", {get() {return 1}}));
+assertEquals(42, Reflect.get(foo, "yo"));
+assertEquals(43, (ja++, foo.yo));
+
+// Its "foo_again" property.
+assertSame(foo, foo.foo_again);
+
+// Its @@toStringTag property.
+assertTrue(Reflect.has(foo, Symbol.toStringTag));
+assertEquals("string", typeof Reflect.get(foo, Symbol.toStringTag));
+assertEquals(
+ {value: "Module", configurable: true, writable: false, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(foo, Symbol.toStringTag));
+
+// Its @@iterator property.
+assertTrue(Reflect.has(foo, Symbol.iterator));
+assertEquals("function", typeof Reflect.get(foo, Symbol.iterator));
+assertEquals("[Symbol.iterator]", foo[Symbol.iterator].name);
+assertEquals(0, foo[Symbol.iterator].length);
+assertSame(Function.prototype, foo[Symbol.iterator].__proto__);
+assertEquals(
+ {value: foo[Symbol.iterator],
+ configurable: true, writable: true, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(foo, Symbol.iterator));
+assertEquals(["bla", "foo_again", "yo"], [...foo]);
+assertThrows(() => (42, foo[Symbol.iterator])(), TypeError);
+{
+ let it = foo[Symbol.iterator]();
+ assertSame(it.__proto__, ([][Symbol.iterator]()).__proto__.__proto__);
+ assertEquals(["next"], Reflect.ownKeys(it));
+ assertEquals(
+ {value: it.next, configurable: true, writable: true, enumerable: false},
+ Reflect.getOwnPropertyDescriptor(it, "next"));
+ assertEquals("function", typeof it.next);
+ assertEquals("next", it.next.name);
+ assertEquals(0, it.next.length);
+ assertSame(Function.prototype, it.next.__proto__);
+ assertFalse(it === foo[Symbol.iterator]());
+ assertFalse(it.next === foo[Symbol.iterator]().next);
+ assertThrows(() => (42, it.next)(), TypeError);
+ assertThrows(() => it.next.call(foo[Symbol.iterator]()), TypeError);
+ let next = it.next;
+ assertEquals(42, (it.next = 42, it.next));
+ assertEquals(43, (it.bla = 43, it.bla));
+ assertTrue(delete it.next);
+ assertThrows(() => next.call(foo[Symbol.iterator]()), TypeError);
+}
+
+// TODO(neis): Clarify spec w.r.t. other symbols.
+
+// Nonexistant properties.
+let nonexistant = ["gaga", 123, Symbol('')];
+for (let key of nonexistant) {
+ assertSame(undefined, Reflect.getOwnPropertyDescriptor(foo, key));
+ assertTrue(Reflect.deleteProperty(foo, key));
+ assertFalse(Reflect.set(foo, key, true));
+ assertSame(undefined, Reflect.get(foo, key));
+ assertFalse(Reflect.defineProperty(foo, key, {get() {return 1}}));
+ assertFalse(Reflect.has(foo, key));
+}
+
+// The actual star import that we are testing. Namespace imports are
+// initialized before evaluation.
+import * as foo from "modules-namespace1.js";
+
+// There can be only one namespace object.
+import * as bar from "modules-namespace1.js";
+assertSame(foo, bar);
diff --git a/deps/v8/test/mjsunit/modules-namespace2.js b/deps/v8/test/mjsunit/modules-namespace2.js
new file mode 100644
index 0000000000..43529b916d
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-namespace2.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+assertEquals(
+ ["b", "c", "get_a", "ns2", "set_a", "zzz",
+ Symbol.toStringTag, Symbol.iterator],
+ Reflect.ownKeys(ns));
+assertEquals(["b", "c", "get_a", "ns2", "set_a", "zzz"], [...ns]);
+
+import * as foo from "modules-skip-1.js";
+assertSame(foo.a, ns.b);
+assertSame(foo.a, ns.c);
+assertSame(foo.get_a, ns.get_a);
+assertSame(foo.set_a, ns.set_a);
+assertEquals(123, ns.zzz);
+
+assertSame(ns, ns.ns2.ns);
+import * as ns from "modules-skip-namespace.js";
+export {ns};
diff --git a/deps/v8/test/mjsunit/modules-namespace3.js b/deps/v8/test/mjsunit/modules-namespace3.js
new file mode 100644
index 0000000000..df9ef7806b
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-namespace3.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import * as foo from "modules-namespace3.js";
+export * from "modules-namespace3.js";
+export var bar;
+assertEquals(["bar", "default"], Object.getOwnPropertyNames(foo));
+export default function() {};
diff --git a/deps/v8/test/mjsunit/modules-namespace4.js b/deps/v8/test/mjsunit/modules-namespace4.js
new file mode 100644
index 0000000000..ef508d5fb8
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-namespace4.js
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import * as foo from "modules-namespace4.js";
+
+assertSame(undefined, a);
+assertThrows(() => b, ReferenceError);
+assertThrows(() => B, ReferenceError);
+assertThrows(() => c, ReferenceError);
+assertEquals(45, d());
+
+assertSame(undefined, foo.a);
+assertThrows(() => foo.b, ReferenceError);
+assertThrows(() => foo.B, ReferenceError);
+assertThrows(() => foo.c, ReferenceError);
+assertEquals(45, foo.d());
+assertThrows(() => foo.default, ReferenceError);
+assertSame(undefined, foo.doesnotexist);
+
+Function("Foo", " \
+ with (Foo) { \
+ assertEquals(undefined, a); \
+ assertThrows(() => b, ReferenceError); \
+ assertThrows(() => B, ReferenceError); \
+ assertThrows(() => c, ReferenceError); \
+ assertEquals(45, d()); \
+ }")(foo);
+
+export var a = 42;
+export let b = 43;
+export {b as B};
+export const c = 44;
+export function d() { return 45 };
+export default 46;
+
+assertEquals(42, a);
+assertEquals(43, b);
+assertEquals(44, c);
+assertEquals(45, d());
+
+assertEquals(42, foo.a);
+assertEquals(43, foo.b);
+assertEquals(43, foo.B);
+assertEquals(44, foo.c);
+assertEquals(45, foo.d());
+assertEquals(46, foo.default);
+assertSame(undefined, foo.doesnotexist);
+
+Function("Foo", " \
+ with (Foo) { \
+ assertEquals(42, a); \
+ assertEquals(43, b); \
+ assertEquals(43, B); \
+ assertEquals(44, c); \
+ assertEquals(45, d()); \
+ }")(foo);
diff --git a/deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js b/deps/v8/test/mjsunit/modules-relative-path.js
index 6e2b219342..7e6a37ac1c 100644
--- a/deps/v8/test/mjsunit/modules-fail-star-exports-conflict.js
+++ b/deps/v8/test/mjsunit/modules-relative-path.js
@@ -4,7 +4,11 @@
//
// MODULE
-export * from "modules-skip-star-exports-conflict.js";
-export * from "modules-skip-6.js";
+import {x as y} from "./modules-relative-path.js";
+export let x = 0;
-import {a} from "modules-fail-star-exports-conflict.js";
+assertEquals(0, x);
+assertEquals(x, y);
+x++;
+assertEquals(1, x);
+assertEquals(x, y);
diff --git a/deps/v8/test/mjsunit/modules-skip-2.js b/deps/v8/test/mjsunit/modules-skip-2.js
index fdd576a988..d5ff578b49 100644
--- a/deps/v8/test/mjsunit/modules-skip-2.js
+++ b/deps/v8/test/mjsunit/modules-skip-2.js
@@ -5,3 +5,4 @@
export {a as b, default} from "modules-skip-1.js";
import {a as tmp} from "modules-skip-1.js";
export {tmp as c};
+export const zzz = 999;
diff --git a/deps/v8/test/mjsunit/modules-skip-cyclic-3.js b/deps/v8/test/mjsunit/modules-skip-cyclic-3.js
deleted file mode 100644
index ced96270b1..0000000000
--- a/deps/v8/test/mjsunit/modules-skip-cyclic-3.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-export {y as a} from "modules-fail-cyclic-3.js";
-export {x as b} from "modules-fail-cyclic-3.js";
diff --git a/deps/v8/test/mjsunit/modules-skip-init3.js b/deps/v8/test/mjsunit/modules-skip-init3.js
index eac1ae172b..589b2cfb16 100644
--- a/deps/v8/test/mjsunit/modules-skip-init3.js
+++ b/deps/v8/test/mjsunit/modules-skip-init3.js
@@ -10,6 +10,12 @@ assertThrows(() => x, ReferenceError);
assertThrows(() => y, ReferenceError);
assertThrows(() => z, ReferenceError);
+assertThrows(() => v = 666, TypeError);
+assertThrows(() => w = 666, TypeError);
+assertThrows(() => x = 666, TypeError);
+assertThrows(() => y = 666, TypeError);
+assertThrows(() => z = 666, TypeError);
+
export function check() {
assertEquals({value: 40, done: true}, v().next());
assertEquals(41, w);
diff --git a/deps/v8/test/mjsunit/modules-skip-namespace.js b/deps/v8/test/mjsunit/modules-skip-namespace.js
new file mode 100644
index 0000000000..ff6a7b81d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-skip-namespace.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//assertEquals(
+// ["ns", Symbol.toStringTag, Symbol.iterator], Reflect.ownKeys(ns2));
+//assertEquals(["ns"], [...ns2]);
+
+export * from "modules-skip-4.js";
+export * from "modules-skip-5.js";
+export var zzz = 123;
+export {ns2};
+import * as ns2 from "modules-namespace2.js";
diff --git a/deps/v8/test/mjsunit/modules-turbo.js b/deps/v8/test/mjsunit/modules-turbo.js
new file mode 100644
index 0000000000..7c31682a5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/modules-turbo.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MODULE
+// Flags: --allow-natives-syntax
+
+export let x = 0;
+function foo() { x++ };
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+assertOptimized(foo);
+assertEquals(2, x);
diff --git a/deps/v8/test/mjsunit/object-create.js b/deps/v8/test/mjsunit/object-create.js
index d8385842a3..d2f676e77c 100644
--- a/deps/v8/test/mjsunit/object-create.js
+++ b/deps/v8/test/mjsunit/object-create.js
@@ -49,6 +49,27 @@ try {
assertTrue(/Object or null/.test(e));
}
+try {
+ Object.create(null, this);
+ assertTrue(false);
+} catch(e) {
+ assertTrue(/Property description/.test(e))
+}
+
+try {
+ Object.create(null, [1, 2, 3]);
+ assertTrue(false);
+} catch(e) {
+ assertTrue(/Property description/.test(e))
+}
+
+try {
+ Object.create(null, new Proxy([1, 2, 3], {}));
+ assertTrue(false);
+} catch(e) {
+ assertTrue(/Property description/.test(e))
+}
+
var ctr = 0;
var ctr2 = 0;
var ctr3 = 0;
@@ -248,3 +269,14 @@ for (x in sonOfTricky) {
sum += sonOfTricky[x];
}
assertEquals(16, sum);
+
+
+(function createWithEmptyProtoInfoCreateMap() {
+ var proto = {a:1};
+ var instance = {__proto__: proto };
+ // Try force creation of prototype info on proto by loading a proto property.
+ assertEquals(instance.a, 1);
+ var result = Object.create(proto, {});
+ assertEquals(result.a, 1);
+ assertEquals(result.__proto__, proto);
+})()
diff --git a/deps/v8/test/mjsunit/preparse-toplevel-strict-eval.js b/deps/v8/test/mjsunit/preparse-toplevel-strict-eval.js
new file mode 100644
index 0000000000..6d99ac755e
--- /dev/null
+++ b/deps/v8/test/mjsunit/preparse-toplevel-strict-eval.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length=0
+
+"use strict";
+var x = 1;
+var g = eval("var y = 100; function h(s) { if (s) x = s; return x+y; }; h");
+
+assertEquals(101, g());
+assertEquals(102, g(2));
+assertEquals(102, g(2));
diff --git a/deps/v8/test/mjsunit/print.js b/deps/v8/test/mjsunit/print.js
new file mode 100644
index 0000000000..1ec3383e47
--- /dev/null
+++ b/deps/v8/test/mjsunit/print.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals("function", typeof print, "print should be defined");
+assertEquals("function", typeof printErr, "printErr should be defined");
diff --git a/deps/v8/test/mjsunit/prototype-non-existing.js b/deps/v8/test/mjsunit/prototype-non-existing.js
new file mode 100644
index 0000000000..367a59f547
--- /dev/null
+++ b/deps/v8/test/mjsunit/prototype-non-existing.js
@@ -0,0 +1,92 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Dictionary object in the prototype chain.
+(function() {
+ function A() {
+ this.z = "a";
+ }
+ var a = new A();
+
+ function B() {
+ this.b = "b";
+ }
+ B.prototype = a;
+ var b = new B();
+
+ // Ensure b stays slow.
+ for (var i = 0; i < 1200; i++) {
+ b["b"+i] = 0;
+ }
+ assertFalse(%HasFastProperties(b));
+
+ function C() {
+ this.c = "c";
+ }
+ C.prototype = b;
+ var c = new C();
+
+ function f(expected) {
+ assertFalse(%HasFastProperties(b));
+ var result = c.z;
+ assertEquals(expected, result);
+ }
+ f("a");
+ f("a");
+ f("a");
+ %OptimizeFunctionOnNextCall(f);
+ f("a");
+
+ a.z = "z";
+ f("z");
+ f("z");
+ f("z");
+
+ b.z = "bz";
+ f("bz");
+ f("bz");
+ f("bz");
+})();
+
+
+// Global object in the prototype chain.
+(function() {
+ var global = this;
+
+ function A() {
+ this.z = "a";
+ }
+ A.prototype = global.__proto__;
+ var a = new A();
+
+ global.__proto__ = a;
+
+ function C() {
+ this.c = "c";
+ }
+ C.prototype = global;
+ var c = new C();
+
+ function f(expected) {
+ var result = c.z;
+ assertEquals(expected, result);
+ }
+ f("a");
+ f("a");
+ f("a");
+ %OptimizeFunctionOnNextCall(f);
+ f("a");
+
+ a.z = "z";
+ f("z");
+ f("z");
+ f("z");
+
+ global.z = "bz";
+ f("bz");
+ f("bz");
+ f("bz");
+})();
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js b/deps/v8/test/mjsunit/regexp-regexpexec.js
index 6fc7eab0cf..bfc42b6a16 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases3.js
+++ b/deps/v8/test/mjsunit/regexp-regexpexec.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
-// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
-
-runPart(3);
+// Test that the fallback path in RegExpExec executes the default exec
+// implementation.
+delete RegExp.prototype.exec;
+assertEquals("b", "aba".replace(/a/g, ""));
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index ddaf022d19..04c723665a 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -731,3 +731,36 @@ assertEquals(["acbc", "c", "c"], /a(.\2)b(\1)/.exec("aabcacbc"));
// \u{daff}\u{e000} is not a surrogate pair, while \u{daff}\u{dfff} is.
assertEquals(["\u{daff}", "\u{e000}"], "\u{daff}\u{e000}".split(/[a-z]{0,1}/u));
assertEquals(["\u{daff}\u{dfff}"], "\u{daff}\u{dfff}".split(/[a-z]{0,1}/u));
+
+// Test that changing a property on RegExp.prototype results in us taking the
+// slow path, which executes RegExp.prototype.exec instead of our
+// RegExpExecStub.
+const RegExpPrototypeExec = RegExp.prototype.exec;
+RegExp.prototype.exec = function() { throw new Error(); }
+assertThrows(() => "abc".replace(/./, ""));
+RegExp.prototype.exec = RegExpPrototypeExec;
+
+// Test the code path in RE.proto[@@search] when previousLastIndex is a receiver
+// but can't be converted to a primitive. This exposed a crash in an older
+// C++ implementation of @@search which a) still relied on Object::Equals,
+// and b) incorrectly returned isolate->pending_exception() on error.
+
+var re = /./;
+re.lastIndex = { [Symbol.toPrimitive]: 42 };
+try { "abc".search(re); } catch (_) {} // Ensure we don't crash.
+
+// Test lastIndex values of -0.0 and NaN (since @@search uses SameValue).
+
+var re = /./;
+re.exec = function(str) { assertEquals(0, re.lastIndex); return []; }
+re.lastIndex = -0.0;
+assertEquals(-0, re.lastIndex);
+"abc".search(re);
+assertEquals(-0, re.lastIndex);
+
+var re = /./;
+re.exec = function(str) { assertEquals(0, re.lastIndex); return []; }
+re.lastIndex = NaN;
+assertEquals(NaN, re.lastIndex);
+"abc".search(re);
+assertEquals(NaN, re.lastIndex);
diff --git a/deps/v8/test/mjsunit/regress-3456.js b/deps/v8/test/mjsunit/regress-3456.js
index 498953b807..3636de3011 100644
--- a/deps/v8/test/mjsunit/regress-3456.js
+++ b/deps/v8/test/mjsunit/regress-3456.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length 1
+// Flags: --min-preparse-length=1
// Arrow function parsing (commit r22366) changed the flags stored in
// PreParserExpression, and IsValidReferenceExpression() would return
diff --git a/deps/v8/test/mjsunit/regress-604044.js b/deps/v8/test/mjsunit/regress-604044.js
index 58ccfbed99..882fd5644f 100644
--- a/deps/v8/test/mjsunit/regress-604044.js
+++ b/deps/v8/test/mjsunit/regress-604044.js
@@ -2,6 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length 1
+// Flags: --min-preparse-length=1
(function(_ = function() {}){})
diff --git a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
index 0f7eeba0d3..9297c2df54 100644
--- a/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
+++ b/deps/v8/test/mjsunit/regress-sync-optimized-lists.js
@@ -29,11 +29,9 @@ f1(new Ctor(), false);
// Kick off concurrent recompilation and OSR.
var o = new Ctor();
f1(o, true);
-assertOptimized(f1, "no sync");
// Flush the optimizing compiler's queue.
%NotifyContextDisposed();
-assertUnoptimized(f1, "no sync");
// Trigger deopt.
o.c = 2.2;
diff --git a/deps/v8/test/mjsunit/regress/regress-1387.js b/deps/v8/test/mjsunit/regress/regress-1387.js
index d171d381ad..0d58eab499 100644
--- a/deps/v8/test/mjsunit/regress/regress-1387.js
+++ b/deps/v8/test/mjsunit/regress/regress-1387.js
@@ -26,13 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Tests that we always return the same type error function when trying to
-// access strict mode caller and callee.
+// access or set strict mode callee.
function foo() {
'use strict';
return arguments;
}
-var get1 = Object.getOwnPropertyDescriptor(foo(), "caller").get;
-var get2 = Object.getOwnPropertyDescriptor(foo(), "callee").get;
-assertEquals(get1, get2);
+var get = Object.getOwnPropertyDescriptor(foo(), "callee").get;
+var set = Object.getOwnPropertyDescriptor(foo(), "callee").set;
+assertEquals(get, set);
diff --git a/deps/v8/test/mjsunit/regress/regress-2296.js b/deps/v8/test/mjsunit/regress/regress-2296.js
index c00f14f172..b81e8ee784 100644
--- a/deps/v8/test/mjsunit/regress/regress-2296.js
+++ b/deps/v8/test/mjsunit/regress/regress-2296.js
@@ -30,7 +30,11 @@
Debug = debug.Debug
function listener(event, exec_state, event_data, data) {
- event_data.script().setSource(1);
+ try {
+ event_data.script().setSource(1);
+ } catch (e) {
+ assertTrue(String(e).indexOf("Source is not a string") > 0);
+ }
};
Debug.setListener(listener);
diff --git a/deps/v8/test/mjsunit/regress/regress-2318.js b/deps/v8/test/mjsunit/regress/regress-2318.js
index 771d195212..6f184302eb 100644
--- a/deps/v8/test/mjsunit/regress/regress-2318.js
+++ b/deps/v8/test/mjsunit/regress/regress-2318.js
@@ -25,11 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --nostack-trace-on-abort --stack-size=150
+// Flags: --expose-debug-as debug --stack-size=150
function f() {
var i = 0;
-
// Stack-allocate to reach the end of stack quickly.
var _A0 = 00; var _A1 = 01; var _A2 = 02; var _A3 = 03; var _A4 = 04;
var _B0 = 05; var _B1 = 06; var _B2 = 07; var _B3 = 08; var _B4 = 09;
@@ -58,10 +57,16 @@ function f() {
Debug = debug.Debug;
function listener(event, exec_state, event_data, data) {
- result = exec_state.frame().evaluate("i").value();
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ exec_state.frame(0).evaluate("i");
+ } catch (e) {
+ }
};
Debug.setListener(listener);
-var bp = Debug.setBreakPoint(f, 0);
+var bp = Debug.setBreakPoint(f, 1);
assertThrows(function() { f(); }, RangeError);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/regress/regress-252797.js b/deps/v8/test/mjsunit/regress/regress-252797.js
index 379205f599..ac776a17d8 100644
--- a/deps/v8/test/mjsunit/regress/regress-252797.js
+++ b/deps/v8/test/mjsunit/regress/regress-252797.js
@@ -31,11 +31,17 @@
// starting with a negative lookup.
// Create a holder in fast mode.
-var holder = Object.create(null, {
+var holder = Object.create({}, {
holderMethod: {value: function() {}}
});
assertTrue(%HasFastProperties(holder));
+// We assume dict usage for null prototype.
+var holder = Object.create(null, {
+ holderMethod: {value: function() {}}
+});
+assertFalse(%HasFastProperties(holder));
+
// Create a receiver into dictionary mode.
var receiver = Object.create(holder, {
killMe: {value: 0, configurable: true},
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index 2634c80c66..faffc5bf47 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax --ignition-osr --turbo-from-bytecode
+// Flags: --use-osr --allow-natives-syntax --ignition-osr
function f() {
do {
diff --git a/deps/v8/test/mjsunit/regress/regress-5071.js b/deps/v8/test/mjsunit/regress/regress-5071.js
deleted file mode 100644
index 41c1250031..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-5071.js
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug
-
-var Debug = debug.Debug;
-
-function listener(event, exec_state, event_data, data) {
- assertEquals(2, exec_state.frameCount());
- assertEquals("a", exec_state.frame(0).localName(0));
- assertEquals("1", exec_state.frame(0).localValue(0).value());
- assertEquals(1, exec_state.frame(0).localCount());
-}
-
-Debug.setListener(listener);
-
-function f() {
- var a = 1;
- {
- let b = 2;
- debugger;
- }
-}
-
-f();
diff --git a/deps/v8/test/mjsunit/regress/regress-5252.js b/deps/v8/test/mjsunit/regress/regress-5252.js
index 682d3193ea..c645416982 100644
--- a/deps/v8/test/mjsunit/regress/regress-5252.js
+++ b/deps/v8/test/mjsunit/regress/regress-5252.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --ignition --ignition-osr --turbo-from-bytecode
+// Flags: --allow-natives-syntax --ignition --ignition-osr
(function TestNonLoopyLoop() {
function f() {
diff --git a/deps/v8/test/mjsunit/regress/regress-5262.js b/deps/v8/test/mjsunit/regress/regress-5262.js
index 394bb49ca5..0b54b80623 100644
--- a/deps/v8/test/mjsunit/regress/regress-5262.js
+++ b/deps/v8/test/mjsunit/regress/regress-5262.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --ignition --ignition-osr --turbo-from-bytecode --allow-natives-syntax
+// Flags: --ignition --ignition-osr --allow-natives-syntax
function g() { return 23 }
function h() { return 42 }
diff --git a/deps/v8/test/mjsunit/regress/regress-536751.js b/deps/v8/test/mjsunit/regress/regress-536751.js
deleted file mode 100644
index a63fae3957..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-536751.js
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-restrictive-declarations
-
-// At some point, this code led to DCHECK errors in debug mode
-
-for (; false;) function foo() {};
-
-for (x in []) function foo() {};
diff --git a/deps/v8/test/mjsunit/regress/regress-542099.js b/deps/v8/test/mjsunit/regress/regress-542099.js
deleted file mode 100644
index 6345fd468a..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-542099.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-restrictive-declarations
-
-// Previously, this caused a CHECK fail in debug mode
-// https://code.google.com/p/chromium/issues/detail?id=542099
-
-var foo = {};
-var bar = foo;
-for (foo.x in {a: 1}) function foo() { return foo; }
-assertEquals("object", typeof bar);
-assertEquals("a", bar.x);
-assertEquals("function", typeof foo);
-assertEquals("function", typeof foo());
-assertSame(foo, foo());
-assertEquals(undefined, foo.x);
diff --git a/deps/v8/test/mjsunit/regress/regress-542100.js b/deps/v8/test/mjsunit/regress/regress-542100.js
deleted file mode 100644
index c16e6284fa..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-542100.js
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --no-harmony-restrictive-declarations
-
-(function() {
- var x = {a: 1}
- assertEquals("undefined", typeof f);
- with (x)
- function f() { return a; }
- assertEquals("function", typeof f);
- assertEquals(1, f());
- x.a = 2;
- assertEquals(2, f());
-})();
-
-var y = {b: 1}
-assertEquals("undefined", typeof g);
-with (y)
- function g() { return b; }
-assertEquals("function", typeof g);
-assertEquals(1, g());
-y.b = 2;
-assertEquals(2, g());
diff --git a/deps/v8/test/mjsunit/regress/regress-5434.js b/deps/v8/test/mjsunit/regress/regress-5434.js
new file mode 100644
index 0000000000..8c45a96079
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5434.js
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Ensure that we have the correct number of accesses to exec in split, and
+// that exec is called at the correct point in time.
+
+var lastIndexHasBeenSet = false;
+var countOfExecGets = 0;
+
+// Force the slow path and make sure the created splitter object has our
+// overwritten exec method (@@split does not call exec on the original regexp
+// but on a newly-created splitter which is guaranteed to be sticky).
+class ObservableExecRegExp extends RegExp {
+ constructor(pattern, flags) {
+ super(pattern, flags);
+ this.lastIndex = 42;
+
+ const re = this;
+ Object.defineProperty(this, "exec", {
+ get: function() {
+ // Ensure exec is first accessed after lastIndex has been reset to
+ // satisfy the spec.
+ assertTrue(re.lastIndex != 42);
+ countOfExecGets++;
+ return RegExp.prototype.exec;
+ }
+ });
+ }
+}
+
+
+
+var re = new ObservableExecRegExp(/x/);
+
+assertEquals(42, re.lastIndex);
+assertEquals(0, countOfExecGets);
+
+var result = "axbxc".split(re);
+
+assertEquals(5, countOfExecGets);
+assertEquals(["a", "b", "c"], result);
diff --git a/deps/v8/test/mjsunit/regress/regress-5476.js b/deps/v8/test/mjsunit/regress/regress-5476.js
new file mode 100644
index 0000000000..5d18cebf7a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5476.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict'
+
+class LeakyPromise extends Promise {
+ constructor(executor) {
+ super((resolve, reject) => { resolve();});
+ this.resolve = function() {assertEquals(this, undefined); };
+ this.reject = function() {assertEquals(this, undefined); };
+ executor(this.resolve, this.reject);
+ }
+}
+
+const p1 = new LeakyPromise((r) => r());
+const p2 = new LeakyPromise((_, r) => r());
diff --git a/deps/v8/test/mjsunit/regress/regress-5566.js b/deps/v8/test/mjsunit/regress/regress-5566.js
new file mode 100644
index 0000000000..98e1f07208
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5566.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// https://github.com/tc39/proposal-regexp-legacy-features#additional-properties-of-the-regexp-constructor
+
+const props = [ "input", "$_"
+ , "lastMatch", "$&"
+ , "lastParen", "$+"
+ , "leftContext", "$`"
+ , "rightContext", "$'"
+ , "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9"
+ ];
+
+for (let i = 0; i < props.length; i++) {
+ const prop = props[i];
+ const desc = Object.getOwnPropertyDescriptor(RegExp, prop);
+ assertTrue(desc.configurable, prop);
+ assertFalse(desc.enumerable, prop);
+ assertTrue(desc.get !== undefined, prop);
+
+ // TODO(jgruber): Although the spec proposal specifies setting setters to
+ // undefined, we are not sure that this change would be web-compatible, and
+ // we are intentionally sticking with the old behavior for now.
+ assertTrue(desc.set !== undefined, prop);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-568765.js b/deps/v8/test/mjsunit/regress/regress-568765.js
deleted file mode 100644
index 9efd8599a5..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-568765.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --expose-gc --gc-interval=216
-// Flags: --nonative-context-specialization
-
-function PrettyPrint() { return ""; }
-function fail() { }
-assertSame = function assertSame() { if (found === expected) { if (1 / found) return; } else if ((expected !== expected) && (found !== found)) { return; }; }; assertEquals = function assertEquals(expected, found, name_opt) { if ( expected) { fail(PrettyPrint()); } };
-assertTrue = function assertTrue() { assertEquals(); };
-assertThrows = function assertThrows(code, type_opt, cause_opt) { var threwException = true; try { if (typeof code == 'function') { code(); } else {; } threwException = false; } catch (e) { if (typeof type_opt == 'function') {; } if (arguments.length >= 3) {; } return; } };
-assertInstanceof = function assertInstanceof() { if (obj instanceof type) { var actualTypeName = null; var actualConstructor = Object.getPrototypeOf().constructor; if (typeof actualConstructor == "function") {; }; } };
-function modifyPropertyOrValue() { var names; try {; } catch(e) {; return; } if(!names) return; name = names[rand_value % names.length]; if (isNaN()); }
-function nop() {}
-var __v_5 = {};
-var __v_12 = {};
-var __v_13 = {};
-var __v_16 = {};
-function __f_0() {
-}
-(function (){
- function __f_6() {
- }
- a = __f_6();
- b = __f_6();
- name = "Array";
-})();
-(function (){
- function __f_1() {
- assertTrue();
- }
- __f_1();
-})();
-__v_10 = {
-}
-__v_11 = new Object();
-tailee1 = function() {
- "use strict";
- if (__v_12-- == 0) {
- }
- return nop();
-};
-%OptimizeFunctionOnNextCall(tailee1);
-assertEquals(__v_10, tailee1.call());
-__v_14 = 100000;
-gc();
-tailee2 = function() {
- "use strict";
- __v_14 = ((__v_14 | 0) - 1) | 0;
- if ((__v_14 | 0) === 0) {
- }
-};
-%OptimizeFunctionOnNextCall(tailee2);
-assertEquals(__v_11, tailee2.call());
-__v_13 = 999999;
-tailee3 = function() {
- "use strict";
- if (__v_13-- == 0) {
- }
-};
-%OptimizeFunctionOnNextCall(tailee3);
-assertEquals(__v_9, tailee3.call(__v_11, __v_9));
-tailee4 = function(px) {
- return nop(tailee4, this, px, undefined);
-};
-%OptimizeFunctionOnNextCall(tailee4);
-assertThrows(function() { tailee4.call(); });
-tailee5 = function() {
- return nop();
-};
-%OptimizeFunctionOnNextCall(tailee5);
-assertThrows(function() { tailee5.call(); });
-tailee6 = function() {
-}
-tailee7 = function( py, pz, pa, pb, pc) {
- return nop();
-};
-%OptimizeFunctionOnNextCall(tailee7);
- tailee7.call();
-
-(function() {
- Number.prototype[0] = "a";
- Number.prototype[1] = "b";
- Object.defineProperty(Number.prototype, 2, {
- get: function() {
- }
- });
- Number.prototype.length = 3;
-Array.prototype.includes.call(5);
-})();
-var __v_9 = -8;
-var __v_20 = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-575364.js b/deps/v8/test/mjsunit/regress/regress-575364.js
index 8671aec06b..34957a23c1 100644
--- a/deps/v8/test/mjsunit/regress/regress-575364.js
+++ b/deps/v8/test/mjsunit/regress/regress-575364.js
@@ -8,5 +8,4 @@ function f() {
"use asm";
}
-assertFalse(Wasm == undefined);
assertTrue(%IsNotAsmWasmCode(f));
diff --git a/deps/v8/test/mjsunit/regress/regress-5763-1.js b/deps/v8/test/mjsunit/regress/regress-5763-1.js
new file mode 100644
index 0000000000..50ad4035ae
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5763-1.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ var TA = Object.getPrototypeOf(Int8Array);
+ var obj = Reflect.construct(TA, [], Int8Array);
+ Int8Array.prototype.values.call(obj).next();
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-5763-2.js b/deps/v8/test/mjsunit/regress/regress-5763-2.js
new file mode 100644
index 0000000000..0964b7f8e6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5763-2.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+try {
+ var TA = Object.getPrototypeOf(Int8Array);
+ var obj = Reflect.construct(TA, [], Int8Array);
+ new Int8Array(4).set(obj);
+} catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-5790.js b/deps/v8/test/mjsunit/regress/regress-5790.js
new file mode 100644
index 0000000000..7790db5ea9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5790.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+ "use strict";
+ if (a) return arguments[1];
+}
+
+foo(false);
+foo(false);
+%OptimizeFunctionOnNextCall(foo);
+foo(true, 1);
+foo(true, 1);
+%OptimizeFunctionOnNextCall(foo);
+foo(false);
+foo(true, 1);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-5802.js b/deps/v8/test/mjsunit/regress/regress-5802.js
new file mode 100644
index 0000000000..6a84c09851
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-5802.js
@@ -0,0 +1,117 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ function eq(a, b) { return a == b; }
+
+ var o = { [Symbol.toPrimitive]: () => "o" };
+
+ assertTrue(eq(o, o));
+ %BaselineFunctionOnNextCall(eq);
+ assertTrue(eq(o, o));
+ %OptimizeFunctionOnNextCall(eq);
+ assertTrue(eq(o, o));
+ assertTrue(eq("o", o));
+ assertTrue(eq(o, "o"));
+ %OptimizeFunctionOnNextCall(eq);
+ assertTrue(eq(o, o));
+ assertTrue(eq("o", o));
+ assertTrue(eq(o, "o"));
+ assertOptimized(eq);
+})();
+
+(function() {
+ function ne(a, b) { return a != b; }
+
+ var o = { [Symbol.toPrimitive]: () => "o" };
+
+ assertFalse(ne(o, o));
+ %BaselineFunctionOnNextCall(ne);
+ assertFalse(ne(o, o));
+ %OptimizeFunctionOnNextCall(ne);
+ assertFalse(ne(o, o));
+ assertFalse(ne("o", o));
+ assertFalse(ne(o, "o"));
+ %OptimizeFunctionOnNextCall(ne);
+ assertFalse(ne(o, o));
+ assertFalse(ne("o", o));
+ assertFalse(ne(o, "o"));
+ assertOptimized(ne);
+})();
+
+(function() {
+ function eq(a, b) { return a == b; }
+
+ var a = {};
+ var b = {b};
+ var u = %GetUndetectable();
+
+ assertTrue(eq(a, a));
+ assertTrue(eq(b, b));
+ assertFalse(eq(a, b));
+ assertFalse(eq(b, a));
+ %BaselineFunctionOnNextCall(eq);
+ assertTrue(eq(a, a));
+ assertTrue(eq(b, b));
+ assertFalse(eq(a, b));
+ assertFalse(eq(b, a));
+ %OptimizeFunctionOnNextCall(eq);
+ assertTrue(eq(a, a));
+ assertTrue(eq(b, b));
+ assertFalse(eq(a, b));
+ assertFalse(eq(b, a));
+ assertTrue(eq(null, u));
+ assertTrue(eq(undefined, u));
+ assertTrue(eq(u, null));
+ assertTrue(eq(u, undefined));
+ %OptimizeFunctionOnNextCall(eq);
+ assertTrue(eq(a, a));
+ assertTrue(eq(b, b));
+ assertFalse(eq(a, b));
+ assertFalse(eq(b, a));
+ assertTrue(eq(null, u));
+ assertTrue(eq(undefined, u));
+ assertTrue(eq(u, null));
+ assertTrue(eq(u, undefined));
+ assertOptimized(eq);
+})();
+
+(function() {
+ function ne(a, b) { return a != b; }
+
+ var a = {};
+ var b = {b};
+ var u = %GetUndetectable();
+
+ assertFalse(ne(a, a));
+ assertFalse(ne(b, b));
+ assertTrue(ne(a, b));
+ assertTrue(ne(b, a));
+ %BaselineFunctionOnNextCall(ne);
+ assertFalse(ne(a, a));
+ assertFalse(ne(b, b));
+ assertTrue(ne(a, b));
+ assertTrue(ne(b, a));
+ %OptimizeFunctionOnNextCall(ne);
+ assertFalse(ne(a, a));
+ assertFalse(ne(b, b));
+ assertTrue(ne(a, b));
+ assertTrue(ne(b, a));
+ assertFalse(ne(null, u));
+ assertFalse(ne(undefined, u));
+ assertFalse(ne(u, null));
+ assertFalse(ne(u, undefined));
+ %OptimizeFunctionOnNextCall(ne);
+ assertFalse(ne(a, a));
+ assertFalse(ne(b, b));
+ assertTrue(ne(a, b));
+ assertTrue(ne(b, a));
+ assertFalse(ne(null, u));
+ assertFalse(ne(undefined, u));
+ assertFalse(ne(u, null));
+ assertFalse(ne(u, undefined));
+ assertOptimized(ne);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-632289.js b/deps/v8/test/mjsunit/regress/regress-632289.js
index 65a22558de..3f3059b2d4 100644
--- a/deps/v8/test/mjsunit/regress/regress-632289.js
+++ b/deps/v8/test/mjsunit/regress/regress-632289.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --ignition --turbo-from-bytecode --always-opt --allow-natives-syntax
+// Flags: --ignition --always-opt --allow-natives-syntax
try {
} catch(e) {; }
diff --git a/deps/v8/test/mjsunit/regress/regress-653407.js b/deps/v8/test/mjsunit/regress/regress-653407.js
new file mode 100644
index 0000000000..6dc28a9fd0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-653407.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --ignition --turbo
+
+// This is to test if 'this' gets correctly initialized when inlining
+// constructors in turbofan.
+
+class superClass {
+ constructor () {}
+}
+
+class subClass extends superClass {
+ constructor () {
+ super();
+ }
+}
+
+function f() {
+ new subClass();
+}
+
+f(); // We need this to collect feedback, so that subClass gets inlined in f.
+%OptimizeFunctionOnNextCall(f)
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-662845.js b/deps/v8/test/mjsunit/regress/regress-662845.js
new file mode 100644
index 0000000000..7740ed10ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-662845.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+ (function() { x = 1; })()
+ return arguments[0];
+}
+
+assertEquals(1, foo(42));
+assertEquals(1, foo(42));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(42));
diff --git a/deps/v8/test/mjsunit/regress/regress-662904.js b/deps/v8/test/mjsunit/regress/regress-662904.js
new file mode 100644
index 0000000000..00ab1cd65f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-662904.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+ var sum = 0;
+ var a = [0, "a"];
+ for (var i in a) {
+ sum += a[i];
+ }
+ return sum;
+}
+
+assertEquals("0a", foo());
+assertEquals("0a", foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("0a", foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-664087.js b/deps/v8/test/mjsunit/regress/regress-664087.js
new file mode 100644
index 0000000000..6739167930
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-664087.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g() {
+ throw 1;
+}
+
+var v = { valueOf : g };
+
+function foo(v) {
+ v++;
+}
+
+%NeverOptimizeFunction(g);
+assertThrows(function () { foo(v); });
+assertThrows(function () { foo(v); });
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function () { foo(v); });
diff --git a/deps/v8/test/mjsunit/regress/regress-998565.js b/deps/v8/test/mjsunit/regress/regress-998565.js
deleted file mode 100644
index 840c045c0c..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-998565.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-listenerCalled = false;
-
-function listener(event, exec_state, event_data, data) {
- listenerCalled = true;
- throw 1;
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-function f() {
- a=1
-};
-
-// Set a break point and call to invoke the debug event listener.
-Debug.setBreakPoint(f, 0, 0);
-f();
-
-// Make sure that the debug event listener vas invoked.
-assertTrue(listenerCalled);
diff --git a/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js b/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js
index d2bdc5084d..d535321795 100644
--- a/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js
+++ b/deps/v8/test/mjsunit/regress/regress-abort-preparsing-params.js
@@ -4,7 +4,7 @@
var outer_a;
-function f(a) {
+function f(a, b, a) {
outer_a = a;
x = 1;
x = 1;
@@ -942,5 +942,5 @@ function f(a) {
x = 1;
x = 1;
}
-f(1);
+f(1, 2, 1);
assertEquals(1, outer_a);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-632800.js b/deps/v8/test/mjsunit/regress/regress-crbug-632800.js
index 6296572c17..a4c2301217 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-632800.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-632800.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --ignition --ignition-osr --turbo-from-bytecode
+// Flags: --ignition --ignition-osr
function osr() {
for (var i = 0; i < 50000; ++i) Math.random();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-635923.js b/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
index aea5e3a97c..5b697d74ad 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-635923.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --ignition --turbo-from-bytecode --turbo-filter=f
+// Flags: --allow-natives-syntax --ignition --turbo-filter=f
function f(x) { return x + 23 }
function g(x) { return f(x) + 42 }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-644631.js b/deps/v8/test/mjsunit/regress/regress-crbug-644631.js
index 5e649a4946..bf845c7aee 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-644631.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-644631.js
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo --always-opt
+// Flags: --allow-natives-syntax --turbo --always-opt
function f() {
- new Int8Array(new ArrayBuffer(2147483648));
+ var obj = Object.freeze({});
+ %_CreateDataProperty(obj, "foo", "bar");
}
// Should not crash
-assertThrows(f, RangeError);
+assertThrows(f, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-656275.js b/deps/v8/test/mjsunit/regress/regress-crbug-656275.js
new file mode 100644
index 0000000000..74b29c1458
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-656275.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = 1;
+
+function foo(x) { a = Math.fround(x + 1); }
+
+foo(1);
+foo(1);
+%OptimizeFunctionOnNextCall(foo);
+foo(1.3);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-658185.js b/deps/v8/test/mjsunit/regress/regress-crbug-658185.js
new file mode 100644
index 0000000000..60de8d6458
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-658185.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+var t = 0;
+function foo() {
+ var o = {x:1};
+ var p = {y:2.5, x:0};
+ o = [];
+ for (var i = 0; i < 2; ++i) {
+ t = o.x;
+ o = p;
+ }
+}
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-658528.js b/deps/v8/test/mjsunit/regress/regress-crbug-658528.js
new file mode 100644
index 0000000000..a6b07482b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-658528.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ eval("var x = 1");
+ const x = 2;
+}
+
+assertThrows(f, SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-658691.js b/deps/v8/test/mjsunit/regress/regress-crbug-658691.js
new file mode 100644
index 0000000000..fee95f1eb3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-658691.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls --ignition --turbo
+
+// The {f} function is compiled using TurboFan.
+// 1) The call to {Reflect.set} has no arguments adaptation.
+// 2) The call to {Reflect.set} is in tail position.
+function f(a, b, c) {
+ "use strict";
+ return Reflect.set({});
+}
+
+// The {g} function is compiled using Ignition.
+// 1) The call to {f} requires arguments adaptation.
+// 2) The call to {f} is not in tail position.
+function g() {
+ return f() + "-no-tail";
+}
+
+assertEquals("true-no-tail", g());
+%OptimizeFunctionOnNextCall(f);
+assertEquals("true-no-tail", g());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js b/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js
new file mode 100644
index 0000000000..ef672b187f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659915a.js
@@ -0,0 +1,24 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --min-preparse-length=10
+
+let x;
+function f(a) {
+ x += a;
+}
+function g(a) {
+ f(a); return x;
+}
+function h(a) {
+ x = a; return x;
+}
+
+function boom() { return g(1) }
+
+assertEquals(1, h(1));
+assertEquals(2, boom());
+assertEquals(3, boom());
+%OptimizeFunctionOnNextCall(boom);
+assertEquals(4, boom());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js b/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js
new file mode 100644
index 0000000000..cc47f03bb1
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659915b.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --min-preparse-length=10
+
+(function() {
+ var x = 23;
+ function f() { return x; }
+ function g() { [x] = [x + 1]; }
+ function h() { g(); return x; }
+
+ function boom() { return h() }
+
+ assertEquals(24, boom());
+ assertEquals(25, boom());
+ assertEquals(26, boom());
+ %OptimizeFunctionOnNextCall(boom);
+ assertEquals(27, boom());
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-659967.js b/deps/v8/test/mjsunit/regress/regress-crbug-659967.js
new file mode 100644
index 0000000000..654f00de60
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-659967.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() { null >> arguments; }
+
+f();
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-660379.js b/deps/v8/test/mjsunit/regress/regress-crbug-660379.js
new file mode 100644
index 0000000000..84634628d6
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-660379.js
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function InlinedThrowAtEndOfTry() {
+ function g() {
+ %DeoptimizeFunction(f);
+ throw "boom";
+ }
+ function f() {
+ try {
+ g(); // Right at the end of try.
+ } catch (e) {
+ assertEquals("boom", e)
+ }
+ }
+ assertDoesNotThrow(f);
+ assertDoesNotThrow(f);
+ %OptimizeFunctionOnNextCall(f);
+ assertDoesNotThrow(f);
+})();
+
+(function InlinedThrowInFrontOfTry() {
+ function g() {
+ %DeoptimizeFunction(f);
+ throw "boom";
+ }
+ function f() {
+ g(); // Right in front of try.
+ try {
+ Math.random();
+ } catch (e) {
+ assertUnreachable();
+ }
+ }
+ assertThrows(f);
+ assertThrows(f);
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(f);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js b/deps/v8/test/mjsunit/regress/regress-crbug-661949.js
index 7a422c542b..e083cae349 100644
--- a/deps/v8/test/mjsunit/harmony/async-debug-caught-exception-cases0.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-661949.js
@@ -2,7 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-async-await --expose-debug-as debug
-// Files: test/mjsunit/harmony/async-debug-caught-exception-cases.js
+// Flags: --allow-natives-syntax
-runPart(0);
+var foo = (function() {
+ 'use asm';
+ function foo() { ''[0]; }
+ return foo;
+})();
+
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662367.js b/deps/v8/test/mjsunit/regress/regress-crbug-662367.js
new file mode 100644
index 0000000000..8e1eafa30a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662367.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --fold-constants
+
+var zero = 0;
+
+(function ConstantFoldZeroDivZero() {
+ function f() {
+ return 0 / zero;
+ }
+ assertTrue(isNaN(f()));
+ assertTrue(isNaN(f()));
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(isNaN(f()));
+})();
+
+(function ConstantFoldMinusZeroDivZero() {
+ function f() {
+ return -0 / zero;
+ }
+ assertTrue(isNaN(f()));
+ assertTrue(isNaN(f()));
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(isNaN(f()));
+})();
+
+(function ConstantFoldNaNDivZero() {
+ function f() {
+ return NaN / 0;
+ }
+ assertTrue(isNaN(f()));
+ assertTrue(isNaN(f()));
+ %OptimizeFunctionOnNextCall(f);
+ assertTrue(isNaN(f()));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662410.js b/deps/v8/test/mjsunit/regress/regress-crbug-662410.js
new file mode 100644
index 0000000000..f1cbc6b824
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662410.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(v) { return v.constructor; }
+
+g({});
+g({});
+
+function f() {
+ var i = 0;
+ do {
+ i = i + 1;
+ g(i);
+ } while (i < 1);
+}
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-662830.js b/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
new file mode 100644
index 0000000000..3126978d7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-662830.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ %_DeoptimizeNow();
+ throw 1;
+}
+
+function g() {
+ try { f(); } catch(e) { }
+ for (var i = 0; i < 3; ++i) if (i === 1) %OptimizeOsr();
+ %_DeoptimizeNow();
+}
+
+%OptimizeFunctionOnNextCall(g);
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-663750.js b/deps/v8/test/mjsunit/regress/regress-crbug-663750.js
new file mode 100644
index 0000000000..ba758e77e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-663750.js
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v = 0;
+function foo(a) {
+ v = a;
+}
+this.x = 0;
+delete x;
+
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+foo(3);
+assertEquals(3, v);
+
+Object.freeze(this);
+
+foo(4);
+foo(5);
+%OptimizeFunctionOnNextCall(foo);
+foo(6);
+assertEquals(3, v);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664084.js b/deps/v8/test/mjsunit/regress/regress-crbug-664084.js
new file mode 100644
index 0000000000..79e221e495
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664084.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return +({} + 1);
+}
+
+assertEquals(NaN, foo());
+assertEquals(NaN, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(NaN, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664469.js b/deps/v8/test/mjsunit/regress/regress-crbug-664469.js
new file mode 100644
index 0000000000..e163391112
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664469.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(a, i) {
+ a[i] = "object";
+}
+
+f("make it generic", 0);
+
+// Nearly kMaxRegularHeapObjectSize's worth of doubles.
+var kLength = 500000 / 8;
+var kValue = 0.1;
+var a = new Array(kLength);
+for (var i = 0; i < kLength; i++) {
+ a[i] = kValue;
+}
+f(a, 0);
+for (var i = 1; i < kLength; i++) {
+ assertEquals(kValue, a[i]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664506.js b/deps/v8/test/mjsunit/regress/regress-crbug-664506.js
new file mode 100644
index 0000000000..b0bf5e7591
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664506.js
@@ -0,0 +1,11 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --predictable --random-seed=-1109634722
+
+gc();
+gc();
+assertEquals("[object Object]", Object.prototype.toString.call({}));
+gc();
+assertEquals("[object Array]", Object.prototype.toString.call([]));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664802.js b/deps/v8/test/mjsunit/regress/regress-crbug-664802.js
new file mode 100644
index 0000000000..3395f4c676
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664802.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = {};
+o.__proto__ = new Proxy({}, {});
+
+var m = new Map();
+m.set({});
+m.set(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664942.js b/deps/v8/test/mjsunit/regress/regress-crbug-664942.js
new file mode 100644
index 0000000000..3b76990511
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664942.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ return 'x'[0];
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("x", foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-664974.js b/deps/v8/test/mjsunit/regress/regress-crbug-664974.js
new file mode 100644
index 0000000000..37ad4f9dc2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-664974.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+for (var i = 0; i < 2000; i++) {
+ Object.prototype['X'+i] = true;
+}
+
+var m = new Map();
+m.set(Object.prototype, 23);
+
+var o = {};
+m.set(o, 42);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-665886.js b/deps/v8/test/mjsunit/regress/regress-crbug-665886.js
new file mode 100644
index 0000000000..b3309c249f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-665886.js
@@ -0,0 +1,14 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[1].toLocaleString();
+delete Number.prototype.toLocaleString;
+[1].toLocaleString();
+var o = {};
+o.__proto__ = { toString: Array.prototype.toString };
+o.toString();
+Number.prototype.arrayToString = Array.prototype.toString;
+(42).arrayToString();
+var a = [7];
+a.toLocaleString();
diff --git a/deps/v8/test/mjsunit/regress/regress-409533.js b/deps/v8/test/mjsunit/regress/regress-crbug-679202.js
index e51065e4bf..e4350224fd 100644
--- a/deps/v8/test/mjsunit/regress/regress-409533.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-679202.js
@@ -1,12 +1,13 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-function f() {
- %_RegExpConstructResult(0, {}, {});
-}
+var x = Object.prototype;
+
+function f() { return x <= x; }
+
f();
f();
%OptimizeFunctionOnNextCall(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-682194.js b/deps/v8/test/mjsunit/regress/regress-crbug-682194.js
new file mode 100644
index 0000000000..62a4347eef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-682194.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+var proxy = new Proxy([], {
+ defineProperty() {
+ w.length = 1; // shorten the array so the backstore pointer is relocated
+ gc(); // force gc to move the array's elements backstore
+ return Object.defineProperty.apply(this, arguments);
+ }
+});
+
+class MyArray extends Array {
+ // custom constructor which returns a proxy object
+ static get[Symbol.species](){
+ return function() {
+ return proxy;
+ }
+ };
+}
+
+var w = new MyArray(100);
+w[1] = 0.1;
+w[2] = 0.1;
+
+var result = Array.prototype.concat.call(w);
+
+assertEquals(undefined, result[0]);
+assertEquals(0.1, result[1]);
+
+for (var i = 2; i < 200; i++) {
+ assertEquals(undefined, result[i]);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
index b08a94257c..57028b0cef 100644
--- a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -36,6 +36,7 @@ if (!%IsConcurrentRecompilationSupported()) {
function test(fun) {
fun();
+ %BaselineFunctionOnNextCall(fun);
fun();
// Mark for concurrent optimization.
%OptimizeFunctionOnNextCall(fun, "concurrent");
diff --git a/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js b/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
index 0673220e4a..528f517148 100644
--- a/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
+++ b/deps/v8/test/mjsunit/regress/regress-prepare-break-while-recompile.js
@@ -46,6 +46,8 @@ function bar() {
}
foo();
+%BaselineFunctionOnNextCall(foo);
+foo();
// Mark and kick off recompilation.
%OptimizeFunctionOnNextCall(foo, "concurrent");
foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js b/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js
new file mode 100644
index 0000000000..12baca0918
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-trap-allocation-memento.js
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --noalways-opt
+
+var elements_kind = {
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+}
+
+function getKind(obj) {
+ if (%HasFastSmiElements(obj)) return elements_kind.fast_smi_only;
+ if (%HasFastObjectElements(obj)) return elements_kind.fast;
+ if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
+ if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+}
+
+function assertKind(expected, obj, name_opt) {
+ assertEquals(expected, getKind(obj), name_opt);
+}
+
+(function() {
+ function make1() { return new Array(); }
+ function make2() { return new Array(); }
+ function make3() { return new Array(); }
+ function foo(a, i) { a[0] = i; }
+
+ function run_test(maker_function) {
+ var one = maker_function();
+ assertKind(elements_kind.fast_smi_only, one);
+ // Use memento to pre-transition allocation site to DOUBLE elements.
+ foo(one, 1.5);
+ // Newly created arrays should now have DOUBLE elements right away.
+ var two = maker_function();
+ assertKind(elements_kind.fast_double, two);
+ }
+
+ // Initialize the KeyedStoreIC in foo; the actual operation will be done
+ // in the runtime.
+ run_test(make1);
+ // Run again; the IC optimistically assumed to only see the transitioned
+ // (double-elements) map again, so this will make it polymorphic.
+ // The actual operation will again be done in the runtime.
+ run_test(make2);
+ // Finally, check if the initialized IC honors the allocation memento.
+ run_test(make3);
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js b/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js
new file mode 100644
index 0000000000..a76ad017d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/loop-stack-check.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction("foo", kSig_i_ii)
+ .addBody([
+ kExprLoop, 00,
+ kExprBrTable, 0xfb, 0xff, 0xff, 0xff,
+ ])
+ .exportFunc();
+ assertThrows(function() { builder.instantiate(); });
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js b/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js
new file mode 100644
index 0000000000..032a02684b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js
@@ -0,0 +1,502 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --random-seed=891196975 --expose-gc --allow-natives-syntax
+// Flags: --gc-interval=207 --stress-compaction --validate-asm
+//
+// /v8/test/mjsunit/wasm/grow-memory.js
+// /v8/test/mjsunit/regress/regress-540.js
+// /v8/test/mjsunit/regress/wasm/regression-02862.js
+// /v8/test/mjsunit/regress/regress-2813.js
+// /v8/test/mjsunit/regress/regress-323845.js
+// Begin stripped down and modified version of mjsunit.js for easy minimization in CF.
+
+function MjsUnitAssertionError(message) {}
+MjsUnitAssertionError.prototype.toString = function() {
+ return this.message;
+};
+var assertSame;
+var assertEquals;
+var assertEqualsDelta;
+var assertArrayEquals;
+var assertPropertiesEqual;
+var assertToStringEquals;
+var assertTrue;
+var assertFalse;
+var triggerAssertFalse;
+var assertNull;
+var assertNotNull;
+var assertThrows;
+var assertDoesNotThrow;
+var assertInstanceof;
+var assertUnreachable;
+var assertOptimized;
+var assertUnoptimized;
+
+function classOf(object) {
+ var string = Object.prototype.toString.call(object);
+ return string.substring(8, string.length - 1);
+}
+
+function PrettyPrint(value) {
+ return "";
+}
+
+function PrettyPrintArrayElement(value, index, array) {
+ return "";
+}
+
+function fail(expectedText, found, name_opt) {}
+
+function deepObjectEquals(a, b) {
+ var aProps = Object.keys(a);
+ aProps.sort();
+ var bProps = Object.keys(b);
+ bProps.sort();
+ if (!deepEquals(aProps, bProps)) {
+ return false;
+ }
+ for (var i = 0; i < aProps.length; i++) {
+ if (!deepEquals(a[aProps[i]], b[aProps[i]])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+function deepEquals(a, b) {
+ if (a === b) {
+ if (a === 0) return (1 / a) === (1 / b);
+ return true;
+ }
+ if (typeof a != typeof b) return false;
+ if (typeof a == "number") return isNaN(a) && isNaN(b);
+ if (typeof a !== "object" && typeof a !== "function") return false;
+ var objectClass = classOf(a);
+ if (objectClass !== classOf(b)) return false;
+ if (objectClass === "RegExp") {
+ return (a.toString() === b.toString());
+ }
+ if (objectClass === "Function") return false;
+ if (objectClass === "Array") {
+ var elementCount = 0;
+ if (a.length != b.length) {
+ return false;
+ }
+ for (var i = 0; i < a.length; i++) {
+ if (!deepEquals(a[i], b[i])) return false;
+ }
+ return true;
+ }
+ if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") {
+ if (a.valueOf() !== b.valueOf()) return false;
+ }
+ return deepObjectEquals(a, b);
+}
+assertSame = function assertSame(expected, found, name_opt) {
+ if (found === expected) {
+ if (expected !== 0 || (1 / expected) == (1 / found)) return;
+ } else if ((expected !== expected) && (found !== found)) {
+ return;
+ }
+ fail(PrettyPrint(expected), found, name_opt);
+};
+assertEquals = function assertEquals(expected, found, name_opt) {
+ if (!deepEquals(found, expected)) {
+ fail(PrettyPrint(expected), found, name_opt);
+ }
+};
+assertEqualsDelta = function assertEqualsDelta(expected, found, delta, name_opt) {
+ assertTrue(Math.abs(expected - found) <= delta, name_opt);
+};
+assertArrayEquals = function assertArrayEquals(expected, found, name_opt) {
+ var start = "";
+ if (name_opt) {
+ start = name_opt + " - ";
+ }
+ assertEquals(expected.length, found.length, start + "array length");
+ if (expected.length == found.length) {
+ for (var i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], found[i], start + "array element at index " + i);
+ }
+ }
+};
+assertPropertiesEqual = function assertPropertiesEqual(expected, found, name_opt) {
+ if (!deepObjectEquals(expected, found)) {
+ fail(expected, found, name_opt);
+ }
+};
+assertToStringEquals = function assertToStringEquals(expected, found, name_opt) {
+ if (expected != String(found)) {
+ fail(expected, found, name_opt);
+ }
+};
+assertTrue = function assertTrue(value, name_opt) {
+ assertEquals(true, value, name_opt);
+};
+assertFalse = function assertFalse(value, name_opt) {
+ assertEquals(false, value, name_opt);
+};
+assertNull = function assertNull(value, name_opt) {
+ if (value !== null) {
+ fail("null", value, name_opt);
+ }
+};
+assertNotNull = function assertNotNull(value, name_opt) {
+ if (value === null) {
+ fail("not null", value, name_opt);
+ }
+};
+assertThrows = function assertThrows(code, type_opt, cause_opt) {
+ var threwException = true;
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ threwException = false;
+ } catch (e) {
+ if (typeof type_opt == 'function') {
+ assertInstanceof(e, type_opt);
+ }
+ if (arguments.length >= 3) {
+ assertEquals(e.type, cause_opt);
+ }
+ return;
+ }
+};
+assertInstanceof = function assertInstanceof(obj, type) {
+ if (!(obj instanceof type)) {
+ var actualTypeName = null;
+ var actualConstructor = Object.getPrototypeOf(obj).constructor;
+ if (typeof actualConstructor == "function") {
+ actualTypeName = actualConstructor.name || String(actualConstructor);
+ }
+ fail("Object <" + PrettyPrint(obj) + "> is not an instance of <" + (type.name || type) + ">" + (actualTypeName ? " but of < " + actualTypeName + ">" : ""));
+ }
+};
+assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) {
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval(code);
+ }
+ } catch (e) {
+ fail("threw an exception: ", e.message || e, name_opt);
+ }
+};
+assertUnreachable = function assertUnreachable(name_opt) {
+ var message = "Fail" + "ure: unreachable";
+ if (name_opt) {
+ message += " - " + name_opt;
+ }
+};
+var OptimizationStatus = function() {}
+assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) {
+ if (sync_opt === undefined) sync_opt = "";
+ assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt);
+}
+assertOptimized = function assertOptimized(fun, sync_opt, name_opt) {
+ if (sync_opt === undefined) sync_opt = "";
+ assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt);
+}
+triggerAssertFalse = function() {}
+try {
+ console.log;
+ print = console.log;
+ alert = console.log;
+} catch (e) {}
+
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ t();
+ } catch (e) {
+ f();
+ }
+ };
+ try {
+ t();
+ } catch (e) {}
+}
+
+function quit() {}
+
+function nop() {}
+try {
+ gc;
+} catch (e) {
+ gc = nop;
+}
+
+function getRandomProperty(v, rand) {
+ var properties = Object.getOwnPropertyNames(v);
+ var proto = Object.getPrototypeOf(v);
+ if (proto) {
+ properties = properties.concat(Object.getOwnPropertyNames(proto));
+ }
+ if (properties.includes("constructor") && v.constructor.hasOwnProperty("__proto__")) {
+ properties = properties.concat(Object.getOwnPropertyNames(v.constructor.__proto__));
+ }
+ if (properties.length == 0) {
+ return "0";
+ }
+ return properties[rand % properties.length];
+}
+// End stripped down and modified version of mjsunit.js.
+
+var __v_0 = {};
+var __v_1 = {};
+var __v_2 = {};
+var __v_3 = {};
+var __v_4 = -1073741824;
+var __v_5 = {};
+var __v_6 = 1;
+var __v_7 = 1073741823;
+var __v_8 = {};
+var __v_9 = {};
+var __v_10 = 4294967295;
+var __v_11 = this;
+var __v_12 = {};
+var __v_13 = {};
+
+
+function __f_18(__f_17, y) {
+ eval(__f_17);
+ return y();
+}
+try {
+ var __v_17 = __f_18("function y() { return 1; }", function() {
+ return 0;
+ })
+ assertEquals(1, __v_17);
+ gc();
+ __v_17 =
+ (function(__f_17) {
+ function __f_17() {
+ return 3;
+ }
+ return __f_17();
+ })(function() {
+ return 2;
+ });
+ assertEquals(3, __v_17);
+ __v_17 =
+ (function(__f_17) {
+ function __f_17() {
+ return 5;
+ }
+ return arguments[0]();
+ })(function() {
+ return -1073741825;
+ });
+ assertEquals(5, __v_17);
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_27() {}
+try {
+ var __v_24 = {};
+ var __v_21 = {};
+ var __v_22 = {};
+ var __v_20 = {};
+ __v_58 = {
+ instantiateModuleFromAsm: function(text, ffi, heap) {
+ var __v_21 = eval('(' + text + ')');
+ if (__f_27()) {
+ throw "validate failure";
+ }
+ var __v_20 = __v_21();
+ if (__f_27()) {
+ throw "bad module args";
+ }
+ }
+ };
+ __f_21 = function __f_21() {
+ if (found === expected) {
+ if (1 / expected) return;
+ } else if ((expected !== expected) && (found !== found)) {
+ return;
+ };
+ };
+ __f_28 = function __f_28() {
+ if (!__f_23()) {
+ __f_125(__f_69(), found, name_opt);
+ }
+ };
+ __f_24 = function __f_24(code, type_opt, cause_opt) {
+ var __v_24 = true;
+ try {
+ if (typeof code == 'function') {
+ code();
+ } else {
+ eval();
+ }
+ __v_24 = false;
+ } catch (e) {
+ if (typeof type_opt == 'function') {
+ __f_22();
+ }
+ if (arguments.length >= 3) {
+ __f_28();
+ }
+ return;
+ }
+ };
+ __f_22 = function __f_22() {
+ if (obj instanceof type) {
+ obj.constructor;
+ if (typeof __v_57 == "function") {;
+ };
+ }
+ };
+ try {
+ __f_28();
+ __v_82.__p_750895751 = __v_82[getRandomProperty()];
+ } catch (e) {
+ "Caught: " + e;
+ }
+ __f_19();
+ gc();
+ __f_19(19, __f_24);
+ __f_19();
+ __f_19();
+ __f_24(function() {
+ __v_58.instantiateModuleFromAsm(__f_28.toString()).__f_20();
+ });
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_19() {
+ "use asm";
+
+ function __f_20() {}
+ return {
+ __f_20: __f_20
+ };
+}
+try {
+ __f_19();
+ __f_19();
+ __f_19();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_29() {}
+try {
+ __f_19();
+ try {
+ __f_19();
+ gc();
+ __f_25();
+ } catch (e) {
+ "Caught: " + e;
+ }
+ __f_19();
+ __f_19();
+ __f_19();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_23() {
+ "use asm";
+
+ function __f_20() {}
+ return {
+ __f_20: __f_20
+ };
+}
+try {
+ __f_19();
+ __f_19();
+ __f_19();
+ __f_19();
+ gc();
+ __f_19();
+ __f_19();
+ __f_19();
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_26(stdlib) {
+ "use asm";
+ var __v_2 = new stdlib.Int32Array();
+ __v_22[4294967295] | 14 + 1 | 14;
+ return {
+ __f_20: __f_20
+ };
+}
+
+function __f_25() {
+ var __v_19 = new ArrayBuffer();
+ var __v_23 = new Int32Array(__v_19);
+ var module = __v_58.instantiateModuleFromAsm(__f_26.toString());
+ __f_28();
+ gc();
+}
+try {
+ (function() {})();
+ (function() {})();
+ try {
+ (function() {
+ __v_23.__defineGetter__(getRandomProperty(__v_23, 580179357), function() {
+ gc();
+ return __f_25(__v_23);
+ });
+ var __v_23 = 0x87654321;
+ __v_19.__f_89();
+ })();
+ } catch (e) {;
+ }
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_30(x) {
+ var __v_30 = x + 1;
+ var __v_31 = x + 2;
+ if (x != 0) {
+ if (x > 0 & x < 100) {
+ return __v_30;
+ }
+ }
+ return 0;
+}
+try {
+ assertEquals(0, __f_30(0));
+ assertEquals(0, __f_30(0));
+ %OptimizeFunctionOnNextCall(__f_30);
+ assertEquals(3, __f_30(2));
+} catch (e) {
+ print("Caught: " + e);
+}
+
+function __f_31() {
+ __f_32.arguments;
+}
+
+function __f_32(x) {
+ __f_31();
+}
+
+function __f_33() {
+ __f_32({});
+}
+try {
+ __f_33();
+ __f_33();
+ __f_33();
+ %OptimizeFunctionOnNextCall(__f_33);
+ __f_33();
+ gc();
+} catch (e) {
+ print("Caught: " + e);
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-5531.js b/deps/v8/test/mjsunit/regress/wasm/regression-5531.js
new file mode 100644
index 0000000000..9c1c092519
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-5531.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction("foo", kSig_i_v)
+ .addBody([
+ kExprI32Const, 0x00,
+ kExprI8Const, 0xcb,
+ kExprI8Const, 0xff,
+ kExprBrTable, 0xcb, 0xcb, 0xcb, 0x00, 0x00, 0xcb, 0x00 // entries=1238475
+ ])
+ .exportFunc();
+ assertThrows(function() { builder.instantiate(); });
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-648079.js b/deps/v8/test/mjsunit/regress/wasm/regression-648079.js
new file mode 100644
index 0000000000..e9d14175e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-648079.js
@@ -0,0 +1,324 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+"use asm";
+var builder = new WasmModuleBuilder();
+builder.addFunction("regression_648079", kSig_s_v)
+ .addBody([
+ // locals:
+ 0x00,
+ // body:
+ kExprI64RemU,
+ kExprI64Ctz,
+ kExprI64LeU,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprI64Ctz,
+ kExprI64Ne,
+ kExprI64ShrS,
+ kExprI64GtS,
+ kExprI64RemU,
+ kExprUnreachable,
+ kExprI64RemU,
+ kExprI32Eqz,
+ kExprI64LeU,
+ kExprDrop,
+ kExprF32Add,
+ kExprI64Ior,
+ kExprF32CopySign,
+ kExprI64Ne,
+ kExprI64GeS,
+ kExprUnreachable,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprIf, 10, // @32
+ kExprBlock, 00, // @34
+ kExprBr, // depth=109
+ kExprI64Shl,
+ kExprI64LeU,
+ kExprI64GeS,
+ kExprI64Clz,
+ kExprF32Min,
+ kExprF32Eq,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprI32Const,
+ kExprUnreachable,
+ kExprBr, // depth=101
+ kExprF32Div,
+ kExprI64GtU,
+ kExprI64GeS,
+ kExprI64Clz,
+ kExprSelect,
+ kExprI64GtS,
+ kExprI64RemU,
+ kExprI64LeU,
+ kExprI64Shl,
+ kExprI64Ctz,
+ kExprLoop, 01, // @63 i32
+ kExprElse, // @65
+ kExprI64LeU,
+ kExprI64RemU,
+ kExprI64Ne,
+ kExprI64GeS,
+ kExprI32Const,
+ kExprI64GtS,
+ kExprI64LoadMem32U,
+ kExprI64Clz,
+ kExprI64Shl,
+ kExprI64Ne,
+ kExprI64ShrS,
+ kExprI64GtS,
+ kExprI64DivU,
+ kExprI64Ne,
+ kExprI64GtS,
+ kExprI64Ne,
+ kExprI64Popcnt,
+ kExprI64DivU,
+ kExprI64DivU,
+ kExprSelect,
+ kExprI64Ctz,
+ kExprI64Popcnt,
+ kExprI64RemU,
+ kExprI64Clz,
+ kExprF64Sub,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprI64RemU,
+ kExprI64Ctz,
+ kExprI64LeU,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprBrIf, // depth=116
+ kExprF32Min,
+ kExprI64GtU,
+ kExprBlock, 01, // @107 i32
+ kExprTeeLocal,
+ kExprBlock, 01, // @111 i32
+ kExprBlock, 01, // @113 i32
+ kExprBlock, 01, // @115 i32
+ kExprBlock, 01, // @117 i32
+ kExprBlock, 01, // @119 i32
+ kExprBlock, 01, // @121 i32
+ kExprBlock, 01, // @123 i32
+ kExprBlock, 88, // @125
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprLoop, 40, // @131
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprI32Add,
+ kExprBlock, 05, // @136
+ kExprUnreachable,
+ kExprIf, 02, // @139 i64
+ kExprBlock, 01, // @141 i32
+ kExprBrIf, // depth=16
+ kExprLoop, 00, // @145
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprReturn,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprI64LoadMem16U,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprNop,
+ kExprBr, // depth=1
+ kExprElse, // @164
+ kExprF32Trunc,
+ kExprI32Add,
+ kExprCallIndirect, // sig #1
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprBlock, 00, // @172
+ kExprI64RemU,
+ kExprI64Ctz,
+ kExprI64LeU,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprDrop,
+ kExprI64Popcnt,
+ kExprF32Min,
+ kExprUnreachable,
+ kExprF64Sub,
+ kExprI32Const,
+ kExprUnreachable,
+ kExprGetLocal,
+ kExprI64LoadMem32U,
+ kExprUnreachable,
+ kExprI64RemU,
+ kExprI32Eqz,
+ kExprI64LeU,
+ kExprDrop,
+ kExprF32Add,
+ kExprI64Ior,
+ kExprF32CopySign,
+ kExprI64Ne,
+ kExprI64GeS,
+ kExprUnreachable,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprIf, 10, // @216
+ kExprBlock, 00, // @218
+ kExprBr, // depth=109
+ kExprI64Shl,
+ kExprI64LeU,
+ kExprI64GeS,
+ kExprI64Clz,
+ kExprF32Min,
+ kExprF32Eq,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprF64Min,
+ kExprI32Const,
+ kExprBr, // depth=101
+ kExprF32Div,
+ kExprI64GtU,
+ kExprI64GeS,
+ kExprI64Clz,
+ kExprI64Popcnt,
+ kExprF64Lt,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprLoop, 01, // @247 i32
+ kExprElse, // @249
+ kExprI64LeU,
+ kExprI64RemU,
+ kExprI64Ne,
+ kExprI64GeS,
+ kExprI32Const,
+ kExprBlock, 01, // @256 i32
+ kExprBlock, 01, // @258 i32
+ kExprBlock, 01, // @260 i32
+ kExprBlock, 01, // @262 i32
+ kExprBlock, 01, // @264 i32
+ kExprF32Ge,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprLoop, 40, // @271
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprI32Add,
+ kExprBlock, 01, // @276 i32
+ kExprUnreachable,
+ kExprIf, 02, // @279 i64
+ kExprBlock, 00, // @281
+ kExprBrIf, // depth=16
+ kExprLoop, 00, // @285
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprReturn,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprI64LoadMem16U,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprNop,
+ kExprBr, // depth=1
+ kExprElse, // @304
+ kExprF32Trunc,
+ kExprI32Add,
+ kExprCallIndirect, // sig #1
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprBlock, 00, // @312
+ kExprI64RemU,
+ kExprI64Ctz,
+ kExprI64LeU,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprDrop,
+ kExprI64Popcnt,
+ kExprF32Min,
+ kExprUnreachable,
+ kExprF64Sub,
+ kExprI32Const,
+ kExprUnreachable,
+ kExprGetLocal,
+ kExprI64LoadMem32U,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprNop,
+ kExprBr, // depth=1
+ kExprElse, // @348
+ kExprF32Trunc,
+ kExprI32Add,
+ kExprCallIndirect, // sig #1
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprBlock, 00, // @356
+ kExprI64RemU,
+ kExprI64Ctz,
+ kExprI64LeU,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprUnreachable,
+ kExprDrop,
+ kExprI64Popcnt,
+ kExprF32Min,
+ kExprUnreachable,
+ kExprF64Sub,
+ kExprI32Const,
+ kExprUnreachable,
+ kExprGetLocal,
+ kExprI64LoadMem32U,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprF32Trunc,
+ kExprUnreachable,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ kExprF64Min,
+ ])
+ .exportFunc();
+assertThrows(function() { builder.instantiate(); });
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-651961.js b/deps/v8/test/mjsunit/regress/wasm/regression-651961.js
index abdec98358..30f6565d32 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-651961.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-651961.js
@@ -9,12 +9,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function() {
var builder = new WasmModuleBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, 32, false);
builder.addFunction("foo", kSig_i_v)
.addBody([
- kExprMemorySize,
+ kExprMemorySize, kMemoryZero,
kExprI32Const, 0x10,
- kExprGrowMemory,
+ kExprGrowMemory, kMemoryZero,
kExprI32Mul,
])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-654377.js b/deps/v8/test/mjsunit/regress/wasm/regression-654377.js
new file mode 100644
index 0000000000..871da72114
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-654377.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction("foo", kSig_i_v)
+ .addBody([
+ kExprI32Const, 00,
+ kExprMemorySize,
+ kExprBrIf, 00,
+ kExprMemorySize,
+ kExprBr, 0xe7, 0xd2, 0xf2, 0xff, 0x1d
+ ])
+ .exportFunc();
+ assertThrows(function() { builder.instantiate(); });
+})();
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-default.js b/deps/v8/test/mjsunit/shared-function-tier-up-default.js
new file mode 100644
index 0000000000..09c5f8e7fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-default.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --mark-shared-functions-for-tier-up --allow-natives-syntax --no-ignition --no-ignition-staging --no-turbo
+
+(function() {
+ var sum = 0;
+ var i = 0;
+ for (var i = 0; i < 3; ++i) {
+ var f = function(x) {
+ return 2 * x;
+ }
+ sum += f(i);
+
+ if (%GetOptimizationStatus(f) == 3 || %GetOptimizationStatus(f) == 4) {
+ // If we are always or never optimizing f, just exit, this test is useless.
+ return;
+ }
+
+ if (i == 1) {
+ // f must be baseline code.
+ assertEquals(2, %GetOptimizationStatus(f));
+
+ // Run twice (i = 0, 1), then tier-up.
+ %OptimizeFunctionOnNextCall(f);
+ } else if (i == 2) {
+ // Tier-up at i = 2 should go up to crankshaft.
+ assertEquals(1, %GetOptimizationStatus(f));
+ }
+ }
+})()
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-ignition.js b/deps/v8/test/mjsunit/shared-function-tier-up-ignition.js
new file mode 100644
index 0000000000..607c2e1e04
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-ignition.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --mark-shared-functions-for-tier-up --allow-natives-syntax --ignition-staging --no-turbo
+
+(function() {
+ var sum = 0;
+ var i = 0;
+ for (var i = 0; i < 5; ++i) {
+ var f = function(x) {
+ return 2 * x;
+ }
+ sum += f(i);
+
+ if (%GetOptimizationStatus(f) == 3 || %GetOptimizationStatus(f) == 4) {
+ // If we are always or never optimizing f, just exit, this test is useless.
+ return;
+ }
+
+ if (i == 1) {
+ // f must be interpreted code.
+ assertEquals(8, %GetOptimizationStatus(f));
+
+ // Allow it to run twice (i = 0, 1), then tier-up to baseline.
+ %BaselineFunctionOnNextCall(f);
+ } else if (i == 2) {
+ // Tier-up at i = 2 should only go up to baseline.
+ assertEquals(2, %GetOptimizationStatus(f));
+ } else if (i == 3) {
+ // Now f must be baseline code.
+ assertEquals(2, %GetOptimizationStatus(f));
+
+ // Run two more times (i = 2, 3), then tier-up to optimized.
+ %OptimizeFunctionOnNextCall(f);
+ } else if (i == 4) {
+ // Tier-up at i = 4 should now go up to crankshaft.
+ assertEquals(1, %GetOptimizationStatus(f));
+ }
+ }
+})()
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
new file mode 100644
index 0000000000..aed30e8150
--- /dev/null
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --mark-shared-functions-for-tier-up --allow-natives-syntax --ignition-staging --turbo
+
+(function() {
+ var sum = 0;
+ var i = 0;
+ for (var i = 0; i < 3; ++i) {
+ var f = function(x) {
+ return 2 * x;
+ }
+ sum += f(i);
+
+ if (%GetOptimizationStatus(f) == 3 || %GetOptimizationStatus(f) == 4) {
+ // If we are always or never optimizing f, just exit, this test is useless.
+ return;
+ }
+
+ if (i == 1) {
+ // f must be interpreted code.
+ assertEquals(8, %GetOptimizationStatus(f));
+
+ // Run twice (i = 0, 1), then tier-up.
+ %OptimizeFunctionOnNextCall(f);
+ } else if (i == 2) {
+ // Tier-up at i = 2 should go up to turbofan.
+ assertEquals(7, %GetOptimizationStatus(f));
+ }
+ }
+})()
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index f7f95da88c..736ff1b9f1 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -1111,14 +1111,14 @@ function CheckArgumentsPillDescriptor(func, name) {
}
var args = strict();
- CheckArgumentsPillDescriptor(args, "caller");
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(args, "caller"));
CheckArgumentsPillDescriptor(args, "callee");
args = strict(17, "value", strict);
assertEquals(17, args[0])
assertEquals("value", args[1])
assertEquals(strict, args[2]);
- CheckArgumentsPillDescriptor(args, "caller");
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(args, "caller"));
CheckArgumentsPillDescriptor(args, "callee");
function outer() {
@@ -1130,14 +1130,14 @@ function CheckArgumentsPillDescriptor(func, name) {
}
var args = outer()();
- CheckArgumentsPillDescriptor(args, "caller");
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(args, "caller"));
CheckArgumentsPillDescriptor(args, "callee");
args = outer()(17, "value", strict);
assertEquals(17, args[0])
assertEquals("value", args[1])
assertEquals(strict, args[2]);
- CheckArgumentsPillDescriptor(args, "caller");
+ assertEquals(undefined, Object.getOwnPropertyDescriptor(args, "caller"));
CheckArgumentsPillDescriptor(args, "callee");
})();
diff --git a/deps/v8/test/mjsunit/tools/profviz-test.default b/deps/v8/test/mjsunit/tools/profviz-test.default
index bff249d651..040afb4217 100644
--- a/deps/v8/test/mjsunit/tools/profviz-test.default
+++ b/deps/v8/test/mjsunit/tools/profviz-test.default
@@ -1,5 +1,5 @@
[
- "set yrange [0:25.5]",
+ "set yrange [0:24.5]",
"set xlabel \"execution time in ms\"",
"set xrange [2.4204999999999997:141.1669999999999]",
"set style fill pattern 2 bo 1",
@@ -17,7 +17,7 @@
"set object 6 rect from 57.242999999999974, 7 to 57.329716562499975, 6.766323024054983 fc rgb \"#9944CC\"",
"set object 7 rect from 58.751499999999965, 7 to 58.838216562499966, 6.766323024054983 fc rgb \"#9944CC\"",
"set object 8 rect from 60.72499999999996, 7 to 60.81171656249996, 6.766323024054983 fc rgb \"#9944CC\"",
- "set ytics out nomirror (\"execution (59.6%%)\" 12.5, \"external (0.2%%)\" 13.5, \"compile unopt (3.1%%)\" 14.5, \"recompile sync (6.6%%)\" 15.5, \"recompile async (11.6%%)\" 16.5, \"compile eval (0.0%%)\" 17.5, \"ic miss (0.0%%)\" 18.5, \"parse (9.9%%)\" 19.5, \"preparse (0.6%%)\" 20.5, \"lazy parse (2.9%%)\" 21.5, \"gc scavenge (1.6%%)\" 22.5, \"gc compaction (3.3%%)\" 23.5, \"gc context (0.0%%)\" 24.5, \"code kind color coding\" 11, \"code kind in execution\" 10, \"top 8 js stack frames\" 9, \"pause times\" 0, \"max deopt size: 9.1 kB\" 7)",
+ "set ytics out nomirror (\"execution (59.6%%)\" 12.5, \"external (0.2%%)\" 13.5, \"compile unopt (3.1%%)\" 14.5, \"recompile sync (6.6%%)\" 15.5, \"recompile async (11.6%%)\" 16.5, \"compile eval (0.0%%)\" 17.5, \"parse (9.9%%)\" 18.5, \"preparse (0.6%%)\" 19.5, \"lazy parse (2.9%%)\" 20.5, \"gc scavenge (1.6%%)\" 21.5, \"gc compaction (3.3%%)\" 22.5, \"gc context (0.0%%)\" 23.5, \"code kind color coding\" 11, \"code kind in execution\" 10, \"top 8 js stack frames\" 9, \"pause times\" 0, \"max deopt size: 9.1 kB\" 7)",
"set object 9 rect from 42.11000000000001, 12.83 to 42.28050000000001, 12.17 fc rgb \"#000000\"",
"set object 10 rect from 42.298000000000016, 12.83 to 42.30000000000002, 12.17 fc rgb \"#000000\"",
"set object 11 rect from 42.31450000000002, 12.83 to 42.62700000000002, 12.17 fc rgb \"#000000\"",
@@ -448,232 +448,232 @@
"set object 436 rect from 108.1159999999999, 16.83 to 110.07649999999991, 16.17 fc rgb \"#CC4499\"",
"set object 437 rect from 131.1424999999999, 16.83 to 133.02899999999988, 16.17 fc rgb \"#CC4499\"",
"set object 438 rect from 141.13349999999986, 16.83 to 141.1669999999999, 16.17 fc rgb \"#CC4499\"",
- "set object 439 rect from 22.2675, 19.83 to 22.3815, 19.17 fc rgb \"#00CC00\"",
- "set object 440 rect from 22.665, 19.83 to 23.1135, 19.17 fc rgb \"#00CC00\"",
- "set object 441 rect from 27.951000000000004, 19.83 to 27.972500000000004, 19.17 fc rgb \"#00CC00\"",
- "set object 442 rect from 27.993000000000002, 19.83 to 28.013500000000004, 19.17 fc rgb \"#00CC00\"",
- "set object 443 rect from 28.043000000000003, 19.83 to 28.063500000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 444 rect from 28.085000000000004, 19.83 to 28.087500000000002, 19.17 fc rgb \"#00CC00\"",
- "set object 445 rect from 28.115000000000002, 19.83 to 28.139500000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 446 rect from 28.154000000000007, 19.83 to 28.260000000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 447 rect from 28.309500000000003, 19.83 to 28.374000000000006, 19.17 fc rgb \"#00CC00\"",
- "set object 448 rect from 28.383500000000005, 19.83 to 28.385000000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 449 rect from 28.396500000000003, 19.83 to 28.445000000000007, 19.17 fc rgb \"#00CC00\"",
- "set object 450 rect from 28.459500000000006, 19.83 to 28.463000000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 451 rect from 28.489500000000007, 19.83 to 28.499000000000006, 19.17 fc rgb \"#00CC00\"",
- "set object 452 rect from 28.512500000000006, 19.83 to 28.516000000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 453 rect from 28.529500000000006, 19.83 to 28.533000000000005, 19.17 fc rgb \"#00CC00\"",
- "set object 454 rect from 28.554500000000004, 19.83 to 28.557000000000006, 19.17 fc rgb \"#00CC00\"",
- "set object 455 rect from 28.573500000000006, 19.83 to 28.579000000000008, 19.17 fc rgb \"#00CC00\"",
- "set object 456 rect from 28.59950000000001, 19.83 to 28.602000000000007, 19.17 fc rgb \"#00CC00\"",
- "set object 457 rect from 28.623500000000007, 19.83 to 28.625000000000007, 19.17 fc rgb \"#00CC00\"",
- "set object 458 rect from 28.637500000000006, 19.83 to 28.647000000000006, 19.17 fc rgb \"#00CC00\"",
- "set object 459 rect from 28.657500000000006, 19.83 to 28.669000000000008, 19.17 fc rgb \"#00CC00\"",
- "set object 460 rect from 28.682500000000005, 19.83 to 28.686000000000007, 19.17 fc rgb \"#00CC00\"",
- "set object 461 rect from 28.695500000000006, 19.83 to 28.701000000000008, 19.17 fc rgb \"#00CC00\"",
- "set object 462 rect from 28.72450000000001, 19.83 to 28.811000000000007, 19.17 fc rgb \"#00CC00\"",
- "set object 463 rect from 28.83250000000001, 19.83 to 28.907500000000006, 19.17 fc rgb \"#00CC00\"",
- "set object 464 rect from 28.97100000000001, 19.83 to 28.97450000000001, 19.17 fc rgb \"#00CC00\"",
- "set object 465 rect from 28.99600000000001, 19.83 to 28.99850000000001, 19.17 fc rgb \"#00CC00\"",
- "set object 466 rect from 29.01200000000001, 19.83 to 29.01350000000001, 19.17 fc rgb \"#00CC00\"",
- "set object 467 rect from 29.02600000000001, 19.83 to 29.056500000000007, 19.17 fc rgb \"#00CC00\"",
- "set object 468 rect from 29.06900000000001, 19.83 to 29.159500000000012, 19.17 fc rgb \"#00CC00\"",
- "set object 469 rect from 29.17100000000001, 19.83 to 29.18450000000001, 19.17 fc rgb \"#00CC00\"",
- "set object 470 rect from 29.19400000000001, 19.83 to 41.84850000000001, 19.17 fc rgb \"#00CC00\"",
- "set object 471 rect from 41.87900000000001, 19.83 to 41.88650000000001, 19.17 fc rgb \"#00CC00\"",
- "set object 472 rect from 27.972500000000004, 20.83 to 28.053000000000004, 20.17 fc rgb \"#44CC00\"",
- "set object 473 rect from 28.063500000000005, 20.83 to 28.169000000000004, 20.17 fc rgb \"#44CC00\"",
- "set object 474 rect from 28.260000000000005, 20.83 to 28.489500000000007, 20.17 fc rgb \"#44CC00\"",
- "set object 475 rect from 28.499000000000006, 20.83 to 28.761500000000005, 20.17 fc rgb \"#44CC00\"",
- "set object 476 rect from 28.78900000000001, 20.83 to 28.847500000000007, 20.17 fc rgb \"#44CC00\"",
- "set object 477 rect from 28.907500000000006, 20.83 to 29.047000000000008, 20.17 fc rgb \"#44CC00\"",
- "set object 478 rect from 29.056500000000007, 20.83 to 29.111000000000008, 20.17 fc rgb \"#44CC00\"",
- "set object 479 rect from 29.12350000000001, 20.83 to 29.21900000000001, 20.17 fc rgb \"#44CC00\"",
- "set object 480 rect from 41.82650000000001, 20.83 to 41.83500000000001, 20.17 fc rgb \"#44CC00\"",
- "set object 481 rect from 41.84850000000001, 20.83 to 41.87900000000001, 20.17 fc rgb \"#44CC00\"",
- "set object 482 rect from 16.737, 21.83 to 16.9595, 21.17 fc rgb \"#00CC44\"",
- "set object 483 rect from 17.8715, 21.83 to 18.017000000000003, 21.17 fc rgb \"#00CC44\"",
- "set object 484 rect from 18.992, 21.83 to 19.0685, 21.17 fc rgb \"#00CC44\"",
- "set object 485 rect from 20.52, 21.83 to 20.5975, 21.17 fc rgb \"#00CC44\"",
- "set object 486 rect from 21.109, 21.83 to 21.1335, 21.17 fc rgb \"#00CC44\"",
- "set object 487 rect from 21.212, 21.83 to 21.2695, 21.17 fc rgb \"#00CC44\"",
- "set object 488 rect from 21.4595, 21.83 to 21.49, 21.17 fc rgb \"#00CC44\"",
- "set object 489 rect from 21.566499999999998, 21.83 to 21.588, 21.17 fc rgb \"#00CC44\"",
- "set object 490 rect from 21.6535, 21.83 to 21.727, 21.17 fc rgb \"#00CC44\"",
- "set object 491 rect from 22.445, 21.83 to 22.4625, 21.17 fc rgb \"#00CC44\"",
- "set object 492 rect from 22.502000000000002, 21.83 to 22.5165, 21.17 fc rgb \"#00CC44\"",
- "set object 493 rect from 22.553, 21.83 to 22.5645, 21.17 fc rgb \"#00CC44\"",
- "set object 494 rect from 23.233, 21.83 to 23.336000000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 495 rect from 23.4255, 21.83 to 23.506, 21.17 fc rgb \"#00CC44\"",
- "set object 496 rect from 23.5895, 21.83 to 23.613, 21.17 fc rgb \"#00CC44\"",
- "set object 497 rect from 23.870500000000003, 21.83 to 23.907, 21.17 fc rgb \"#00CC44\"",
- "set object 498 rect from 24.393, 21.83 to 24.430500000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 499 rect from 24.470000000000002, 21.83 to 24.504500000000004, 21.17 fc rgb \"#00CC44\"",
- "set object 500 rect from 25.267500000000002, 21.83 to 25.283, 21.17 fc rgb \"#00CC44\"",
- "set object 501 rect from 25.4195, 21.83 to 25.427, 21.17 fc rgb \"#00CC44\"",
- "set object 502 rect from 25.519500000000004, 21.83 to 25.526000000000003, 21.17 fc rgb \"#00CC44\"",
- "set object 503 rect from 42.28050000000001, 21.83 to 42.298000000000016, 21.17 fc rgb \"#00CC44\"",
- "set object 504 rect from 42.62700000000002, 21.83 to 42.656500000000015, 21.17 fc rgb \"#00CC44\"",
- "set object 505 rect from 42.747000000000014, 21.83 to 42.763500000000015, 21.17 fc rgb \"#00CC44\"",
- "set object 506 rect from 42.80300000000001, 21.83 to 42.81050000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 507 rect from 42.844000000000015, 21.83 to 42.858500000000014, 21.17 fc rgb \"#00CC44\"",
- "set object 508 rect from 43.60550000000001, 21.83 to 43.62000000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 509 rect from 44.796000000000014, 21.83 to 44.81150000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 510 rect from 44.84500000000001, 21.83 to 44.87150000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 511 rect from 44.996000000000016, 21.83 to 45.00850000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 512 rect from 45.04700000000001, 21.83 to 45.06450000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 513 rect from 45.09600000000001, 21.83 to 45.107500000000016, 21.17 fc rgb \"#00CC44\"",
- "set object 514 rect from 45.14400000000002, 21.83 to 45.16150000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 515 rect from 45.32050000000002, 21.83 to 45.33700000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 516 rect from 45.38750000000002, 21.83 to 45.402000000000015, 21.17 fc rgb \"#00CC44\"",
- "set object 517 rect from 45.43250000000002, 21.83 to 45.442000000000014, 21.17 fc rgb \"#00CC44\"",
- "set object 518 rect from 45.46050000000002, 21.83 to 45.46500000000002, 21.17 fc rgb \"#00CC44\"",
- "set object 519 rect from 45.47750000000001, 21.83 to 45.48300000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 520 rect from 45.49750000000001, 21.83 to 45.55900000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 521 rect from 45.66050000000001, 21.83 to 45.70300000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 522 rect from 45.79350000000001, 21.83 to 45.81700000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 523 rect from 45.86950000000001, 21.83 to 45.92300000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 524 rect from 45.99450000000001, 21.83 to 46.060500000000005, 21.17 fc rgb \"#00CC44\"",
- "set object 525 rect from 46.18500000000001, 21.83 to 46.28150000000001, 21.17 fc rgb \"#00CC44\"",
- "set object 526 rect from 46.550000000000004, 21.83 to 46.5915, 21.17 fc rgb \"#00CC44\"",
- "set object 527 rect from 46.65500000000001, 21.83 to 46.691500000000005, 21.17 fc rgb \"#00CC44\"",
- "set object 528 rect from 46.861000000000004, 21.83 to 46.8935, 21.17 fc rgb \"#00CC44\"",
- "set object 529 rect from 47.039500000000004, 21.83 to 47.049, 21.17 fc rgb \"#00CC44\"",
- "set object 530 rect from 47.0765, 21.83 to 47.135000000000005, 21.17 fc rgb \"#00CC44\"",
- "set object 531 rect from 47.4125, 21.83 to 47.465, 21.17 fc rgb \"#00CC44\"",
- "set object 532 rect from 49.454499999999996, 21.83 to 49.467, 21.17 fc rgb \"#00CC44\"",
- "set object 533 rect from 49.6855, 21.83 to 49.726, 21.17 fc rgb \"#00CC44\"",
- "set object 534 rect from 49.799499999999995, 21.83 to 49.812999999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 535 rect from 49.841499999999996, 21.83 to 49.849999999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 536 rect from 49.894499999999994, 21.83 to 49.9695, 21.17 fc rgb \"#00CC44\"",
- "set object 537 rect from 50.083999999999996, 21.83 to 50.14149999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 538 rect from 50.29299999999999, 21.83 to 50.31249999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 539 rect from 50.36699999999999, 21.83 to 50.39849999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 540 rect from 50.520999999999994, 21.83 to 50.528499999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 541 rect from 50.54899999999999, 21.83 to 50.62049999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 542 rect from 51.27549999999999, 21.83 to 51.29099999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 543 rect from 51.52249999999999, 21.83 to 51.56899999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 544 rect from 51.87299999999998, 21.83 to 51.89049999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 545 rect from 52.115999999999985, 21.83 to 52.13449999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 546 rect from 52.286999999999985, 21.83 to 52.300499999999985, 21.17 fc rgb \"#00CC44\"",
- "set object 547 rect from 52.326999999999984, 21.83 to 52.33049999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 548 rect from 52.362999999999985, 21.83 to 52.404499999999985, 21.17 fc rgb \"#00CC44\"",
- "set object 549 rect from 54.566499999999984, 21.83 to 54.64299999999998, 21.17 fc rgb \"#00CC44\"",
- "set object 550 rect from 55.49149999999998, 21.83 to 55.53099999999998, 21.17 fc rgb \"#00CC44\"",
- "set object 551 rect from 56.64049999999998, 21.83 to 56.64999999999998, 21.17 fc rgb \"#00CC44\"",
- "set object 552 rect from 56.750999999999976, 21.83 to 56.76449999999998, 21.17 fc rgb \"#00CC44\"",
- "set object 553 rect from 57.039499999999975, 21.83 to 57.076499999999974, 21.17 fc rgb \"#00CC44\"",
- "set object 554 rect from 57.885999999999974, 21.83 to 57.893499999999975, 21.17 fc rgb \"#00CC44\"",
- "set object 555 rect from 57.97749999999997, 21.83 to 57.99099999999997, 21.17 fc rgb \"#00CC44\"",
- "set object 556 rect from 58.04499999999997, 21.83 to 58.055499999999974, 21.17 fc rgb \"#00CC44\"",
- "set object 557 rect from 58.14549999999997, 21.83 to 58.15399999999997, 21.17 fc rgb \"#00CC44\"",
- "set object 558 rect from 58.17549999999997, 21.83 to 58.18399999999997, 21.17 fc rgb \"#00CC44\"",
- "set object 559 rect from 58.40999999999997, 21.83 to 58.431499999999964, 21.17 fc rgb \"#00CC44\"",
- "set object 560 rect from 58.51699999999997, 21.83 to 58.53049999999997, 21.17 fc rgb \"#00CC44\"",
- "set object 561 rect from 58.590999999999966, 21.83 to 58.60049999999997, 21.17 fc rgb \"#00CC44\"",
- "set object 562 rect from 59.65599999999996, 21.83 to 59.669499999999964, 21.17 fc rgb \"#00CC44\"",
- "set object 563 rect from 60.05149999999996, 21.83 to 60.060999999999964, 21.17 fc rgb \"#00CC44\"",
- "set object 564 rect from 60.176999999999964, 21.83 to 60.19499999999996, 21.17 fc rgb \"#00CC44\"",
- "set object 565 rect from 60.26949999999996, 21.83 to 60.27999999999996, 21.17 fc rgb \"#00CC44\"",
- "set object 566 rect from 60.31149999999996, 21.83 to 60.34699999999996, 21.17 fc rgb \"#00CC44\"",
- "set object 567 rect from 60.471499999999956, 21.83 to 60.48399999999996, 21.17 fc rgb \"#00CC44\"",
- "set object 568 rect from 60.508499999999955, 21.83 to 60.51999999999996, 21.17 fc rgb \"#00CC44\"",
- "set object 569 rect from 60.92099999999996, 21.83 to 60.98249999999996, 21.17 fc rgb \"#00CC44\"",
- "set object 570 rect from 63.15199999999995, 21.83 to 63.228499999999954, 21.17 fc rgb \"#00CC44\"",
- "set object 571 rect from 67.34999999999994, 21.83 to 67.36349999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 572 rect from 67.40699999999995, 21.83 to 67.41249999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 573 rect from 67.45699999999994, 21.83 to 67.46599999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 574 rect from 69.11299999999994, 21.83 to 69.12949999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 575 rect from 69.19199999999995, 21.83 to 69.22649999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 576 rect from 69.30799999999994, 21.83 to 69.31949999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 577 rect from 69.34699999999995, 21.83 to 69.35749999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 578 rect from 69.38399999999996, 21.83 to 69.40549999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 579 rect from 69.45099999999994, 21.83 to 69.46349999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 580 rect from 70.31749999999994, 21.83 to 70.33949999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 581 rect from 74.41449999999995, 21.83 to 74.43899999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 582 rect from 74.52049999999994, 21.83 to 74.54499999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 583 rect from 74.59549999999994, 21.83 to 74.60899999999995, 21.17 fc rgb \"#00CC44\"",
- "set object 584 rect from 84.09999999999994, 21.83 to 84.15349999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 585 rect from 84.26099999999994, 21.83 to 84.27549999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 586 rect from 84.31099999999992, 21.83 to 84.31949999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 587 rect from 84.34199999999993, 21.83 to 84.35349999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 588 rect from 84.37299999999993, 21.83 to 84.40149999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 589 rect from 84.43999999999994, 21.83 to 84.46149999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 590 rect from 84.53049999999993, 21.83 to 84.60099999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 591 rect from 84.68049999999992, 21.83 to 84.69199999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 592 rect from 84.71649999999993, 21.83 to 84.72799999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 593 rect from 84.92199999999994, 21.83 to 84.93849999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 594 rect from 84.99799999999993, 21.83 to 85.01049999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 595 rect from 85.03599999999992, 21.83 to 85.04449999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 596 rect from 85.06199999999993, 21.83 to 85.07249999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 597 rect from 85.09499999999994, 21.83 to 85.10249999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 598 rect from 85.38399999999993, 21.83 to 85.43999999999994, 21.17 fc rgb \"#00CC44\"",
- "set object 599 rect from 85.59949999999992, 21.83 to 85.61599999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 600 rect from 85.63749999999993, 21.83 to 85.65899999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 601 rect from 85.69649999999993, 21.83 to 85.70599999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 602 rect from 85.73249999999993, 21.83 to 85.76899999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 603 rect from 85.86549999999993, 21.83 to 85.87599999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 604 rect from 85.91149999999992, 21.83 to 85.92499999999993, 21.17 fc rgb \"#00CC44\"",
- "set object 605 rect from 102.74599999999992, 21.83 to 102.80749999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 606 rect from 107.5244999999999, 21.83 to 107.57199999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 607 rect from 107.62449999999991, 21.83 to 107.6389999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 608 rect from 107.6674999999999, 21.83 to 107.6759999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 609 rect from 107.69849999999991, 21.83 to 107.70999999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 610 rect from 107.7294999999999, 21.83 to 107.7469999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 611 rect from 107.7834999999999, 21.83 to 107.79299999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 612 rect from 107.82049999999991, 21.83 to 107.8529999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 613 rect from 107.9294999999999, 21.83 to 107.94099999999992, 21.17 fc rgb \"#00CC44\"",
- "set object 614 rect from 107.9654999999999, 21.83 to 107.97599999999991, 21.17 fc rgb \"#00CC44\"",
- "set object 615 rect from 130.5489999999999, 21.83 to 130.5954999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 616 rect from 130.6469999999999, 21.83 to 130.6614999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 617 rect from 130.68999999999988, 21.83 to 130.6994999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 618 rect from 130.7219999999999, 21.83 to 130.7324999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 619 rect from 130.7519999999999, 21.83 to 130.76949999999988, 21.17 fc rgb \"#00CC44\"",
- "set object 620 rect from 130.8059999999999, 21.83 to 130.8154999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 621 rect from 130.84299999999988, 21.83 to 130.87549999999987, 21.17 fc rgb \"#00CC44\"",
- "set object 622 rect from 130.95199999999988, 21.83 to 130.9644999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 623 rect from 130.99099999999987, 21.83 to 131.00249999999988, 21.17 fc rgb \"#00CC44\"",
- "set object 624 rect from 140.86699999999988, 21.83 to 140.8814999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 625 rect from 140.9319999999999, 21.83 to 140.9574999999999, 21.17 fc rgb \"#00CC44\"",
- "set object 626 rect from 141.0299999999999, 21.83 to 141.03849999999989, 21.17 fc rgb \"#00CC44\"",
- "set object 627 rect from 55.79999999999998, 22.83 to 56.198999999999984, 22.17 fc rgb \"#0044CC\"",
- "set object 628 rect from 62.16149999999996, 22.83 to 62.548999999999964, 22.17 fc rgb \"#0044CC\"",
- "set object 629 rect from 65.56449999999995, 22.83 to 65.61699999999995, 22.17 fc rgb \"#0044CC\"",
- "set object 630 rect from 68.70599999999996, 22.83 to 68.76649999999995, 22.17 fc rgb \"#0044CC\"",
- "set object 631 rect from 72.22199999999995, 22.83 to 72.28049999999995, 22.17 fc rgb \"#0044CC\"",
- "set object 632 rect from 75.41849999999994, 22.83 to 75.46799999999995, 22.17 fc rgb \"#0044CC\"",
- "set object 633 rect from 78.16449999999993, 22.83 to 78.23649999999994, 22.17 fc rgb \"#0044CC\"",
- "set object 634 rect from 80.90399999999994, 22.83 to 80.95049999999993, 22.17 fc rgb \"#0044CC\"",
- "set object 635 rect from 83.58349999999993, 22.83 to 83.63999999999993, 22.17 fc rgb \"#0044CC\"",
- "set object 636 rect from 88.75199999999992, 22.83 to 88.82299999999992, 22.17 fc rgb \"#0044CC\"",
- "set object 637 rect from 91.90999999999991, 22.83 to 91.96649999999993, 22.17 fc rgb \"#0044CC\"",
- "set object 638 rect from 94.55599999999993, 22.83 to 94.6054999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 639 rect from 97.20749999999991, 22.83 to 97.26099999999992, 22.17 fc rgb \"#0044CC\"",
- "set object 640 rect from 99.86649999999992, 22.83 to 99.92199999999991, 22.17 fc rgb \"#0044CC\"",
- "set object 641 rect from 102.56049999999992, 22.83 to 102.61199999999991, 22.17 fc rgb \"#0044CC\"",
- "set object 642 rect from 105.88099999999991, 22.83 to 105.93349999999991, 22.17 fc rgb \"#0044CC\"",
- "set object 643 rect from 109.2659999999999, 22.83 to 109.38599999999991, 22.17 fc rgb \"#0044CC\"",
- "set object 644 rect from 109.4024999999999, 22.83 to 109.41799999999989, 22.17 fc rgb \"#0044CC\"",
- "set object 645 rect from 112.6029999999999, 22.83 to 112.6564999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 646 rect from 115.36399999999989, 22.83 to 115.4124999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 647 rect from 118.1434999999999, 22.83 to 118.19199999999991, 22.17 fc rgb \"#0044CC\"",
- "set object 648 rect from 120.9194999999999, 22.83 to 121.0104999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 649 rect from 121.0259999999999, 22.83 to 121.0314999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 650 rect from 123.77499999999989, 22.83 to 123.8254999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 651 rect from 126.55149999999989, 22.83 to 126.59899999999989, 22.17 fc rgb \"#0044CC\"",
- "set object 652 rect from 129.3344999999999, 22.83 to 129.4124999999999, 22.17 fc rgb \"#0044CC\"",
- "set object 653 rect from 129.4249999999999, 22.83 to 129.48849999999987, 22.17 fc rgb \"#0044CC\"",
- "set object 654 rect from 132.8659999999999, 22.83 to 132.92249999999987, 22.17 fc rgb \"#0044CC\"",
- "set object 655 rect from 136.14449999999988, 22.83 to 136.19799999999987, 22.17 fc rgb \"#0044CC\"",
- "set object 656 rect from 138.9289999999999, 22.83 to 138.98049999999986, 22.17 fc rgb \"#0044CC\"",
- "set object 657 rect from 2.4204999999999997, 23.83 to 3.7920000000000003, 23.17 fc rgb \"#4444CC\"",
- "set object 658 rect from 3.8075, 23.83 to 3.8129999999999997, 23.17 fc rgb \"#4444CC\"",
- "set object 659 rect from 6.2695, 23.83 to 7.373, 23.17 fc rgb \"#4444CC\"",
- "set object 660 rect from 7.3865, 23.83 to 7.3919999999999995, 23.17 fc rgb \"#4444CC\"",
- "set object 661 rect from 9.2915, 23.83 to 10.405000000000001, 23.17 fc rgb \"#4444CC\"",
- "set object 662 rect from 10.4235, 23.83 to 10.43, 23.17 fc rgb \"#4444CC\"",
- "set object 663 rect from 12.8765, 23.83 to 13.897, 23.17 fc rgb \"#4444CC\"",
- "set object 664 rect from 13.910499999999999, 23.83 to 13.915999999999999, 23.17 fc rgb \"#4444CC\"",
+ "set object 439 rect from 22.2675, 18.83 to 22.3815, 18.17 fc rgb \"#00CC00\"",
+ "set object 440 rect from 22.665, 18.83 to 23.1135, 18.17 fc rgb \"#00CC00\"",
+ "set object 441 rect from 27.951000000000004, 18.83 to 27.972500000000004, 18.17 fc rgb \"#00CC00\"",
+ "set object 442 rect from 27.993000000000002, 18.83 to 28.013500000000004, 18.17 fc rgb \"#00CC00\"",
+ "set object 443 rect from 28.043000000000003, 18.83 to 28.063500000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 444 rect from 28.085000000000004, 18.83 to 28.087500000000002, 18.17 fc rgb \"#00CC00\"",
+ "set object 445 rect from 28.115000000000002, 18.83 to 28.139500000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 446 rect from 28.154000000000007, 18.83 to 28.260000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 447 rect from 28.309500000000003, 18.83 to 28.374000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 448 rect from 28.383500000000005, 18.83 to 28.385000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 449 rect from 28.396500000000003, 18.83 to 28.445000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 450 rect from 28.459500000000006, 18.83 to 28.463000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 451 rect from 28.489500000000007, 18.83 to 28.499000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 452 rect from 28.512500000000006, 18.83 to 28.516000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 453 rect from 28.529500000000006, 18.83 to 28.533000000000005, 18.17 fc rgb \"#00CC00\"",
+ "set object 454 rect from 28.554500000000004, 18.83 to 28.557000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 455 rect from 28.573500000000006, 18.83 to 28.579000000000008, 18.17 fc rgb \"#00CC00\"",
+ "set object 456 rect from 28.59950000000001, 18.83 to 28.602000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 457 rect from 28.623500000000007, 18.83 to 28.625000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 458 rect from 28.637500000000006, 18.83 to 28.647000000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 459 rect from 28.657500000000006, 18.83 to 28.669000000000008, 18.17 fc rgb \"#00CC00\"",
+ "set object 460 rect from 28.682500000000005, 18.83 to 28.686000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 461 rect from 28.695500000000006, 18.83 to 28.701000000000008, 18.17 fc rgb \"#00CC00\"",
+ "set object 462 rect from 28.72450000000001, 18.83 to 28.811000000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 463 rect from 28.83250000000001, 18.83 to 28.907500000000006, 18.17 fc rgb \"#00CC00\"",
+ "set object 464 rect from 28.97100000000001, 18.83 to 28.97450000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 465 rect from 28.99600000000001, 18.83 to 28.99850000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 466 rect from 29.01200000000001, 18.83 to 29.01350000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 467 rect from 29.02600000000001, 18.83 to 29.056500000000007, 18.17 fc rgb \"#00CC00\"",
+ "set object 468 rect from 29.06900000000001, 18.83 to 29.159500000000012, 18.17 fc rgb \"#00CC00\"",
+ "set object 469 rect from 29.17100000000001, 18.83 to 29.18450000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 470 rect from 29.19400000000001, 18.83 to 41.84850000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 471 rect from 41.87900000000001, 18.83 to 41.88650000000001, 18.17 fc rgb \"#00CC00\"",
+ "set object 472 rect from 27.972500000000004, 19.83 to 28.053000000000004, 19.17 fc rgb \"#44CC00\"",
+ "set object 473 rect from 28.063500000000005, 19.83 to 28.169000000000004, 19.17 fc rgb \"#44CC00\"",
+ "set object 474 rect from 28.260000000000005, 19.83 to 28.489500000000007, 19.17 fc rgb \"#44CC00\"",
+ "set object 475 rect from 28.499000000000006, 19.83 to 28.761500000000005, 19.17 fc rgb \"#44CC00\"",
+ "set object 476 rect from 28.78900000000001, 19.83 to 28.847500000000007, 19.17 fc rgb \"#44CC00\"",
+ "set object 477 rect from 28.907500000000006, 19.83 to 29.047000000000008, 19.17 fc rgb \"#44CC00\"",
+ "set object 478 rect from 29.056500000000007, 19.83 to 29.111000000000008, 19.17 fc rgb \"#44CC00\"",
+ "set object 479 rect from 29.12350000000001, 19.83 to 29.21900000000001, 19.17 fc rgb \"#44CC00\"",
+ "set object 480 rect from 41.82650000000001, 19.83 to 41.83500000000001, 19.17 fc rgb \"#44CC00\"",
+ "set object 481 rect from 41.84850000000001, 19.83 to 41.87900000000001, 19.17 fc rgb \"#44CC00\"",
+ "set object 482 rect from 16.737, 20.83 to 16.9595, 20.17 fc rgb \"#00CC44\"",
+ "set object 483 rect from 17.8715, 20.83 to 18.017000000000003, 20.17 fc rgb \"#00CC44\"",
+ "set object 484 rect from 18.992, 20.83 to 19.0685, 20.17 fc rgb \"#00CC44\"",
+ "set object 485 rect from 20.52, 20.83 to 20.5975, 20.17 fc rgb \"#00CC44\"",
+ "set object 486 rect from 21.109, 20.83 to 21.1335, 20.17 fc rgb \"#00CC44\"",
+ "set object 487 rect from 21.212, 20.83 to 21.2695, 20.17 fc rgb \"#00CC44\"",
+ "set object 488 rect from 21.4595, 20.83 to 21.49, 20.17 fc rgb \"#00CC44\"",
+ "set object 489 rect from 21.566499999999998, 20.83 to 21.588, 20.17 fc rgb \"#00CC44\"",
+ "set object 490 rect from 21.6535, 20.83 to 21.727, 20.17 fc rgb \"#00CC44\"",
+ "set object 491 rect from 22.445, 20.83 to 22.4625, 20.17 fc rgb \"#00CC44\"",
+ "set object 492 rect from 22.502000000000002, 20.83 to 22.5165, 20.17 fc rgb \"#00CC44\"",
+ "set object 493 rect from 22.553, 20.83 to 22.5645, 20.17 fc rgb \"#00CC44\"",
+ "set object 494 rect from 23.233, 20.83 to 23.336000000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 495 rect from 23.4255, 20.83 to 23.506, 20.17 fc rgb \"#00CC44\"",
+ "set object 496 rect from 23.5895, 20.83 to 23.613, 20.17 fc rgb \"#00CC44\"",
+ "set object 497 rect from 23.870500000000003, 20.83 to 23.907, 20.17 fc rgb \"#00CC44\"",
+ "set object 498 rect from 24.393, 20.83 to 24.430500000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 499 rect from 24.470000000000002, 20.83 to 24.504500000000004, 20.17 fc rgb \"#00CC44\"",
+ "set object 500 rect from 25.267500000000002, 20.83 to 25.283, 20.17 fc rgb \"#00CC44\"",
+ "set object 501 rect from 25.4195, 20.83 to 25.427, 20.17 fc rgb \"#00CC44\"",
+ "set object 502 rect from 25.519500000000004, 20.83 to 25.526000000000003, 20.17 fc rgb \"#00CC44\"",
+ "set object 503 rect from 42.28050000000001, 20.83 to 42.298000000000016, 20.17 fc rgb \"#00CC44\"",
+ "set object 504 rect from 42.62700000000002, 20.83 to 42.656500000000015, 20.17 fc rgb \"#00CC44\"",
+ "set object 505 rect from 42.747000000000014, 20.83 to 42.763500000000015, 20.17 fc rgb \"#00CC44\"",
+ "set object 506 rect from 42.80300000000001, 20.83 to 42.81050000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 507 rect from 42.844000000000015, 20.83 to 42.858500000000014, 20.17 fc rgb \"#00CC44\"",
+ "set object 508 rect from 43.60550000000001, 20.83 to 43.62000000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 509 rect from 44.796000000000014, 20.83 to 44.81150000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 510 rect from 44.84500000000001, 20.83 to 44.87150000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 511 rect from 44.996000000000016, 20.83 to 45.00850000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 512 rect from 45.04700000000001, 20.83 to 45.06450000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 513 rect from 45.09600000000001, 20.83 to 45.107500000000016, 20.17 fc rgb \"#00CC44\"",
+ "set object 514 rect from 45.14400000000002, 20.83 to 45.16150000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 515 rect from 45.32050000000002, 20.83 to 45.33700000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 516 rect from 45.38750000000002, 20.83 to 45.402000000000015, 20.17 fc rgb \"#00CC44\"",
+ "set object 517 rect from 45.43250000000002, 20.83 to 45.442000000000014, 20.17 fc rgb \"#00CC44\"",
+ "set object 518 rect from 45.46050000000002, 20.83 to 45.46500000000002, 20.17 fc rgb \"#00CC44\"",
+ "set object 519 rect from 45.47750000000001, 20.83 to 45.48300000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 520 rect from 45.49750000000001, 20.83 to 45.55900000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 521 rect from 45.66050000000001, 20.83 to 45.70300000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 522 rect from 45.79350000000001, 20.83 to 45.81700000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 523 rect from 45.86950000000001, 20.83 to 45.92300000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 524 rect from 45.99450000000001, 20.83 to 46.060500000000005, 20.17 fc rgb \"#00CC44\"",
+ "set object 525 rect from 46.18500000000001, 20.83 to 46.28150000000001, 20.17 fc rgb \"#00CC44\"",
+ "set object 526 rect from 46.550000000000004, 20.83 to 46.5915, 20.17 fc rgb \"#00CC44\"",
+ "set object 527 rect from 46.65500000000001, 20.83 to 46.691500000000005, 20.17 fc rgb \"#00CC44\"",
+ "set object 528 rect from 46.861000000000004, 20.83 to 46.8935, 20.17 fc rgb \"#00CC44\"",
+ "set object 529 rect from 47.039500000000004, 20.83 to 47.049, 20.17 fc rgb \"#00CC44\"",
+ "set object 530 rect from 47.0765, 20.83 to 47.135000000000005, 20.17 fc rgb \"#00CC44\"",
+ "set object 531 rect from 47.4125, 20.83 to 47.465, 20.17 fc rgb \"#00CC44\"",
+ "set object 532 rect from 49.454499999999996, 20.83 to 49.467, 20.17 fc rgb \"#00CC44\"",
+ "set object 533 rect from 49.6855, 20.83 to 49.726, 20.17 fc rgb \"#00CC44\"",
+ "set object 534 rect from 49.799499999999995, 20.83 to 49.812999999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 535 rect from 49.841499999999996, 20.83 to 49.849999999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 536 rect from 49.894499999999994, 20.83 to 49.9695, 20.17 fc rgb \"#00CC44\"",
+ "set object 537 rect from 50.083999999999996, 20.83 to 50.14149999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 538 rect from 50.29299999999999, 20.83 to 50.31249999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 539 rect from 50.36699999999999, 20.83 to 50.39849999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 540 rect from 50.520999999999994, 20.83 to 50.528499999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 541 rect from 50.54899999999999, 20.83 to 50.62049999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 542 rect from 51.27549999999999, 20.83 to 51.29099999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 543 rect from 51.52249999999999, 20.83 to 51.56899999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 544 rect from 51.87299999999998, 20.83 to 51.89049999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 545 rect from 52.115999999999985, 20.83 to 52.13449999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 546 rect from 52.286999999999985, 20.83 to 52.300499999999985, 20.17 fc rgb \"#00CC44\"",
+ "set object 547 rect from 52.326999999999984, 20.83 to 52.33049999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 548 rect from 52.362999999999985, 20.83 to 52.404499999999985, 20.17 fc rgb \"#00CC44\"",
+ "set object 549 rect from 54.566499999999984, 20.83 to 54.64299999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 550 rect from 55.49149999999998, 20.83 to 55.53099999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 551 rect from 56.64049999999998, 20.83 to 56.64999999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 552 rect from 56.750999999999976, 20.83 to 56.76449999999998, 20.17 fc rgb \"#00CC44\"",
+ "set object 553 rect from 57.039499999999975, 20.83 to 57.076499999999974, 20.17 fc rgb \"#00CC44\"",
+ "set object 554 rect from 57.885999999999974, 20.83 to 57.893499999999975, 20.17 fc rgb \"#00CC44\"",
+ "set object 555 rect from 57.97749999999997, 20.83 to 57.99099999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 556 rect from 58.04499999999997, 20.83 to 58.055499999999974, 20.17 fc rgb \"#00CC44\"",
+ "set object 557 rect from 58.14549999999997, 20.83 to 58.15399999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 558 rect from 58.17549999999997, 20.83 to 58.18399999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 559 rect from 58.40999999999997, 20.83 to 58.431499999999964, 20.17 fc rgb \"#00CC44\"",
+ "set object 560 rect from 58.51699999999997, 20.83 to 58.53049999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 561 rect from 58.590999999999966, 20.83 to 58.60049999999997, 20.17 fc rgb \"#00CC44\"",
+ "set object 562 rect from 59.65599999999996, 20.83 to 59.669499999999964, 20.17 fc rgb \"#00CC44\"",
+ "set object 563 rect from 60.05149999999996, 20.83 to 60.060999999999964, 20.17 fc rgb \"#00CC44\"",
+ "set object 564 rect from 60.176999999999964, 20.83 to 60.19499999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 565 rect from 60.26949999999996, 20.83 to 60.27999999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 566 rect from 60.31149999999996, 20.83 to 60.34699999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 567 rect from 60.471499999999956, 20.83 to 60.48399999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 568 rect from 60.508499999999955, 20.83 to 60.51999999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 569 rect from 60.92099999999996, 20.83 to 60.98249999999996, 20.17 fc rgb \"#00CC44\"",
+ "set object 570 rect from 63.15199999999995, 20.83 to 63.228499999999954, 20.17 fc rgb \"#00CC44\"",
+ "set object 571 rect from 67.34999999999994, 20.83 to 67.36349999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 572 rect from 67.40699999999995, 20.83 to 67.41249999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 573 rect from 67.45699999999994, 20.83 to 67.46599999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 574 rect from 69.11299999999994, 20.83 to 69.12949999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 575 rect from 69.19199999999995, 20.83 to 69.22649999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 576 rect from 69.30799999999994, 20.83 to 69.31949999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 577 rect from 69.34699999999995, 20.83 to 69.35749999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 578 rect from 69.38399999999996, 20.83 to 69.40549999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 579 rect from 69.45099999999994, 20.83 to 69.46349999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 580 rect from 70.31749999999994, 20.83 to 70.33949999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 581 rect from 74.41449999999995, 20.83 to 74.43899999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 582 rect from 74.52049999999994, 20.83 to 74.54499999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 583 rect from 74.59549999999994, 20.83 to 74.60899999999995, 20.17 fc rgb \"#00CC44\"",
+ "set object 584 rect from 84.09999999999994, 20.83 to 84.15349999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 585 rect from 84.26099999999994, 20.83 to 84.27549999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 586 rect from 84.31099999999992, 20.83 to 84.31949999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 587 rect from 84.34199999999993, 20.83 to 84.35349999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 588 rect from 84.37299999999993, 20.83 to 84.40149999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 589 rect from 84.43999999999994, 20.83 to 84.46149999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 590 rect from 84.53049999999993, 20.83 to 84.60099999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 591 rect from 84.68049999999992, 20.83 to 84.69199999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 592 rect from 84.71649999999993, 20.83 to 84.72799999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 593 rect from 84.92199999999994, 20.83 to 84.93849999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 594 rect from 84.99799999999993, 20.83 to 85.01049999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 595 rect from 85.03599999999992, 20.83 to 85.04449999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 596 rect from 85.06199999999993, 20.83 to 85.07249999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 597 rect from 85.09499999999994, 20.83 to 85.10249999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 598 rect from 85.38399999999993, 20.83 to 85.43999999999994, 20.17 fc rgb \"#00CC44\"",
+ "set object 599 rect from 85.59949999999992, 20.83 to 85.61599999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 600 rect from 85.63749999999993, 20.83 to 85.65899999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 601 rect from 85.69649999999993, 20.83 to 85.70599999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 602 rect from 85.73249999999993, 20.83 to 85.76899999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 603 rect from 85.86549999999993, 20.83 to 85.87599999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 604 rect from 85.91149999999992, 20.83 to 85.92499999999993, 20.17 fc rgb \"#00CC44\"",
+ "set object 605 rect from 102.74599999999992, 20.83 to 102.80749999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 606 rect from 107.5244999999999, 20.83 to 107.57199999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 607 rect from 107.62449999999991, 20.83 to 107.6389999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 608 rect from 107.6674999999999, 20.83 to 107.6759999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 609 rect from 107.69849999999991, 20.83 to 107.70999999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 610 rect from 107.7294999999999, 20.83 to 107.7469999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 611 rect from 107.7834999999999, 20.83 to 107.79299999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 612 rect from 107.82049999999991, 20.83 to 107.8529999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 613 rect from 107.9294999999999, 20.83 to 107.94099999999992, 20.17 fc rgb \"#00CC44\"",
+ "set object 614 rect from 107.9654999999999, 20.83 to 107.97599999999991, 20.17 fc rgb \"#00CC44\"",
+ "set object 615 rect from 130.5489999999999, 20.83 to 130.5954999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 616 rect from 130.6469999999999, 20.83 to 130.6614999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 617 rect from 130.68999999999988, 20.83 to 130.6994999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 618 rect from 130.7219999999999, 20.83 to 130.7324999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 619 rect from 130.7519999999999, 20.83 to 130.76949999999988, 20.17 fc rgb \"#00CC44\"",
+ "set object 620 rect from 130.8059999999999, 20.83 to 130.8154999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 621 rect from 130.84299999999988, 20.83 to 130.87549999999987, 20.17 fc rgb \"#00CC44\"",
+ "set object 622 rect from 130.95199999999988, 20.83 to 130.9644999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 623 rect from 130.99099999999987, 20.83 to 131.00249999999988, 20.17 fc rgb \"#00CC44\"",
+ "set object 624 rect from 140.86699999999988, 20.83 to 140.8814999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 625 rect from 140.9319999999999, 20.83 to 140.9574999999999, 20.17 fc rgb \"#00CC44\"",
+ "set object 626 rect from 141.0299999999999, 20.83 to 141.03849999999989, 20.17 fc rgb \"#00CC44\"",
+ "set object 627 rect from 55.79999999999998, 21.83 to 56.198999999999984, 21.17 fc rgb \"#0044CC\"",
+ "set object 628 rect from 62.16149999999996, 21.83 to 62.548999999999964, 21.17 fc rgb \"#0044CC\"",
+ "set object 629 rect from 65.56449999999995, 21.83 to 65.61699999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 630 rect from 68.70599999999996, 21.83 to 68.76649999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 631 rect from 72.22199999999995, 21.83 to 72.28049999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 632 rect from 75.41849999999994, 21.83 to 75.46799999999995, 21.17 fc rgb \"#0044CC\"",
+ "set object 633 rect from 78.16449999999993, 21.83 to 78.23649999999994, 21.17 fc rgb \"#0044CC\"",
+ "set object 634 rect from 80.90399999999994, 21.83 to 80.95049999999993, 21.17 fc rgb \"#0044CC\"",
+ "set object 635 rect from 83.58349999999993, 21.83 to 83.63999999999993, 21.17 fc rgb \"#0044CC\"",
+ "set object 636 rect from 88.75199999999992, 21.83 to 88.82299999999992, 21.17 fc rgb \"#0044CC\"",
+ "set object 637 rect from 91.90999999999991, 21.83 to 91.96649999999993, 21.17 fc rgb \"#0044CC\"",
+ "set object 638 rect from 94.55599999999993, 21.83 to 94.6054999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 639 rect from 97.20749999999991, 21.83 to 97.26099999999992, 21.17 fc rgb \"#0044CC\"",
+ "set object 640 rect from 99.86649999999992, 21.83 to 99.92199999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 641 rect from 102.56049999999992, 21.83 to 102.61199999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 642 rect from 105.88099999999991, 21.83 to 105.93349999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 643 rect from 109.2659999999999, 21.83 to 109.38599999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 644 rect from 109.4024999999999, 21.83 to 109.41799999999989, 21.17 fc rgb \"#0044CC\"",
+ "set object 645 rect from 112.6029999999999, 21.83 to 112.6564999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 646 rect from 115.36399999999989, 21.83 to 115.4124999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 647 rect from 118.1434999999999, 21.83 to 118.19199999999991, 21.17 fc rgb \"#0044CC\"",
+ "set object 648 rect from 120.9194999999999, 21.83 to 121.0104999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 649 rect from 121.0259999999999, 21.83 to 121.0314999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 650 rect from 123.77499999999989, 21.83 to 123.8254999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 651 rect from 126.55149999999989, 21.83 to 126.59899999999989, 21.17 fc rgb \"#0044CC\"",
+ "set object 652 rect from 129.3344999999999, 21.83 to 129.4124999999999, 21.17 fc rgb \"#0044CC\"",
+ "set object 653 rect from 129.4249999999999, 21.83 to 129.48849999999987, 21.17 fc rgb \"#0044CC\"",
+ "set object 654 rect from 132.8659999999999, 21.83 to 132.92249999999987, 21.17 fc rgb \"#0044CC\"",
+ "set object 655 rect from 136.14449999999988, 21.83 to 136.19799999999987, 21.17 fc rgb \"#0044CC\"",
+ "set object 656 rect from 138.9289999999999, 21.83 to 138.98049999999986, 21.17 fc rgb \"#0044CC\"",
+ "set object 657 rect from 2.4204999999999997, 22.83 to 3.7920000000000003, 22.17 fc rgb \"#4444CC\"",
+ "set object 658 rect from 3.8075, 22.83 to 3.8129999999999997, 22.17 fc rgb \"#4444CC\"",
+ "set object 659 rect from 6.2695, 22.83 to 7.373, 22.17 fc rgb \"#4444CC\"",
+ "set object 660 rect from 7.3865, 22.83 to 7.3919999999999995, 22.17 fc rgb \"#4444CC\"",
+ "set object 661 rect from 9.2915, 22.83 to 10.405000000000001, 22.17 fc rgb \"#4444CC\"",
+ "set object 662 rect from 10.4235, 22.83 to 10.43, 22.17 fc rgb \"#4444CC\"",
+ "set object 663 rect from 12.8765, 22.83 to 13.897, 22.17 fc rgb \"#4444CC\"",
+ "set object 664 rect from 13.910499999999999, 22.83 to 13.915999999999999, 22.17 fc rgb \"#4444CC\"",
"set object 665 rect from 18.803, 10.2 to 19.803, 9.8 fc rgb \"#000000\"",
"set object 666 rect from 19.8815, 10.2 to 20.8815, 9.8 fc rgb \"#000000\"",
"set object 667 rect from 20.910999999999998, 10.2 to 21.910999999999998, 9.8 fc rgb \"#000000\"",
@@ -1371,7 +1371,7 @@
"set label \"1 ms\" at 14.3305828125,1 font \"Helvetica,7'\"",
"set label \"0 ms\" at 18.204082812499998,1 font \"Helvetica,7'\"",
"set label \"0 ms\" at 85.27908281249994,1 font \"Helvetica,7'\"",
- "set y2range [0:62.076318181818216]",
+ "set y2range [0:59.54259090909095]",
"plot '-' using 1:2 axes x1y2 with impulses ls 1",
"41.88650000000001 13.935500000000008",
"3.7920000000000003 1.3375000000000004",
@@ -1563,4 +1563,4 @@
"# start: 2.4204999999999997",
"# end: 141.1669999999999",
"# objects: 1547"
-] \ No newline at end of file
+]
diff --git a/deps/v8/test/mjsunit/tools/profviz.js b/deps/v8/test/mjsunit/tools/profviz.js
index 3a14f4e6be..6ff0e2cb9e 100644
--- a/deps/v8/test/mjsunit/tools/profviz.js
+++ b/deps/v8/test/mjsunit/tools/profviz.js
@@ -78,6 +78,5 @@ var create_baseline = false;
if (create_baseline) {
print(JSON.stringify(output_lines, null, 2));
} else {
- assertArrayEquals(output_lines,
- JSON.parse(read(reference_file)));
+ assertArrayEquals(JSON.parse(read(reference_file)), output_lines);
}
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js b/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js
new file mode 100644
index 0000000000..be728394ea
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-stack.js
@@ -0,0 +1,105 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+var filename = '(?:[^ ]+/)?test/mjsunit/wasm/asm-wasm-stack.js';
+filename = filename.replace(/\//g, '[/\\\\]');
+
+function checkPreformattedStack(e, expected_lines) {
+ print('preformatted stack: ' + e.stack);
+ var lines = e.stack.split('\n');
+ assertEquals(expected_lines.length, lines.length);
+ for (var i = 0; i < lines.length; ++i) {
+ assertMatches(expected_lines[i], lines[i], 'line ' + i);
+ }
+}
+
+function checkFunctionsOnCallsites(e, locations) {
+ var stack = e.stack;
+ print('callsite objects (size ' + stack.length + '):');
+ for (var i = 0; i < stack.length; ++i) {
+ var s = stack[i];
+ print(
+ ' [' + i + '] ' + s.getFunctionName() + ' (' + s.getFileName() + ':' +
+ s.getLineNumber() + ':' + s.getColumnNumber() + ')');
+ }
+ assertEquals(locations.length, stack.length, 'stack size');
+ for (var i = 0; i < locations.length; ++i) {
+ var cs = stack[i];
+ assertMatches('^' + filename + '$', cs.getFileName(), 'file name at ' + i);
+ assertEquals(
+ locations[i][0], cs.getFunctionName(), 'function name at ' + i);
+ assertEquals(locations[i][1], cs.getLineNumber(), 'line number at ' + i);
+ assertEquals(
+ locations[i][2], cs.getColumnNumber(), 'column number at ' + i);
+ assertNotNull(cs.getThis(), 'receiver should be global');
+ assertEquals(stack[0].getThis(), cs.getThis(), 'receiver should be global');
+ }
+}
+
+function throwException() {
+ throw new Error('exception from JS');
+}
+
+function generateWasmFromAsmJs(stdlib, foreign, heap) {
+ 'use asm';
+ var throwFunc = foreign.throwFunc;
+ function callThrow() {
+ throwFunc();
+ }
+ function redirectFun(i) {
+ i = i|0;
+ switch (i|0) {
+ case 0: callThrow(); break;
+ case 1: redirectFun(0); break;
+ case 2: redirectFun(1); break;
+ }
+ }
+ return redirectFun;
+}
+
+(function PreformattedStackTraceFromJS() {
+ var fun = generateWasmFromAsmJs(this, {throwFunc: throwException}, undefined);
+ var e = null;
+ try {
+ fun(0);
+ } catch (ex) {
+ e = ex;
+ }
+ assertInstanceof(e, Error, 'exception should have been thrown');
+ checkPreformattedStack(e, [
+ '^Error: exception from JS$',
+ '^ *at throwException \\(' + filename + ':43:9\\)$',
+ '^ *at callThrow \\(' + filename + ':50:5\\)$',
+ '^ *at redirectFun \\(' + filename + ':55:15\\)$',
+ '^ *at PreformattedStackTraceFromJS \\(' + filename + ':67:5\\)$',
+ '^ *at ' + filename + ':80:3$'
+ ]);
+})();
+
+// Now collect the Callsite objects instead of just a string.
+Error.prepareStackTrace = function(error, frames) {
+ return frames;
+};
+
+(function CallsiteObjectsFromJS() {
+ var fun = generateWasmFromAsmJs(this, {throwFunc: throwException}, undefined);
+ var e = null;
+ try {
+ fun(2);
+ } catch (ex) {
+ e = ex;
+ }
+ assertInstanceof(e, Error, 'exception should have been thrown');
+ checkFunctionsOnCallsites(e, [
+ ['throwException', 43, 9], // --
+ ['callThrow', 50, 5], // --
+ ['redirectFun', 55, 15], // --
+ ['redirectFun', 56, 15], // --
+ ['redirectFun', 57, 15], // --
+ ['CallsiteObjectsFromJS', 91, 5], // --
+ [null, 105, 3]
+ ]);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-management.js b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
index a1bd2ce3c8..8c4c3b32af 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-management.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-management.js
@@ -2,49 +2,70 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// TODO (mtrofin): re-enable ignition (v8:5345)
-// Flags: --no-ignition --no-ignition-staging
// Flags: --expose-wasm --expose-gc --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
+// Use global variables for all values where the test wants to maintain strict
+// control over value lifetime. Using local variables would not give sufficient
+// guarantees of the value lifetime.
+var module;
+var instance1;
+var instance2;
+var instance3;
+var instance4;
-(function CompiledModuleInstancesAreGCed() {
+(function CompiledModuleInstancesInitialize1to3() {
var builder = new WasmModuleBuilder();
builder.addMemory(1,1, true);
- builder.addImport("getValue", kSig_i);
- builder.addFunction("f", kSig_i)
+ builder.addImport("getValue", kSig_i_v);
+ builder.addFunction("f", kSig_i_v)
.addBody([
kExprCallFunction, 0
]).exportFunc();
- var module = new WebAssembly.Module(builder.toBuffer());
+ module = new WebAssembly.Module(builder.toBuffer());
%ValidateWasmModuleState(module);
%ValidateWasmInstancesChain(module, 0);
- var i1 = new WebAssembly.Instance(module, {getValue: () => 1});
+ instance1 = new WebAssembly.Instance(module, {getValue: () => 1});
%ValidateWasmInstancesChain(module, 1);
- var i2 = new WebAssembly.Instance(module, {getValue: () => 2});
+ instance2 = new WebAssembly.Instance(module, {getValue: () => 2});
%ValidateWasmInstancesChain(module, 2);
- var i3 = new WebAssembly.Instance(module, {getValue: () => 3});
+ instance3 = new WebAssembly.Instance(module, {getValue: () => 3});
%ValidateWasmInstancesChain(module, 3);
+})();
- assertEquals(1, i1.exports.f());
- i1 = null;
- gc();
- %ValidateWasmInstancesChain(module, 2);
- assertEquals(3, i3.exports.f());
- i3 = null;
- gc();
- %ValidateWasmInstancesChain(module, 1);
- assertEquals(2, i2.exports.f());
- i2 = null;
- gc();
- %ValidateWasmModuleState(module);
- var i4 = new WebAssembly.Instance(module, {getValue: () => 4});
- assertEquals(4, i4.exports.f());
+(function CompiledModuleInstancesClear1() {
+ assertEquals(1, instance1.exports.f());
+ instance1 = null;
+})();
+
+gc();
+%ValidateWasmInstancesChain(module, 2);
+
+(function CompiledModuleInstancesClear3() {
+ assertEquals(3, instance3.exports.f());
+ instance3 = null;
+})();
+
+gc();
+%ValidateWasmInstancesChain(module, 1);
+
+(function CompiledModuleInstancesClear2() {
+ assertEquals(2, instance2.exports.f());
+ instance2 = null;
+})();
+
+gc();
+%ValidateWasmModuleState(module);
+
+(function CompiledModuleInstancesInitialize4AndClearModule() {
+ instance4 = new WebAssembly.Instance(module, {getValue: () => 4});
+ assertEquals(4, instance4.exports.f());
module = null;
- gc();
- %ValidateWasmOrphanedInstance(i4);
})();
+
+gc();
+%ValidateWasmOrphanedInstance(instance4);
diff --git a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
index aa36b71882..eb6534d0a8 100644
--- a/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
+++ b/deps/v8/test/mjsunit/wasm/compiled-module-serialization.js
@@ -12,7 +12,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
builder.addMemory(1,1, true);
var kSig_v_i = makeSig([kAstI32], []);
var signature = builder.addType(kSig_v_i);
- builder.addImport("some_value", kSig_i);
+ builder.addImport("some_value", kSig_i_v);
builder.addImport("writer", signature);
builder.addFunction("main", kSig_i_i)
@@ -20,7 +20,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprGetLocal, 0,
kExprI32LoadMem, 0, 0,
kExprI32Const, 1,
- kExprCallIndirect, signature,
+ kExprCallIndirect, signature, kTableZero,
kExprGetLocal,0,
kExprI32LoadMem,0, 0,
kExprCallFunction, 0,
@@ -35,15 +35,15 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
kExprCallFunction, 1]);
builder.appendToTable([2, 3]);
-
- var module = new WebAssembly.Module(builder.toBuffer());
+ var wire_bytes = builder.toBuffer();
+ var module = new WebAssembly.Module(wire_bytes);
var buff = %SerializeWasmModule(module);
module = null;
gc();
- module = %DeserializeWasmModule(buff);
+ module = %DeserializeWasmModule(buff, wire_bytes);
- var mem_1 = new ArrayBuffer(4);
- var view_1 = new Int32Array(mem_1);
+ var mem_1 = new WebAssembly.Memory({initial: 1});
+ var view_1 = new Int32Array(mem_1.buffer);
view_1[0] = 42;
@@ -59,19 +59,20 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function DeserializeInvalidObject() {
var invalid_buffer = new ArrayBuffer(10);
- module = %DeserializeWasmModule(invalid_buffer);
+ module = %DeserializeWasmModule(invalid_buffer, invalid_buffer);
assertEquals(module, undefined);
})();
(function RelationBetweenModuleAndClone() {
let builder = new WasmModuleBuilder();
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([kExprI8Const, 42])
.exportFunc();
- var compiled_module = new WebAssembly.Module(builder.toBuffer());
+ var wire_bytes = builder.toBuffer();
+ var compiled_module = new WebAssembly.Module(wire_bytes);
var serialized = %SerializeWasmModule(compiled_module);
- var clone = %DeserializeWasmModule(serialized);
+ var clone = %DeserializeWasmModule(serialized, wire_bytes);
assertNotNull(clone);
assertFalse(clone == undefined);
@@ -81,15 +82,16 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function SerializeAfterInstantiation() {
let builder = new WasmModuleBuilder();
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([kExprI8Const, 42])
.exportFunc();
- var compiled_module = new WebAssembly.Module(builder.toBuffer());
+ var wire_bytes = builder.toBuffer()
+ var compiled_module = new WebAssembly.Module(wire_bytes);
var instance1 = new WebAssembly.Instance(compiled_module);
var instance2 = new WebAssembly.Instance(compiled_module);
var serialized = %SerializeWasmModule(compiled_module);
- var clone = %DeserializeWasmModule(serialized);
+ var clone = %DeserializeWasmModule(serialized, wire_bytes);
assertNotNull(clone);
assertFalse(clone == undefined);
diff --git a/deps/v8/test/mjsunit/wasm/data-segments.js b/deps/v8/test/mjsunit/wasm/data-segments.js
new file mode 100644
index 0000000000..8cff5e1756
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/data-segments.js
@@ -0,0 +1,66 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var debug = false;
+
+function SimpleDataSegmentTest(offset) {
+ print("SimpleDataSegmentTest(" + offset + ")...");
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportAs("load");
+ builder.addDataSegment(offset, [9, 9, 9, 9]);
+
+ var buffer = builder.toBuffer(debug);
+ var instance = new WebAssembly.Instance(new WebAssembly.Module(buffer));
+ for (var i = offset - 20; i < offset + 20; i += 4) {
+ if (i < 0) continue;
+ var expected = (i == offset) ? 151587081 : 0;
+ assertEquals(expected, instance.exports.load(i));
+ }
+}
+
+SimpleDataSegmentTest(0);
+SimpleDataSegmentTest(4);
+SimpleDataSegmentTest(12);
+SimpleDataSegmentTest(1064);
+
+function GlobalImportedInitTest(pad) {
+ print("GlobaleImportedInitTest(" + pad + ")...");
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+
+ while (pad-- > 0) builder.addGlobal(kAstI32); // pad
+
+ var g = builder.addImportedGlobal("offset", undefined, kAstI32);
+
+ while (pad-- > 0) builder.addGlobal(kAstI32); // pad
+
+ builder.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportAs("load");
+ builder.addDataSegment(g.index, [5, 5, 5, 5], true);
+
+ var buffer = builder.toBuffer(debug);
+ var module = new WebAssembly.Module(buffer);
+
+ for (var offset of [0, 12, 192, 1024]) {
+ var instance = new WebAssembly.Instance(module, {offset: offset});
+ for (var i = offset - 20; i < offset + 20; i += 4) {
+ if (i < 0) continue;
+ var expected = i == offset ? 84215045 : 0;
+ assertEquals(expected, instance.exports.load(i));
+ }
+ }
+}
+
+GlobalImportedInitTest(0);
+GlobalImportedInitTest(1);
+GlobalImportedInitTest(4);
diff --git a/deps/v8/test/mjsunit/wasm/debug-disassembly.js b/deps/v8/test/mjsunit/wasm/debug-disassembly.js
deleted file mode 100644
index ac09d4af96..0000000000
--- a/deps/v8/test/mjsunit/wasm/debug-disassembly.js
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm --expose-debug-as debug
-
-load("test/mjsunit/wasm/wasm-constants.js");
-load("test/mjsunit/wasm/wasm-module-builder.js");
-
-Debug = debug.Debug
-
-// Initialized in setup().
-var exception;
-var break_count;
-var num_wasm_scripts;
-var module;
-
-function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- ++break_count;
- // Request frame details. This should trigger creation of the Script
- // objects for all frames on the stack.
- var num_frames = exec_state.frameCount();
- for (var i = 0; i < num_frames; ++i) {
- var frame = exec_state.frame(i);
- var details = frame.details();
- var script = details.script();
- if (script.type == Debug.ScriptType.Wasm) {
- var pos = frame.sourcePosition();
- var name = script.nameOrSourceURL();
- var disassembly = Debug.disassembleWasmFunction(script.id);
- var offset_table = Debug.getWasmFunctionOffsetTable(script.id);
- assertEquals(0, offset_table.length % 3);
- var lineNr = null;
- var columnNr = null;
- for (var p = 0; p < offset_table.length; p += 3) {
- if (offset_table[p] != pos) continue;
- lineNr = offset_table[p+1];
- columnNr = offset_table[p+2];
- }
- assertNotNull(lineNr, "position should occur in offset table");
- assertNotNull(columnNr, "position should occur in offset table");
- var line = disassembly.split("\n")[lineNr];
- assertTrue(!!line, "line number must occur in disassembly");
- assertTrue(line.length > columnNr, "column number must be valid");
- var expected_string;
- if (name.endsWith("/1")) {
- // Function 0 calls the imported function.
- expected_string = "kExprCallFunction,";
- } else if (name.endsWith("/2")) {
- // Function 1 calls function 0.
- expected_string = "kExprCallFunction,";
- } else {
- assertTrue(false, "Unexpected wasm script: " + name);
- }
- assertTrue(line.substr(columnNr).startsWith(expected_string),
- "offset " + columnNr + " should start with '" + expected_string
- + "': " + line);
- }
- }
- } else if (event == Debug.DebugEvent.AfterCompile) {
- var script = event_data.script();
- if (script.scriptType() == Debug.ScriptType.Wasm) {
- ++num_wasm_scripts;
- }
- }
- } catch (e) {
- print("exception: " + e);
- exception = e;
- }
-};
-
-var builder = new WasmModuleBuilder();
-
-builder.addImport("func", kSig_v_v);
-
-builder.addFunction("call_import", kSig_v_v)
- .addBody([kExprCallFunction, 0])
- .exportFunc();
-
-// Add a bit of unneccessary code to increase the byte offset.
-builder.addFunction("call_call_import", kSig_v_v)
- .addLocals({i32_count: 2})
- .addBody([
- kExprI32Const, 27, kExprSetLocal, 0,
- kExprI32Const, (-7 & 0x7f), kExprSetLocal, 1,
- kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add, kExprI64UConvertI32,
- kExprI64Const, 0,
- kExprI64Ne, kExprIf, kAstStmt,
- kExprCallFunction, 1,
- kExprEnd
- ])
- .exportFunc();
-
-function call_debugger() {
- debugger;
-}
-
-function setup() {
- module = builder.instantiate({func: call_debugger});
- exception = null;
- break_count = 0;
- num_wasm_scripts = 0;
-}
-
-(function testRegisteredWasmScripts1() {
- setup();
- Debug.setListener(listener);
- // Call the "call_import" function -> 1 script.
- module.exports.call_import();
- module.exports.call_import();
- module.exports.call_call_import();
- Debug.setListener(null);
-
- assertEquals(3, break_count);
- if (exception) throw exception;
-})();
-
-(function testRegisteredWasmScripts2() {
- setup();
- Debug.setListener(listener);
- module.exports.call_call_import();
- Debug.setListener(null);
-
- assertEquals(1, break_count);
- if (exception) throw exception;
-})();
diff --git a/deps/v8/test/mjsunit/wasm/errors.js b/deps/v8/test/mjsunit/wasm/errors.js
new file mode 100644
index 0000000000..97ce89998b
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/errors.js
@@ -0,0 +1,134 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+'use strict';
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function module(bytes) {
+ let buffer = bytes;
+ if (typeof buffer === 'string') {
+ buffer = new ArrayBuffer(bytes.length);
+ let view = new Uint8Array(buffer);
+ for (let i = 0; i < bytes.length; ++i) {
+ view[i] = bytes.charCodeAt(i);
+ }
+ }
+ return new WebAssembly.Module(buffer);
+}
+
+function instance(bytes, imports = {}) {
+ return new WebAssembly.Instance(module(bytes), imports);
+}
+
+function builder() {
+ return new WasmModuleBuilder;
+}
+
+function assertCompileError(bytes) {
+ assertThrows(() => module(bytes), WebAssembly.CompileError);
+}
+
+function assertLinkError(bytes, imports = {}) {
+ assertThrows(() => instance(bytes, imports), TypeError);
+}
+
+function assertRuntimeError(bytes, imports = {}) {
+ assertThrows(() => instance(bytes, imports).exports.run(),
+ WebAssembly.RuntimeError);
+}
+
+function assertConversionError(bytes, imports = {}) {
+ assertThrows(() => instance(bytes, imports).exports.run(), TypeError);
+}
+
+(function TestDecodingError() {
+ assertCompileError("");
+ assertCompileError("X");
+ assertCompileError("\0x00asm");
+})();
+
+(function TestValidationError() {
+ assertCompileError(builder().addFunction("f", kSig_i_v).end().toBuffer());
+ assertCompileError(builder().addFunction("f", kSig_i_v).addBody([
+ kExprReturn
+ ]).end().toBuffer());
+ assertCompileError(builder().addFunction("f", kSig_v_v).addBody([
+ kExprGetLocal, 0
+ ]).end().toBuffer());
+ assertCompileError(builder().addStart(0).toBuffer());
+})();
+
+(function TestLinkingError() {
+ let b;
+
+ b = builder();
+ b.addImportWithModule("foo", "bar", kSig_v_v);
+ assertLinkError(b.toBuffer(), {});
+ b = builder();
+ b.addImportWithModule("foo", "bar", kSig_v_v);
+ assertLinkError(b.toBuffer(), {foo: {}});
+ b = builder();
+ b.addImportWithModule("foo", "bar", kSig_v_v);
+ assertLinkError(b.toBuffer(), {foo: {bar: 9}});
+
+ b = builder();
+ b.addImportedGlobal("foo", "bar", kAstI32);
+ assertLinkError(b.toBuffer(), {});
+ // TODO(titzer): implement stricter import checks for globals.
+ // b = builder();
+ // b.addImportedGlobal("foo", "bar", kAstI32);
+ // assertLinkError(b.toBuffer(), {foo: {}});
+ // b = builder();
+ // b.addImportedGlobal("foo", "bar", kAstI32);
+ // assertLinkError(b.toBuffer(), {foo: {bar: ""}});
+ // b = builder();
+ // b.addImportedGlobal("foo", "bar", kAstI32);
+ // assertLinkError(b.toBuffer(), {foo: {bar: () => 9}});
+
+ b = builder();
+ b.addImportedMemory("foo", "bar");
+ assertLinkError(b.toBuffer(), {});
+ b = builder();
+ b.addImportedMemory("foo", "bar");
+ assertLinkError(b.toBuffer(), {foo: {}});
+ // TODO(titzer): implement stricter import checks for globals.
+ // b = builder();
+ // b.addImportedMemory("foo", "bar", 1);
+ // assertLinkError(b.toBuffer(),
+ // {foo: {bar: new WebAssembly.Memory({initial: 0})}});
+})();
+
+(function TestTrapError() {
+ assertRuntimeError(builder().addFunction("run", kSig_v_v).addBody([
+ kExprUnreachable
+ ]).exportFunc().end().toBuffer());
+
+ assertRuntimeError(builder().addFunction("run", kSig_v_v).addBody([
+ kExprI32Const, 1,
+ kExprI32Const, 0,
+ kExprI32DivS,
+ kExprDrop
+ ]).exportFunc().end().toBuffer());
+
+ assertRuntimeError(builder().addFunction("run", kSig_v_v).addBody([
+ ]).exportFunc().end().
+ addFunction("start", kSig_v_v).addBody([
+ kExprUnreachable
+ ]).end().addStart(1).toBuffer());
+})();
+
+(function TestConversionError() {
+ let b = builder();
+ b.addImportWithModule("foo", "bar", kSig_v_l);
+ assertConversionError(b.addFunction("run", kSig_v_v).addBody([
+ kExprI64Const, 0, kExprCallFunction, 0
+ ]).exportFunc().end().toBuffer());
+ assertConversionError(builder().addFunction("run", kSig_l_v).addBody([
+ kExprI64Const, 0
+ ]).exportFunc().end().toBuffer());
+})();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index 71bd5f18ed..d02e34b9d0 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -106,7 +106,7 @@ var test_catch = (function () {
}
var kJSThrowLarge = builder.addImport("throw_large", sig_index);
- // Helper function for throwing from Wasm.
+ // Helper function for throwing from WebAssembly.
var kWasmThrowFunction =
builder.addFunction("throw", kSig_v_i)
.addBody([
diff --git a/deps/v8/test/mjsunit/wasm/export-table.js b/deps/v8/test/mjsunit/wasm/export-table.js
index 6d21cf5790..4e9f1314e3 100644
--- a/deps/v8/test/mjsunit/wasm/export-table.js
+++ b/deps/v8/test/mjsunit/wasm/export-table.js
@@ -8,10 +8,11 @@ load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
(function testExportedMain() {
+ print("TestExportedMain...");
var kReturnValue = 88;
var builder = new WasmModuleBuilder();
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([
kExprI8Const,
kReturnValue,
@@ -28,11 +29,12 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
})();
(function testExportedTwice() {
+ print("TestExportedTwice...");
var kReturnValue = 99;
var builder = new WasmModuleBuilder();
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([
kExprI8Const,
kReturnValue,
@@ -49,15 +51,17 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertEquals(kReturnValue, module.exports.foo());
assertEquals(kReturnValue, module.exports.blah());
+ assertSame(module.exports.blah, module.exports.foo);
})();
(function testNumericName() {
+ print("TestNumericName...");
var kReturnValue = 93;
var builder = new WasmModuleBuilder();
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([
kExprI8Const,
kReturnValue,
@@ -74,6 +78,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
})();
(function testExportNameClash() {
+ print("TestExportNameClash...");
var builder = new WasmModuleBuilder();
builder.addFunction("one", kSig_v_v).addBody([kExprNop]).exportAs("main");
@@ -87,3 +92,23 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
assertContains("Duplicate export", e.toString());
}
})();
+
+
+(function testExportMultipleIdentity() {
+ print("TestExportMultipleIdentity...");
+ var builder = new WasmModuleBuilder();
+
+ builder.addFunction("one", kSig_v_v).addBody([kExprNop])
+ .exportAs("a")
+ .exportAs("b")
+ .exportAs("c");
+
+ let instance = builder.instantiate();
+ let e = instance.exports;
+ assertEquals("function", typeof e.a);
+ assertEquals("function", typeof e.b);
+ assertEquals("function", typeof e.c);
+ assertSame(e.a, e.b);
+ assertSame(e.a, e.c);
+ assertEquals("a", e.a.name);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/ffi.js b/deps/v8/test/mjsunit/wasm/ffi.js
index 5966ec8c5e..4b503efce4 100644
--- a/deps/v8/test/mjsunit/wasm/ffi.js
+++ b/deps/v8/test/mjsunit/wasm/ffi.js
@@ -94,7 +94,7 @@ print("Native function");
var builder = new WasmModuleBuilder();
- var sig_index = builder.addType(kSig_d);
+ var sig_index = builder.addType(kSig_d_v);
builder.addImport("func", sig_index);
builder.addFunction("main", sig_index)
.addBody([
diff --git a/deps/v8/test/mjsunit/wasm/frame-inspection.js b/deps/v8/test/mjsunit/wasm/frame-inspection.js
index 9d45239e4a..01f9142d3a 100644
--- a/deps/v8/test/mjsunit/wasm/frame-inspection.js
+++ b/deps/v8/test/mjsunit/wasm/frame-inspection.js
@@ -12,10 +12,14 @@ Debug = debug.Debug
var exception = null;
var break_count = 0;
-const expected_num_frames = 5;
-const expected_wasm_frames = [false, true, true, false, false];
-const expected_wasm_positions = [0, 1, 2, 0, 0];
-const expected_function_names = ["call_debugger", "wasm_2", "wasm_1", "testFrameInspection", ""];
+const expected_frames = [
+ // func-name; wasm?; pos; line; col
+ ['call_debugger', false], // --
+ ['wasm_2', true, 56, 2, 1], // --
+ ['wasm_1', true, 52, 1, 2], // --
+ ['testFrameInspection', false], // --
+ ['', false]
+];
function listener(event, exec_state, event_data, data) {
if (event != Debug.DebugEvent.Break) return;
@@ -23,20 +27,24 @@ function listener(event, exec_state, event_data, data) {
try {
var break_id = exec_state.break_id;
var frame_count = exec_state.frameCount();
- assertEquals(expected_num_frames, frame_count);
+ assertEquals(expected_frames.length, frame_count, 'frame count');
for (var i = 0; i < frame_count; ++i) {
var frame = exec_state.frame(i);
+ assertEquals(expected_frames[i][0], frame.func().name(), 'name at ' + i);
// wasm frames have unresolved function, others resolved ones.
- assertEquals(expected_wasm_frames[i], !frame.func().resolved());
- assertEquals(expected_function_names[i], frame.func().name());
- if (expected_wasm_frames[i]) {
+ assertEquals(
+ expected_frames[i][1], !frame.func().resolved(), 'resolved at ' + i);
+ if (expected_frames[i][1]) { // wasm frame?
var script = frame.details().script();
- assertNotNull(script);
- assertEquals(expected_wasm_positions[i], frame.details().sourcePosition());
+ assertNotNull(script, 'script at ' + i);
+ assertEquals(
+ expected_frames[i][2], frame.details().sourcePosition(),
+ 'source pos at ' + i);
var loc = script.locationFromPosition(frame.details().sourcePosition());
- assertEquals(expected_wasm_positions[i], loc.column);
- assertEquals(expected_wasm_positions[i], loc.position);
+ assertEquals(expected_frames[i][2], loc.position, 'pos at ' + i);
+ assertEquals(expected_frames[i][3], loc.line, 'line at ' + i);
+ assertEquals(expected_frames[i][4], loc.column, 'column at ' + i);
}
}
} catch (e) {
@@ -49,14 +57,13 @@ var builder = new WasmModuleBuilder();
// wasm_1 calls wasm_2 on offset 2.
// wasm_2 calls call_debugger on offset 1.
-builder.addImport("func", kSig_v_v);
+builder.addImport('func', kSig_v_v);
-builder.addFunction("wasm_1", kSig_v_v)
- .addBody([kExprNop, kExprCallFunction, 2])
- .exportAs("main");
+builder.addFunction('wasm_1', kSig_v_v)
+ .addBody([kExprNop, kExprCallFunction, 2])
+ .exportAs('main');
-builder.addFunction("wasm_2", kSig_v_v)
- .addBody([kExprCallFunction, 0]);
+builder.addFunction('wasm_2', kSig_v_v).addBody([kExprCallFunction, 0]);
function call_debugger() {
debugger;
diff --git a/deps/v8/test/mjsunit/wasm/function-prototype.js b/deps/v8/test/mjsunit/wasm/function-prototype.js
index 25339adea7..fdf788f773 100644
--- a/deps/v8/test/mjsunit/wasm/function-prototype.js
+++ b/deps/v8/test/mjsunit/wasm/function-prototype.js
@@ -10,7 +10,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
(function TestFunctionPrototype() {
var builder = new WasmModuleBuilder();
- builder.addFunction("nine", kSig_i)
+ builder.addFunction("nine", kSig_i_v)
.addBody([kExprI8Const, 9])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/gc-buffer.js b/deps/v8/test/mjsunit/wasm/gc-buffer.js
new file mode 100644
index 0000000000..d33695532a
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-buffer.js
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --stress-gc --expose-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function run(f) {
+ // wrap the creation in a closure so that the only thing returned is
+ // the module (i.e. the underlying array buffer of WASM wire bytes dies).
+ var module = (() => {
+ var builder = new WasmModuleBuilder();
+ builder.addImport("the_name_of_my_import", kSig_i_i);
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, 0])
+ .exportAs("main");
+ print("module");
+ return new WebAssembly.Module(builder.toBuffer());
+ })();
+
+ gc();
+ for (var i = 0; i < 10; i++) {
+ print(" instance " + i);
+ var instance = new WebAssembly.Instance(module, {the_name_of_my_import: f});
+ var g = instance.exports.main;
+ assertEquals("function", typeof g);
+ for (var j = 0; j < 10; j++) {
+ assertEquals(f(j), g(j));
+ }
+ }
+}
+
+(function test() {
+ for (var i = 0; i < 3; i++) {
+ run(x => (x + 19));
+ run(x => (x - 18));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-stress.js b/deps/v8/test/mjsunit/wasm/gc-stress.js
new file mode 100644
index 0000000000..468be105cc
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gc-stress.js
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm --stress-gc
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function run(f) {
+ var builder = new WasmModuleBuilder();
+ builder.addImport("f", kSig_i_i);
+ builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallFunction, 0])
+ .exportAs("main");
+
+ print("module");
+ var module = new WebAssembly.Module(builder.toBuffer());
+
+ for (var i = 0; i < 10; i++) {
+ print(" instance " + i);
+ var instance = new WebAssembly.Instance(module, {f: f});
+ var g = instance.exports.main;
+ for (var j = 0; j < 10; j++) {
+ assertEquals(f(j), g(j));
+ }
+ }
+}
+
+(function test() {
+ for (var i = 0; i < 100; i++) {
+ run(x => (x + 19));
+ run(x => (x - 18));
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/globals.js b/deps/v8/test/mjsunit/wasm/globals.js
new file mode 100644
index 0000000000..ca421477a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/globals.js
@@ -0,0 +1,88 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function TestImported(type, val, expected) {
+ print("TestImported " + type + "(" + val +")" + " = " + expected);
+ var builder = new WasmModuleBuilder();
+ var sig = makeSig([], [type]);
+ var g = builder.addImportedGlobal("foo", undefined, type);
+ builder.addFunction("main", sig)
+ .addBody([kExprGetGlobal, g.index])
+ .exportAs("main");
+ builder.addGlobal(kAstI32); // pad
+
+ var instance = builder.instantiate({foo: val});
+ assertEquals(expected, instance.exports.main());
+}
+
+TestImported(kAstI32, 300.1, 300);
+TestImported(kAstF32, 87234.87238, Math.fround(87234.87238));
+TestImported(kAstF64, 77777.88888, 77777.88888);
+TestImported(kAstF64, "89", 89);
+
+
+function TestExported(type, val, expected) {
+ print("TestExported " + type + "(" + val +")" + " = " + expected);
+ var builder = new WasmModuleBuilder();
+ var sig = makeSig([type], []);
+ builder.addGlobal(kAstI32); // pad
+ var g = builder.addGlobal(type, false)
+ .exportAs("foo");
+ g.init = val;
+ builder.addGlobal(kAstI32); // pad
+
+ var instance = builder.instantiate();
+ assertEquals(expected, instance.exports.foo);
+}
+
+TestExported(kAstI32, 455.5, 455);
+TestExported(kAstF32, -999.34343, Math.fround(-999.34343));
+TestExported(kAstF64, 87347.66666, 87347.66666);
+
+
+function TestImportedExported(type, val, expected) {
+ print("TestImportedExported " + type + "(" + val +")" + " = " + expected);
+ var builder = new WasmModuleBuilder();
+ var sig = makeSig([type], []);
+ var i = builder.addImportedGlobal("foo", undefined, type);
+ builder.addGlobal(kAstI32); // pad
+ var o = builder.addGlobal(type, false)
+ .exportAs("bar");
+ o.init_index = i;
+ builder.addGlobal(kAstI32); // pad
+
+ var instance = builder.instantiate({foo: val});
+ assertEquals(expected, instance.exports.bar);
+}
+
+TestImportedExported(kAstI32, 415.5, 415);
+TestImportedExported(kAstF32, -979.34343, Math.fround(-979.34343));
+TestImportedExported(kAstF64, 81347.66666, 81347.66666);
+
+function TestGlobalIndexSpace(type, val) {
+ print("TestGlobalIndexSpace(" + val + ") = " + val);
+ var builder = new WasmModuleBuilder();
+ var im = builder.addImportedGlobal("foo", undefined, type);
+ assertEquals(0, im);
+ var def = builder.addGlobal(type, false);
+ assertEquals(1, def.index);
+ def.init_index = im;
+
+ var sig = makeSig([], [type]);
+ builder.addFunction("main", sig)
+ .addBody([kExprGetGlobal, def.index])
+ .exportAs("main");
+
+ var instance = builder.instantiate({foo: val});
+ assertEquals(val, instance.exports.main());
+}
+
+TestGlobalIndexSpace(kAstI32, 123);
+TestGlobalIndexSpace(kAstF32, 54321.125);
+TestGlobalIndexSpace(kAstF64, 12345.678);
diff --git a/deps/v8/test/mjsunit/wasm/grow-memory.js b/deps/v8/test/mjsunit/wasm/grow-memory.js
index ecc105ee0d..37289a5b77 100644
--- a/deps/v8/test/mjsunit/wasm/grow-memory.js
+++ b/deps/v8/test/mjsunit/wasm/grow-memory.js
@@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --expose-gc --stress-compaction
+// Flags: --expose-wasm --stress-compaction
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
var kPageSize = 0x10000;
+var kV8MaxPages = 16384;
function genGrowMemoryBuilder() {
var builder = new WasmModuleBuilder();
builder.addFunction("grow_memory", kSig_i_i)
- .addBody([kExprGetLocal, 0, kExprGrowMemory])
+ .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
.exportFunc();
builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
@@ -38,9 +39,11 @@ function genGrowMemoryBuilder() {
return builder;
}
+// TODO(gdeepti): Generate tests programmatically for all the sizes instead of
+// current implementation.
function testGrowMemoryReadWrite32() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load(offset); }
@@ -87,7 +90,7 @@ testGrowMemoryReadWrite32();
function testGrowMemoryReadWrite16() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load16(offset); }
@@ -134,7 +137,7 @@ testGrowMemoryReadWrite16();
function testGrowMemoryReadWrite8() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset;
function peek() { return module.exports.load8(offset); }
@@ -197,15 +200,96 @@ function testGrowMemoryZeroInitialSize() {
assertEquals(20, peek());
}
- //TODO(gdeepti): Fix tests with correct write boundaries
- //when runtime function is fixed.
- for(offset = kPageSize; offset <= kPageSize + 5; offset++) {
+ for(offset = kPageSize - 3; offset <= kPageSize + 5; offset++) {
assertTraps(kTrapMemOutOfBounds, peek);
}
+
+ offset = 3*kPageSize;
+ for (var i = 1; i < 4; i++) {
+ assertTraps(kTrapMemOutOfBounds, poke);
+ assertEquals(i, growMem(1));
+ }
+ poke(20);
+ assertEquals(20, peek());
}
testGrowMemoryZeroInitialSize();
+function testGrowMemoryZeroInitialSize32() {
+ var builder = genGrowMemoryBuilder();
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load(offset); }
+ function poke(value) { return module.exports.store(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ assertTraps(kTrapMemOutOfBounds, peek);
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(0, growMem(1));
+
+ for(offset = 0; offset <= kPageSize - 4; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+
+ for(offset = kPageSize - 3; offset <= kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryZeroInitialSize32();
+
+function testGrowMemoryZeroInitialSize16() {
+ var builder = genGrowMemoryBuilder();
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load16(offset); }
+ function poke(value) { return module.exports.store16(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ assertTraps(kTrapMemOutOfBounds, peek);
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(0, growMem(1));
+
+ for(offset = 0; offset <= kPageSize - 2; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+
+ for(offset = kPageSize - 1; offset <= kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryZeroInitialSize16();
+
+function testGrowMemoryZeroInitialSize8() {
+ var builder = genGrowMemoryBuilder();
+ var module = builder.instantiate();
+ var offset;
+ function peek() { return module.exports.load8(offset); }
+ function poke(value) { return module.exports.store8(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+
+ assertTraps(kTrapMemOutOfBounds, peek);
+ assertTraps(kTrapMemOutOfBounds, poke);
+
+ assertEquals(0, growMem(1));
+
+ for(offset = 0; offset <= kPageSize - 1; offset++) {
+ poke(20);
+ assertEquals(20, peek());
+ }
+
+ for(offset = kPageSize; offset <= kPageSize + 5; offset++) {
+ assertTraps(kTrapMemOutOfBounds, peek);
+ }
+}
+
+testGrowMemoryZeroInitialSize8();
+
function testGrowMemoryTrapMaxPagesZeroInitialMemory() {
var builder = genGrowMemoryBuilder();
var module = builder.instantiate();
@@ -240,9 +324,9 @@ testGrowMemoryTrapsWithNonSmiInput();
function testGrowMemoryCurrentMemory() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
builder.addFunction("memory_size", kSig_i_v)
- .addBody([kExprMemorySize])
+ .addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
var module = builder.instantiate();
function growMem(pages) { return module.exports.grow_memory(pages); }
@@ -256,7 +340,7 @@ testGrowMemoryCurrentMemory();
function testGrowMemoryPreservesDataMemOp32() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load(offset); }
@@ -279,7 +363,7 @@ testGrowMemoryPreservesDataMemOp32();
function testGrowMemoryPreservesDataMemOp16() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load16(offset); }
@@ -302,7 +386,7 @@ testGrowMemoryPreservesDataMemOp16();
function testGrowMemoryPreservesDataMemOp8() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset, val = 0;
function peek() { return module.exports.load8(offset); }
@@ -329,7 +413,7 @@ testGrowMemoryPreservesDataMemOp8();
function testGrowMemoryOutOfBoundsOffset() {
var builder = genGrowMemoryBuilder();
- builder.addMemory(1, 1, false);
+ builder.addMemory(1, kV8MaxPages, false);
var module = builder.instantiate();
var offset, val;
function peek() { return module.exports.load(offset); }
@@ -358,3 +442,48 @@ function testGrowMemoryOutOfBoundsOffset() {
}
testGrowMemoryOutOfBoundsOffset();
+
+function testGrowMemoryOutOfBoundsOffset2() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(16, 128, false);
+ builder.addFunction("main", kSig_v_v)
+ .addBody([
+ kExprI32Const, 20,
+ kExprI32Const, 29,
+ kExprGrowMemory, kMemoryZero,
+ kExprI32StoreMem, 0, 0xFF, 0xFF, 0xFF, 0x3a
+ ])
+ .exportAs("main");
+ var module = builder.instantiate();
+ assertTraps(kTrapMemOutOfBounds, module.exports.main);
+}
+
+testGrowMemoryOutOfBoundsOffset2();
+
+function testGrowMemoryDeclaredMaxTraps() {
+ var builder = genGrowMemoryBuilder();
+ builder.addMemory(1, 16, false);
+ var module = builder.instantiate();
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+ assertEquals(1, growMem(5));
+ assertEquals(6, growMem(5));
+ assertEquals(-1, growMem(6));
+}
+
+testGrowMemoryDeclaredMaxTraps();
+
+function testGrowMemoryDeclaredSpecMaxTraps() {
+ // The spec maximum is higher than the internal V8 maximum. This test only
+ // checks that grow_memory does not grow past the internally defined maximum
+ // to reflect the current implementation.
+ var builder = genGrowMemoryBuilder();
+ var kSpecMaxPages = 65535;
+ builder.addMemory(1, kSpecMaxPages, false);
+ var module = builder.instantiate();
+ function poke(value) { return module.exports.store(offset, value); }
+ function growMem(pages) { return module.exports.grow_memory(pages); }
+ assertEquals(1, growMem(20));
+ assertEquals(-1, growMem(kV8MaxPages - 20));
+}
+
+testGrowMemoryDeclaredSpecMaxTraps();
diff --git a/deps/v8/test/mjsunit/wasm/import-memory.js b/deps/v8/test/mjsunit/wasm/import-memory.js
new file mode 100644
index 0000000000..845da91d1d
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/import-memory.js
@@ -0,0 +1,231 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function TestOne() {
+ print("TestOne");
+ let memory = new WebAssembly.Memory({initial: 1});
+ assertEquals(kPageSize, memory.buffer.byteLength);
+ let i32 = new Int32Array(memory.buffer);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("mine");
+ builder.addFunction("main", kSig_i_v)
+ .addBody([
+ kExprI32Const, 0,
+ kExprI32LoadMem, 0, 0])
+ .exportAs("main");
+
+ let main = builder.instantiate({mine: memory}).exports.main;
+ assertEquals(0, main());
+
+ i32[0] = 993377;
+
+ assertEquals(993377, main());
+})();
+
+(function TestIdentity() {
+ print("TestIdentity");
+ let memory = new WebAssembly.Memory({initial: 1});
+ let i32 = new Int32Array(memory.buffer);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("garg");
+ builder.exportMemoryAs("daggle");
+
+ let instance = builder.instantiate({garg: memory});
+ assertSame(memory, instance.exports.daggle);
+})();
+
+
+(function TestImportExport() {
+ print("TestImportExport");
+ var i1;
+ {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.exportMemoryAs("exported_mem");
+ builder.addFunction("foo", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 0])
+ .exportAs("foo");
+ i1 = builder.instantiate();
+ }
+
+ var i2;
+ {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addImportedMemory("imported_mem");
+ builder.addFunction("bar", kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32LoadMem, 0, 0])
+ .exportAs("bar");
+ i2 = builder.instantiate({imported_mem: i1.exports.exported_mem});
+ }
+
+ let i32 = new Int32Array(i1.exports.exported_mem.buffer);
+
+ for (var i = 0; i < 1e11; i = i * 3 + 5) {
+ for (var j = 0; j < 10; j++) {
+ var val = i + 99077 + j;
+ i32[j] = val;
+ assertEquals(val | 0, i1.exports.foo(j * 4));
+ assertEquals(val | 0, i2.exports.bar(j * 4));
+ }
+ }
+})();
+
+(function ValidateBoundsCheck() {
+ print("ValidateBoundsCheck");
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 5});
+ assertEquals(kPageSize, memory.buffer.byteLength);
+ let i32 = new Int32Array(memory.buffer);
+ let builder = new WasmModuleBuilder();
+ // builder.addImportedMemory("mine");
+ builder.addImportedMemory("mine");
+ builder.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+ builder.addFunction("store", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
+ kExprGetLocal, 1])
+ .exportFunc();
+ var offset;
+ let instance = builder.instantiate({mine: memory});
+ function load() { return instance.exports.load(offset); }
+ function store(value) { return instance.exports.store(offset, value); }
+
+ for (offset = 0; offset < kPageSize - 3; offset+=4) {
+ store(offset);
+ }
+ for (offset = 0; offset < kPageSize - 3; offset+=4) {
+ assertEquals(offset, load());
+ }
+ for (offset = kPageSize - 3; offset < kPageSize + 4; offset++) {
+ assertTraps(kTrapMemOutOfBounds, load);
+ }
+})();
+
+(function TestGrowMemoryMaxDesc() {
+ print("MaximumDescriptor");
+ let memory = new WebAssembly.Memory({initial: 1, maximum: 5});
+ assertEquals(kPageSize, memory.buffer.byteLength);
+ let i32 = new Int32Array(memory.buffer);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("mine", "", 0, 20);
+ builder.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+ builder.addFunction("store", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
+ kExprGetLocal, 1])
+ .exportFunc();
+ var offset;
+ let instance = builder.instantiate({mine: memory});
+ function load() { return instance.exports.load(offset); }
+ function store(value) { return instance.exports.store(offset, value); }
+
+ for (var i = 1; i < 5; i++) {
+ for (offset = (i - 1) * kPageSize; offset < i * kPageSize - 3; offset+=4) {
+ store(offset * 2);
+ }
+ assertEquals(i, memory.grow(1));
+ assertEquals((i + 1) * kPageSize, memory.buffer.byteLength);
+ }
+ for (offset = 4 * kPageSize; offset < 5 * kPageSize - 3; offset+=4) {
+ store(offset * 2);
+ }
+ for (offset = 0; offset < 5 * kPageSize - 3; offset+=4) {
+ assertEquals(offset * 2, load());
+ }
+ for (offset = 5 * kPageSize; offset < 5 * kPageSize + 4; offset++) {
+ assertThrows(load);
+ }
+ assertThrows(() => memory.grow(1));
+})();
+
+(function TestGrowMemoryZeroInitialMemory() {
+ print("ZeroInitialMemory");
+ let memory = new WebAssembly.Memory({initial: 0});
+ assertEquals(0, memory.buffer.byteLength);
+ let i32 = new Int32Array(memory.buffer);
+ let builder = new WasmModuleBuilder();
+ builder.addImportedMemory("mine");
+ builder.addFunction("load", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
+ .exportFunc();
+ builder.addFunction("store", kSig_i_ii)
+ .addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32StoreMem, 0, 0,
+ kExprGetLocal, 1])
+ .exportFunc();
+ var offset;
+ let instance = builder.instantiate({mine: memory});
+ function load() { return instance.exports.load(offset); }
+ function store(value) { return instance.exports.store(offset, value); }
+
+ for (var i = 1; i < 5; i++) {
+ assertEquals(i - 1, memory.grow(1));
+ assertEquals(i * kPageSize, memory.buffer.byteLength);
+ for (offset = (i - 1) * kPageSize; offset < i * kPageSize - 3; offset++) {
+ store(offset * 2);
+ }
+ }
+ for (offset = 5 * kPageSize; offset < 5 * kPageSize + 4; offset++) {
+ assertThrows(load);
+ }
+ assertThrows(() => memory.grow(16381));
+})();
+
+(function ImportedMemoryBufferLength() {
+ print("ImportedMemoryBufferLength");
+ let memory = new WebAssembly.Memory({initial: 2, maximum: 10});
+ assertEquals(2*kPageSize, memory.buffer.byteLength);
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("grow", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .exportFunc();
+ builder.addImportedMemory("mine");
+ let instance = builder.instantiate({mine: memory});
+ function grow(pages) { return instance.exports.grow(pages); }
+ assertEquals(2, grow(3));
+ assertEquals(5*kPageSize, memory.buffer.byteLength);
+ assertEquals(5, grow(5));
+ assertEquals(10*kPageSize, memory.buffer.byteLength);
+ assertThrows(() => memory.grow(1));
+})();
+
+(function TestGrowMemoryExportedMaximum() {
+ print("TestGrowMemoryExportedMaximum");
+ let initial_size = 1, maximum_size = 10;
+ var exp_instance;
+ {
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(initial_size, maximum_size, true);
+ builder.exportMemoryAs("exported_mem");
+ exp_instance = builder.instantiate();
+ }
+ var instance;
+ {
+ var builder = new WasmModuleBuilder();
+ builder.addImportedMemory("imported_mem");
+ builder.addFunction("mem_size", kSig_i_v)
+ .addBody([kExprMemorySize, kMemoryZero])
+ .exportFunc();
+ builder.addFunction("grow", kSig_i_i)
+ .addBody([kExprGetLocal, 0, kExprGrowMemory, kMemoryZero])
+ .exportFunc();
+ instance = builder.instantiate({
+ imported_mem: exp_instance.exports.exported_mem});
+ }
+ for (var i = initial_size; i < maximum_size; i++) {
+ assertEquals(i, instance.exports.grow(1));
+ assertEquals((i+1), instance.exports.mem_size());
+ }
+ assertEquals(-1, instance.exports.grow(1));
+})();
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
index aa836d6eac..e00e533ae1 100644
--- a/deps/v8/test/mjsunit/wasm/import-table.js
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -266,9 +266,9 @@ testCallPrint();
function testCallImport2(foo, bar, expected) {
var builder = new WasmModuleBuilder();
- builder.addImport("foo", kSig_i);
- builder.addImport("bar", kSig_i);
- builder.addFunction("main", kSig_i)
+ builder.addImport("foo", kSig_i_v);
+ builder.addImport("bar", kSig_i_v);
+ builder.addFunction("main", kSig_i_v)
.addBody([
kExprCallFunction, 0, // --
kExprCallFunction, 1, // --
diff --git a/deps/v8/test/mjsunit/wasm/incrementer.wasm b/deps/v8/test/mjsunit/wasm/incrementer.wasm
index 30b51c2e1b..4ab5a90448 100644
--- a/deps/v8/test/mjsunit/wasm/incrementer.wasm
+++ b/deps/v8/test/mjsunit/wasm/incrementer.wasm
Binary files differ
diff --git a/deps/v8/test/mjsunit/wasm/indirect-calls.js b/deps/v8/test/mjsunit/wasm/indirect-calls.js
index 26021bb74d..ac4fa54e6d 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-calls.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-calls.js
@@ -27,7 +27,7 @@ var module = (function () {
kExprGetLocal, 1,
kExprGetLocal, 2,
kExprGetLocal, 0,
- kExprCallIndirect, sig_index
+ kExprCallIndirect, sig_index, kTableZero
])
.exportFunc()
builder.appendToTable([1, 2, 3]);
@@ -54,27 +54,27 @@ module = (function () {
var sig_i_ii = builder.addType(kSig_i_ii);
var sig_i_i = builder.addType(kSig_i_i);
- builder.addImport("mul", sig_i_ii);
- builder.addFunction("add", sig_i_ii)
+ var mul = builder.addImport("mul", sig_i_ii);
+ var add = builder.addFunction("add", sig_i_ii)
.addBody([
kExprGetLocal, 0, // --
kExprGetLocal, 1, // --
kExprI32Add // --
]);
- builder.addFunction("popcnt", sig_i_i)
+ var popcnt = builder.addFunction("popcnt", sig_i_i)
.addBody([
kExprGetLocal, 0, // --
kExprI32Popcnt // --
]);
- builder.addFunction("main", kSig_i_iii)
+ var main = builder.addFunction("main", kSig_i_iii)
.addBody([
kExprGetLocal, 1,
kExprGetLocal, 2,
kExprGetLocal, 0,
- kExprCallIndirect, sig_i_ii
+ kExprCallIndirect, sig_i_ii, kTableZero
])
- .exportFunc()
- builder.appendToTable([0, 1, 2, 3]);
+ .exportFunc();
+ builder.appendToTable([mul.index, add.index, popcnt.index, main.index]);
return builder.instantiate({mul: function(a, b) { return a * b | 0; }});
})();
@@ -84,3 +84,125 @@ assertEquals(99, module.exports.main(1, 22, 77));
assertTraps(kTrapFuncSigMismatch, "module.exports.main(2, 12, 33)");
assertTraps(kTrapFuncSigMismatch, "module.exports.main(3, 12, 33)");
assertTraps(kTrapFuncInvalid, "module.exports.main(4, 12, 33)");
+
+function AddFunctions(builder) {
+ var mul = builder.addFunction("mul", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Mul // --
+ ]);
+ var add = builder.addFunction("add", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Add // --
+ ]);
+ var sub = builder.addFunction("sub", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Sub // --
+ ]);
+ return {mul: mul, add: add, sub: sub};
+}
+
+
+module = (function () {
+ var builder = new WasmModuleBuilder();
+
+ var f = AddFunctions(builder);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ builder.appendToTable([f.mul.index, f.add.index, f.sub.index]);
+
+ return builder.instantiate();
+})();
+
+assertEquals(33, module.exports.main(1, 0));
+assertEquals(66, module.exports.main(2, 0));
+assertEquals(34, module.exports.main(1, 1));
+assertEquals(35, module.exports.main(2, 1));
+assertEquals(32, module.exports.main(1, 2));
+assertEquals(31, module.exports.main(2, 2));
+assertTraps(kTrapFuncInvalid, "module.exports.main(12, 3)");
+
+(function ConstBaseTest() {
+ print("ConstBaseTest...");
+ function instanceWithTable(base, length) {
+ var builder = new WasmModuleBuilder();
+
+ var f = AddFunctions(builder);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ builder.setFunctionTableLength(length);
+ builder.addFunctionTableInit(base, false, [f.add.index, f.sub.index, f.mul.index]);
+
+ return builder.instantiate();
+ }
+
+ for (var i = 0; i < 5; i++) {
+ print(" base = " + i);
+ var module = instanceWithTable(i, 10);
+ main = module.exports.main;
+ for (var j = 0; j < i; j++) {
+ assertTraps(kTrapFuncSigMismatch, "main(12, " + j + ")");
+ }
+ assertEquals(34, main(1, i + 0));
+ assertEquals(35, main(2, i + 0));
+ assertEquals(32, main(1, i + 1));
+ assertEquals(31, main(2, i + 1));
+ assertEquals(33, main(1, i + 2));
+ assertEquals(66, main(2, i + 2));
+ assertTraps(kTrapFuncInvalid, "main(12, 10)");
+ }
+})();
+
+(function GlobalBaseTest() {
+ print("GlobalBaseTest...");
+
+ var builder = new WasmModuleBuilder();
+
+ var f = AddFunctions(builder);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ builder.setFunctionTableLength(10);
+ var g = builder.addImportedGlobal("base", undefined, kAstI32);
+ builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index, f.sub.index]);
+
+ var module = new WebAssembly.Module(builder.toBuffer());
+
+ for (var i = 0; i < 5; i++) {
+ print(" base = " + i);
+ var instance = new WebAssembly.Instance(module, {base: i});
+ main = instance.exports.main;
+ for (var j = 0; j < i; j++) {
+ assertTraps(kTrapFuncSigMismatch, "main(12, " + j + ")");
+ }
+ assertEquals(33, main(1, i + 0));
+ assertEquals(66, main(2, i + 0));
+ assertEquals(34, main(1, i + 1));
+ assertEquals(35, main(2, i + 1));
+ assertEquals(32, main(1, i + 2));
+ assertEquals(31, main(2, i + 2));
+ assertTraps(kTrapFuncInvalid, "main(12, 10)");
+ }
+})();
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
new file mode 100644
index 0000000000..62b900586c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -0,0 +1,377 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function AddFunctions(builder) {
+ let sig_index = builder.addType(kSig_i_ii);
+ let mul = builder.addFunction("mul", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Mul // --
+ ]);
+ let add = builder.addFunction("add", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Add // --
+ ]);
+ let sub = builder.addFunction("sub", sig_index)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI32Sub // --
+ ]);
+ return {mul: mul, add: add, sub: sub};
+}
+
+function js_div(a, b) { return (a / b) | 0; }
+
+(function ExportedTableTest() {
+ print("ExportedTableTest...");
+
+ let builder = new WasmModuleBuilder();
+
+ let d = builder.addImport("js_div", kSig_i_ii);
+ let f = AddFunctions(builder);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ f.add.exportAs("blarg");
+
+ builder.setFunctionTableLength(10);
+ let g = builder.addImportedGlobal("base", undefined, kAstI32);
+ builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index,
+ f.sub.index,
+ d]);
+ builder.addExportOfKind("table", kExternalTable, 0);
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ for (let i = 0; i < 5; i++) {
+ print(" base = " + i);
+ let instance = new WebAssembly.Instance(module, {base: i, js_div: js_div});
+ main = instance.exports.main;
+ let table = instance.exports.table;
+ assertTrue(table instanceof WebAssembly.Table);
+ assertEquals(10, table.length);
+ for (let j = 0; j < i; j++) {
+ assertSame(null, table.get(j));
+ }
+ let mul = table.get(i+0);
+ let add = table.get(i+1);
+ let sub = table.get(i+2);
+
+ print(" mul=" + mul);
+ print(" add=" + add);
+ print(" sub=" + sub);
+ assertEquals("function", typeof mul);
+ assertEquals("function", typeof add);
+ assertEquals("function", typeof sub);
+ assertEquals(2, mul.length);
+ assertEquals(2, add.length);
+ assertEquals(2, sub.length);
+ assertEquals("blarg", add.name);
+
+ let exp_div = table.get(i+3);
+ assertEquals("function", typeof exp_div);
+ print(" js_div=" + exp_div);
+ // Should have a new, wrapped version of the import.
+ assertFalse(js_div == exp_div);
+
+
+ for (let j = i + 4; j < 10; j++) {
+ assertSame(null, table.get(j));
+ }
+
+ assertEquals(-33, mul(-11, 3));
+ assertEquals(4444444, add(3333333, 1111111));
+ assertEquals(-9999, sub(1, 10000));
+ assertEquals(-44, exp_div(-88.1, 2));
+ }
+})();
+
+
+(function ImportedTableTest() {
+ let kTableSize = 10;
+ print("ImportedTableTest...");
+ var builder = new WasmModuleBuilder();
+
+ let d = builder.addImport("js_div", kSig_i_ii);
+ let f = AddFunctions(builder);
+ builder.setFunctionTableLength(kTableSize);
+ let g = builder.addImportedGlobal("base", undefined, kAstI32);
+ builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index,
+ f.sub.index,
+ d]);
+ builder.addExportOfKind("table", kExternalTable, 0);
+
+ let m1 = new WebAssembly.Module(builder.toBuffer());
+
+ var builder = new WasmModuleBuilder();
+
+ builder.addImportedTable("table", undefined, kTableSize, kTableSize);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 33, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ let m2 = new WebAssembly.Module(builder.toBuffer());
+
+ // Run 5 trials at different table bases.
+ for (let i = 0; i < 5; i++) {
+ print(" base = " + i);
+ let i1 = new WebAssembly.Instance(m1, {base: i, js_div: js_div});
+ let table = i1.exports.table;
+ assertEquals(10, table.length);
+ let i2 = new WebAssembly.Instance(m2, {table: table});
+ let main = i2.exports.main;
+
+ for (var j = 0; j < i; j++) {
+ assertThrows(() => main(0, j));
+ assertSame(null, table.get(j));
+ }
+
+ // mul
+ assertEquals("function", typeof table.get(i+0));
+ assertEquals(0, main(0, i+0));
+ assertEquals(66, main(2, i+0));
+
+ // add
+ assertEquals("function", typeof table.get(i+1));
+ assertEquals(33, main(0, i+1));
+ assertEquals(38, main(5, i+1));
+
+ // sub
+ assertEquals("function", typeof table.get(i+2));
+ assertEquals(32, main(1, i+2));
+ assertEquals(28, main(5, i+2));
+
+ // div
+ assertEquals("function", typeof table.get(i+3));
+ assertEquals(8, main(4, i+3));
+ assertEquals(3, main(11, i+3));
+
+ for (var j = i + 4; j < (kTableSize + 5); j++) {
+ assertThrows(x => main(0, j));
+ if (j < kTableSize) assertSame(null, table.get(j));
+ }
+ }
+})();
+
+(function ImportedTableTest() {
+ let kTableSize = 10;
+ print("ManualTableTest...");
+
+ var builder = new WasmModuleBuilder();
+
+ let d = builder.addImport("js_div", kSig_i_ii);
+ builder.addImportedTable("table", undefined, kTableSize, kTableSize);
+ let g = builder.addImportedGlobal("base", undefined, kAstI32);
+ let f = AddFunctions(builder);
+ builder.addFunctionTableInit(g, true, [f.mul.index, f.add.index,
+ f.sub.index,
+ d]);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprI32Const, 55, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprCallIndirect, 0, kTableZero]) // --
+ .exportAs("main");
+
+ let m2 = new WebAssembly.Module(builder.toBuffer());
+
+ // Run 5 trials at different table bases.
+ for (let i = 0; i < 5; i++) {
+ print(" base = " + i);
+ let table = new WebAssembly.Table({element: "anyfunc",
+ initial: kTableSize});
+ assertEquals(10, table.length);
+ let i2 = new WebAssembly.Instance(m2, {base: i, table: table,
+ js_div: js_div});
+ let main = i2.exports.main;
+
+ for (var j = 0; j < i; j++) {
+ assertThrows(() => main(0, j));
+ assertSame(null, table.get(j));
+ }
+
+ // mul
+ assertEquals("function", typeof table.get(i+0));
+ assertEquals(0, main(0, i+0));
+ assertEquals(110, main(2, i+0));
+
+ // add
+ assertEquals("function", typeof table.get(i+1));
+ assertEquals(55, main(0, i+1));
+ assertEquals(60, main(5, i+1));
+
+ // sub
+ assertEquals("function", typeof table.get(i+2));
+ assertEquals(54, main(1, i+2));
+ assertEquals(50, main(5, i+2));
+
+ // div
+ assertEquals("function", typeof table.get(i+3));
+ assertEquals(13, main(4, i+3));
+ assertEquals(5, main(11, i+3));
+
+ for (var j = i + 4; j < (kTableSize + 5); j++) {
+ assertThrows(x => main(0, j));
+ if (j < kTableSize) assertSame(null, table.get(j));
+ }
+ }
+})();
+
+
+(function CumulativeTest() {
+ print("CumulativeTest...");
+
+ let kTableSize = 10;
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 10});
+
+ var builder = new WasmModuleBuilder();
+
+ builder.addImportedTable("table", undefined, kTableSize, kTableSize);
+ let g = builder.addImportedGlobal("base", undefined, kAstI32);
+ let sig_index = builder.addType(kSig_i_v);
+ builder.addFunction("g", sig_index)
+ .addBody([
+ kExprGetGlobal, g
+ ]);
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprCallIndirect, sig_index, kTableZero]) // --
+ .exportAs("main");
+ builder.addFunctionTableInit(g, true, [g]);
+
+ let module = new WebAssembly.Module(builder.toBuffer());
+
+ for (var i = 0; i < kTableSize; i++) {
+ print(" base = " + i);
+ let instance = new WebAssembly.Instance(module, {base: i, table: table});
+
+ for (var j = 0; j < kTableSize; j++) {
+ let func = table.get(j);
+ if (j > i) {
+ assertSame(null, func);
+ assertTraps(kTrapFuncSigMismatch, () => instance.exports.main(j));
+ } else {
+ assertEquals("function", typeof func);
+ assertEquals(j, func());
+ assertEquals(j, instance.exports.main(j));
+ }
+ }
+ }
+})();
+
+(function TwoWayTest() {
+ print("TwoWayTest...");
+ let kTableSize = 3;
+
+ // Module {m1} defines the table and exports it.
+ var builder = new WasmModuleBuilder();
+ builder.addType(kSig_i_i);
+ builder.addType(kSig_i_ii);
+ var sig_index1 = builder.addType(kSig_i_v);
+ var f1 = builder.addFunction("f1", sig_index1)
+ .addBody([kExprI32Const, 11]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprCallIndirect, sig_index1, kTableZero]) // --
+ .exportAs("main");
+
+ builder.setFunctionTableLength(kTableSize);
+ builder.addFunctionTableInit(0, false, [f1.index]);
+ builder.addExportOfKind("table", kExternalTable, 0);
+
+ var m1 = new WebAssembly.Module(builder.toBuffer());
+
+ // Module {m2} imports the table and adds {f2}.
+ var builder = new WasmModuleBuilder();
+ builder.addType(kSig_i_ii);
+ var sig_index2 = builder.addType(kSig_i_v);
+ var f2 = builder.addFunction("f2", sig_index2)
+ .addBody([kExprI32Const, 22]);
+
+ builder.addFunction("main", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0, // --
+ kExprCallIndirect, sig_index2, kTableZero]) // --
+ .exportAs("main");
+
+ builder.setFunctionTableLength(kTableSize);
+ builder.addFunctionTableInit(1, false, [f2.index]);
+ builder.addImportedTable("table", undefined, kTableSize, kTableSize);
+
+ var m2 = new WebAssembly.Module(builder.toBuffer());
+
+ assertFalse(sig_index1 == sig_index2);
+
+ var i1 = new WebAssembly.Instance(m1);
+ var i2 = new WebAssembly.Instance(m2, {table: i1.exports.table});
+
+ assertEquals(11, i1.exports.main(0));
+ assertEquals(11, i2.exports.main(0));
+
+ assertEquals(22, i1.exports.main(1));
+ assertEquals(22, i2.exports.main(1));
+
+ assertThrows(() => i1.exports.main(2));
+ assertThrows(() => i2.exports.main(2));
+ assertThrows(() => i1.exports.main(3));
+ assertThrows(() => i2.exports.main(3));
+
+})();
+
+(function MismatchedTableSize() {
+ print("MismatchedTableSize...");
+ let kTableSize = 5;
+
+ for (var expsize = 1; expsize < 4; expsize++) {
+ for (var impsize = 1; impsize < 4; impsize++) {
+ print(" expsize = " + expsize + ", impsize = " + impsize);
+ var builder = new WasmModuleBuilder();
+ builder.setFunctionTableLength(expsize);
+ builder.addExportOfKind("expfoo", kExternalTable, 0);
+
+ let m1 = new WebAssembly.Module(builder.toBuffer());
+
+ var builder = new WasmModuleBuilder();
+ builder.addImportedTable("impfoo", undefined, impsize, impsize);
+
+ let m2 = new WebAssembly.Module(builder.toBuffer());
+
+ var i1 = new WebAssembly.Instance(m1);
+
+ // TODO(titzer): v8 currently requires import table size to match
+ // export table size.
+ var ffi = {impfoo: i1.exports.expfoo};
+ if (expsize == impsize) {
+ var i2 = new WebAssembly.Instance(m2, ffi);
+ } else {
+ assertThrows(() => new WebAssembly.Instance(m2, ffi));
+ }
+ }
+ }
+
+
+
+})();
diff --git a/deps/v8/test/mjsunit/wasm/instance-gc.js b/deps/v8/test/mjsunit/wasm/instance-gc.js
index 1713f27b99..e5dd4edaf9 100644
--- a/deps/v8/test/mjsunit/wasm/instance-gc.js
+++ b/deps/v8/test/mjsunit/wasm/instance-gc.js
@@ -12,7 +12,7 @@ let nogc = () => {};
function newModule() {
let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([kExprI32Const, 0, kExprI32LoadMem, 0, 0])
.exportFunc();
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index a0c11bdadd..36f7429d96 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -12,7 +12,7 @@ let kReturnValue = 117;
let buffer = (() => {
let builder = new WasmModuleBuilder();
builder.addMemory(1, 1, true);
- builder.addFunction("main", kSig_i)
+ builder.addFunction("main", kSig_i_v)
.addBody([kExprI8Const, kReturnValue])
.exportFunc();
@@ -52,9 +52,6 @@ function CheckInstance(instance) {
assertEquals(kReturnValue, main());
}
-// Deprecated experimental API.
-CheckInstance(Wasm.instantiateModule(buffer));
-
// Official API
let module = new WebAssembly.Module(buffer);
CheckInstance(new WebAssembly.Instance(module));
@@ -119,7 +116,7 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
builder.addMemory(1,1, true);
var kSig_v_i = makeSig([kAstI32], []);
var signature = builder.addType(kSig_v_i);
- builder.addImport("some_value", kSig_i);
+ builder.addImport("some_value", kSig_i_v);
builder.addImport("writer", signature);
builder.addFunction("main", kSig_i_i)
@@ -127,7 +124,7 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
kExprGetLocal, 0,
kExprI32LoadMem, 0, 0,
kExprI32Const, 1,
- kExprCallIndirect, signature,
+ kExprCallIndirect, signature, kTableZero,
kExprGetLocal,0,
kExprI32LoadMem,0, 0,
kExprCallFunction, 0,
@@ -144,10 +141,10 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
var module = new WebAssembly.Module(builder.toBuffer());
- var mem_1 = new ArrayBuffer(4);
- var mem_2 = new ArrayBuffer(4);
- var view_1 = new Int32Array(mem_1);
- var view_2 = new Int32Array(mem_2);
+ var mem_1 = new WebAssembly.Memory({initial: 1});
+ var mem_2 = new WebAssembly.Memory({initial: 1});
+ var view_1 = new Int32Array(mem_1.buffer);
+ var view_2 = new Int32Array(mem_2.buffer);
view_1[0] = 42;
view_2[0] = 1000;
@@ -169,7 +166,7 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
(function GlobalsArePrivateToTheInstance() {
print("GlobalsArePrivateToTheInstance...");
var builder = new WasmModuleBuilder();
- builder.addGlobal(kAstI32);
+ builder.addGlobal(kAstI32, true);
builder.addFunction("read", kSig_i_v)
.addBody([
kExprGetGlobal, 0])
@@ -196,16 +193,16 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
var builder = new WasmModuleBuilder();
builder.addMemory(1,1, true);
- builder.addFunction("f", kSig_i)
+ builder.addFunction("f", kSig_i_v)
.addBody([
kExprI32Const, 0,
kExprI32LoadMem, 0, 0
]).exportFunc();
- var mem_1 = new ArrayBuffer(65536);
- var mem_2 = new ArrayBuffer(65536);
- var view_1 = new Int32Array(mem_1);
- var view_2 = new Int32Array(mem_2);
+ var mem_1 = new WebAssembly.Memory({initial: 1});
+ var mem_2 = new WebAssembly.Memory({initial: 1});
+ var view_1 = new Int32Array(mem_1.buffer);
+ var view_2 = new Int32Array(mem_2.buffer);
view_1[0] = 1;
view_2[0] = 1000;
@@ -216,3 +213,9 @@ assertFalse(WebAssembly.validate(bytes(88, 88, 88, 88, 88, 88, 88, 88)));
assertEquals(1, i1.exports.f());
assertEquals(1000, i2.exports.f());
})();
+
+(function MustBeMemory() {
+ var memory = new ArrayBuffer(65536);
+ var module = new WebAssembly.Module(buffer);
+ assertThrows(() => new WebAssembly.Instance(module, null, memory), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/memory-size.js b/deps/v8/test/mjsunit/wasm/memory-size.js
index 197059eb49..bd747176a8 100644
--- a/deps/v8/test/mjsunit/wasm/memory-size.js
+++ b/deps/v8/test/mjsunit/wasm/memory-size.js
@@ -11,7 +11,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
print("testMemorySizeZero()");
var builder = new WasmModuleBuilder();
builder.addFunction("memory_size", kSig_i_v)
- .addBody([kExprMemorySize])
+ .addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
var module = builder.instantiate();
assertEquals(0, module.exports.memory_size());
@@ -23,7 +23,7 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
var size = 11;
builder.addMemory(size, size, false);
builder.addFunction("memory_size", kSig_i_v)
- .addBody([kExprMemorySize])
+ .addBody([kExprMemorySize, kMemoryZero])
.exportFunc();
var module = builder.instantiate();
assertEquals(size, module.exports.memory_size());
diff --git a/deps/v8/test/mjsunit/wasm/module-memory.js b/deps/v8/test/mjsunit/wasm/module-memory.js
index 6707f08164..8c57ca00d5 100644
--- a/deps/v8/test/mjsunit/wasm/module-memory.js
+++ b/deps/v8/test/mjsunit/wasm/module-memory.js
@@ -38,11 +38,12 @@ function genModule(memory) {
.exportFunc();
var module = builder.instantiate(null, memory);
assertTrue(module.exports.memory instanceof WebAssembly.Memory);
- if (memory != null) assertEquals(memory, module.exports.memory.buffer);
+ if (memory != null) assertEquals(memory.buffer, module.exports.memory.buffer);
return module;
}
function testPokeMemory() {
+ print("testPokeMemory");
var module = genModule(null);
var buffer = module.exports.memory.buffer;
var main = module.exports.main;
@@ -89,12 +90,13 @@ testSurvivalAcrossGc();
function testPokeOuterMemory() {
- var buffer = new ArrayBuffer(kMemSize);
+ print("testPokeOuterMemory");
+ var buffer = new WebAssembly.Memory({initial: kMemSize / kPageSize});
var module = genModule(buffer);
var main = module.exports.main;
- assertEquals(kMemSize, buffer.byteLength);
+ assertEquals(kMemSize, buffer.buffer.byteLength);
- var array = new Int8Array(buffer);
+ var array = new Int8Array(buffer.buffer);
assertEquals(kMemSize, array.length);
for (var i = 0; i < kMemSize; i++) {
@@ -116,7 +118,7 @@ function testPokeOuterMemory() {
testPokeOuterMemory();
function testOuterMemorySurvivalAcrossGc() {
- var buffer = new ArrayBuffer(kMemSize);
+ var buffer = new WebAssembly.Memory({initial: kMemSize / kPageSize});
var checker = genAndGetMain(buffer);
for (var i = 0; i < 3; i++) {
print("gc run ", i);
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
index 71038507db..d7c399dc03 100644
--- a/deps/v8/test/mjsunit/wasm/stack.js
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -135,7 +135,7 @@ Error.prepareStackTrace = function(error, frames) {
builder.addFunction("recursion", sig_index)
.addBody([
kExprI32Const, 0,
- kExprCallIndirect, sig_index
+ kExprCallIndirect, sig_index, kTableZero
])
.exportFunc()
builder.appendToTable([0]);
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
index f0fbd081ac..da1c7c37c4 100644
--- a/deps/v8/test/mjsunit/wasm/start-function.js
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -18,16 +18,6 @@ function instantiate(sig, body) {
return builder.instantiate();
}
-function assertFails(sig, body) {
- try {
- var module = instantiate(sig, body);
- print("expected failure, but passes");
- assertFalse(true);
- } catch (expected) {
- print("ok: " + expected);
- }
-}
-
function assertVerifies(sig, body) {
var module = instantiate(sig, body);
assertFalse(module === undefined);
@@ -38,12 +28,12 @@ function assertVerifies(sig, body) {
}
assertVerifies(kSig_v_v, [kExprNop]);
-assertVerifies(kSig_i, [kExprI8Const, 0]);
// Arguments aren't allow to start functions.
-assertFails(kSig_i_i, [kExprGetLocal, 0]);
-assertFails(kSig_i_ii, [kExprGetLocal, 0]);
-assertFails(kSig_i_dd, [kExprGetLocal, 0]);
+assertThrows(() => {instantiate(kSig_i_i, [kExprGetLocal, 0]);});
+assertThrows(() => {instantiate(kSig_i_ii, [kExprGetLocal, 0]);});
+assertThrows(() => {instantiate(kSig_i_dd, [kExprGetLocal, 0]);});
+assertThrows(() => {instantiate(kSig_i_v, [kExprI8Const, 0]);});
(function testInvalidIndex() {
print("testInvalidIndex");
@@ -72,14 +62,31 @@ assertFails(kSig_i_dd, [kExprGetLocal, 0]);
})();
-(function testRun() {
- print("testRun");
+(function testRun1() {
+ print("testRun1");
+ var builder = new WasmModuleBuilder();
+
+ builder.addMemory(12, 12, true);
+
+ var func = builder.addFunction("", kSig_v_v)
+ .addBody([kExprI8Const, 0, kExprI8Const, 66, kExprI32StoreMem, 0, 0]);
+
+ builder.addStart(func.index);
+
+ var module = builder.instantiate();
+ var memory = module.exports.memory.buffer;
+ var view = new Int8Array(memory);
+ assertEquals(66, view[0]);
+})();
+
+(function testRun2() {
+ print("testRun2");
var builder = new WasmModuleBuilder();
builder.addMemory(12, 12, true);
var func = builder.addFunction("", kSig_v_v)
- .addBody([kExprI8Const, 0, kExprI8Const, 77, kExprI32StoreMem, 0, 0]);
+ .addBody([kExprI8Const, 0, kExprI8Const, 22, kExprI8Const, 55, kExprI32Add, kExprI32StoreMem, 0, 0]);
builder.addStart(func.index);
diff --git a/deps/v8/test/mjsunit/wasm/table.js b/deps/v8/test/mjsunit/wasm/table.js
index 0275bc0522..1abc29664e 100644
--- a/deps/v8/test/mjsunit/wasm/table.js
+++ b/deps/v8/test/mjsunit/wasm/table.js
@@ -4,6 +4,11 @@
// Flags: --expose-wasm
+'use strict';
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
// Basic tests.
var outOfUint32RangeValue = 1e12;
@@ -45,25 +50,46 @@ function assertTableIsValid(table) {
assertThrows(() => new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: int32ButOob}));
- let table = new WebAssembly.Table({element: "anyfunc", initial: 1});
+ let table;
+ table = new WebAssembly.Table({element: "anyfunc", initial: 1});
assertTableIsValid(table);
-})();
+ assertEquals(1, table.length);
+ assertEquals(null, table.get(0));
-(function TestConstructorWithMaximum() {
- let table = new WebAssembly.Table({element: "anyfunc", maximum: 10});
+ table = new WebAssembly.Table({element: "anyfunc", initial: "2"});
assertTableIsValid(table);
-})();
+ assertEquals(2, table.length);
+ assertEquals(null, table.get(0));
+ assertEquals(null, table.get(1));
-(function TestInitialIsUndefined() {
- // New memory with initial = undefined, which means initial = 0.
- let table = new WebAssembly.Table({element: "anyfunc", initial: undefined});
+ table = new WebAssembly.Table({element: "anyfunc", initial: {valueOf() { return "1" }}});
assertTableIsValid(table);
-})();
+ assertEquals(1, table.length);
+ assertEquals(null, table.get(0));
+
+ table = new WebAssembly.Table({element: "anyfunc", initial: undefined});
+ assertTableIsValid(table);
+ assertEquals(0, table.length);
+
+ table = new WebAssembly.Table({element: "anyfunc"});
+ assertTableIsValid(table);
+ assertEquals(0, table.length);
-(function TestMaximumIsUndefined() {
- // New memory with maximum = undefined, which means maximum = 0.
- let table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: undefined});
+ table = new WebAssembly.Table({element: "anyfunc", maximum: 10});
assertTableIsValid(table);
+ assertEquals(0, table.length);
+
+ table = new WebAssembly.Table({element: "anyfunc", maximum: "10"});
+ assertTableIsValid(table);
+ assertEquals(0, table.length);
+
+ table = new WebAssembly.Table({element: "anyfunc", maximum: {valueOf() { return "10" }}});
+ assertTableIsValid(table);
+ assertEquals(0, table.length);
+
+ table = new WebAssembly.Table({element: "anyfunc", initial: 0, maximum: undefined});
+ assertTableIsValid(table);
+ assertEquals(0, table.length);
})();
(function TestMaximumIsReadOnce() {
@@ -93,3 +119,114 @@ function assertTableIsValid(table) {
let table = new WebAssembly.Table(desc);
assertTableIsValid(table);
})();
+
+(function TestLength() {
+ for (let i = 0; i < 10; ++i) {
+ let table = new WebAssembly.Table({element: "anyfunc", initial: i});
+ assertEquals(i, table.length);
+ }
+
+ assertThrows(() => WebAssembly.Table.prototype.length.call([]), TypeError);
+})();
+
+(function TestGet() {
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 10});
+
+ for (let i = 0; i < table.length; ++i) {
+ assertEquals(null, table.get(i));
+ assertEquals(null, table.get(String(i)));
+ }
+ for (let key of [0.4, "", NaN, {}, [], () => {}]) {
+ assertEquals(null, table.get(key));
+ }
+ for (let key of [-1, table.length, table.length * 10]) {
+ assertThrows(() => table.get(key), RangeError);
+ }
+ assertThrows(() => table.get(Symbol()), TypeError);
+ assertThrows(() => WebAssembly.Table.prototype.get.call([], 0), TypeError);
+})();
+
+(function TestSet() {
+ let builder = new WasmModuleBuilder;
+ builder.addExport("wasm", builder.addFunction("", kSig_v_v));
+ builder.addExport("host", builder.addImportWithModule("test", "f", kSig_v_v));
+ let {wasm, host} = builder.instantiate({test: {f() {}}}).exports;
+
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 10});
+
+ for (let f of [wasm, host]) {
+ for (let i = 0; i < table.length; ++i) table.set(i, null);
+ for (let i = 0; i < table.length; ++i) {
+ assertSame(null, table.get(i));
+ assertSame(undefined, table.set(i, f));
+ assertSame(f, table.get(i));
+ }
+
+ for (let i = 0; i < table.length; ++i) table.set(i, null);
+ for (let i = 0; i < table.length; ++i) {
+ assertSame(null, table.get(i));
+ assertSame(undefined, table.set(String(i), f));
+ assertSame(f, table.get(i));
+ }
+
+ for (let key of [0.4, "", NaN, {}, [], () => {}]) {
+ assertSame(undefined, table.set(0, null));
+ assertSame(undefined, table.set(key, f));
+ assertSame(f, table.get(0));
+ }
+
+ for (let key of [-1, table.length, table.length * 10]) {
+ assertThrows(() => table.set(key, f), RangeError);
+ }
+
+ assertThrows(() => table.set(0), TypeError);
+ for (let val of [undefined, 0, "", {}, [], () => {}]) {
+ assertThrows(() => table.set(0, val), TypeError);
+ }
+
+ assertThrows(() => table.set(Symbol(), f), TypeError);
+ assertThrows(() => WebAssembly.Table.prototype.set.call([], 0, f),
+ TypeError);
+ }
+})();
+
+(function TestGrow() {
+ let builder = new WasmModuleBuilder;
+ builder.addExport("wasm", builder.addFunction("", kSig_v_v));
+ builder.addExport("host", builder.addImportWithModule("test", "f", kSig_v_v));
+ let {wasm, host} = builder.instantiate({test: {f() {}}}).exports;
+
+ function init(table) {
+ for (let i = 0; i < 5; ++i) table.set(i, wasm);
+ for (let i = 15; i < 20; ++i) table.set(i, host);
+ }
+ function check(table) {
+ for (let i = 0; i < 5; ++i) assertSame(wasm, table.get(i));
+ for (let i = 6; i < 15; ++i) assertSame(null, table.get(i));
+ for (let i = 15; i < 20; ++i) assertSame(host, table.get(i));
+ for (let i = 21; i < table.length; ++i) assertSame(null, table.get(i));
+ }
+
+ let table = new WebAssembly.Table({element: "anyfunc", initial: 20});
+ init(table);
+ check(table);
+ table.grow(0);
+ check(table);
+ table.grow(10);
+ check(table);
+ assertThrows(() => table.grow(-10), RangeError);
+
+ table = new WebAssembly.Table({element: "anyfunc", initial: 20, maximum: 25});
+ init(table);
+ check(table);
+ table.grow(0);
+ check(table);
+ table.grow(5);
+ check(table);
+ table.grow(0);
+ check(table);
+ assertThrows(() => table.grow(1), RangeError);
+ assertThrows(() => table.grow(-10), RangeError);
+
+ assertThrows(() => WebAssembly.Table.prototype.grow.call([], 0), TypeError);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
index df03aec9f5..e84881667d 100644
--- a/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
+++ b/deps/v8/test/mjsunit/wasm/test-import-export-wrapper.js
@@ -66,11 +66,71 @@ var expect_no_elison = 1;
assertEquals(%CheckWasmWrapperElision(the_export, expect_elison), true);
})();
+// Function calls stack: first_export -> first_func -> first_import ->
+// second_export -> second_import
+// In this test, first_import and second_export have the same signature, and
+// therefore the wrappers will be removed. If the wrappers are not removed, then
+// the test crashes because of the int64 parameter, which is not allowed in the
+// wrappers.
+(function TestWasmWrapperElisionInt64() {
+ var imported = function (a) {
+ return a;
+ };
+
+ var second_module = new WasmModuleBuilder();
+ var sig_index1 = second_module.addType(kSig_i_i);
+ var sig_index_ll = second_module.addType(kSig_l_l);
+ second_module
+ .addImportWithModule("import_module_2", "import_name_2", sig_index1);
+ second_module
+ .addFunction("second_export", sig_index_ll)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprI32ConvertI64,
+ kExprCallFunction, 0,
+ kExprI64SConvertI32,
+ kExprReturn
+ ])
+ .exportFunc();
+
+ var first_module = new WasmModuleBuilder();
+ var sig_index = first_module.addType(kSig_i_v);
+ var sig_index_ll = first_module.addType(kSig_l_l);
+ first_module
+ .addImportWithModule("import_module_1", "import_name_1", sig_index_ll);
+ first_module
+ .addFunction("first_export", sig_index)
+ .addBody([
+ kExprI64Const, 2,
+ kExprCallFunction, 2,
+ kExprI32ConvertI64,
+ kExprReturn
+ ])
+ .exportFunc();
+ first_module
+ .addFunction("first_func", sig_index_ll)
+ .addBody([
+ kExprI64Const, 1,
+ kExprGetLocal, 0,
+ kExprI64Add,
+ kExprCallFunction, 0,
+ kExprReturn
+ ]);
+
+ var f = second_module
+ .instantiate({import_module_2: {import_name_2: imported}})
+ .exports.second_export;
+ var the_export = first_module
+ .instantiate({import_module_1: {import_name_1: f}})
+ .exports.first_export;
+ assertEquals(the_export(), 3);
+})();
+
// function calls stack: first_export -> first_func -> first_import ->
// second_export -> second_import
-// In this case, second_export has less params than first_import,
-// So that wrappers will not be removed
-(function TestWasmWrapperNoElisionLessParams() {
+// In this case, second_export has fewer params than first_import,
+// so instantiation should fail.
+assertThrows(function TestWasmWrapperNoElisionLessParams() {
var imported = function (a) {
return a;
};
@@ -121,13 +181,13 @@ var expect_no_elison = 1;
assertEquals(the_export(0, 2), 0);
assertEquals(the_export(9.9, 4.3), 9);
assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
-})();
+});
// function calls stack: first_export -> first_func -> first_import ->
// second_export -> second_import
// In this case, second_export has more params than first_import,
-// So that wrappers will not be removed
-(function TestWasmWrapperNoElisionMoreParams() {
+// so instantiation should fail.
+assertThrows(function TestWasmWrapperNoElisionMoreParams() {
var imported = function (a, b, c) {
return a+b+c;
};
@@ -180,13 +240,13 @@ var expect_no_elison = 1;
assertEquals(the_export(0, 0), 0);
assertEquals(the_export(1.1, 2.7), 3);
assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
-})();
+});
// function calls stack: first_export -> first_func -> first_import ->
// second_export -> second_import
// In this case, second_export has different params type with first_import,
-// So that wrappers will not be removed
-(function TestWasmWrapperNoElisionTypeMismatch() {
+// so instantiation should fail.
+assertThrows(function TestWasmWrapperNoElisionTypeMismatch() {
var imported = function (a, b) {
return a+b;
};
@@ -238,4 +298,4 @@ var expect_no_elison = 1;
assertEquals(the_export(0.0, 0.0), 0);
assertEquals(the_export(2, -2), 0);
assertEquals(%CheckWasmWrapperElision(the_export, expect_no_elison), true);
-})();
+});
diff --git a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
index b1a2309770..30bbe4ac93 100644
--- a/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/test-wasm-module-builder.js
@@ -9,40 +9,44 @@ load('test/mjsunit/wasm/wasm-module-builder.js');
var debug = true;
+function instantiate(buffer, ffi) {
+ return new WebAssembly.Instance(WebAssembly.Module(buffer), ffi);
+}
+
(function BasicTest() {
- var module = new WasmModuleBuilder();
- module.addMemory(1, 2, false);
- module.addFunction("foo", kSig_i)
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 2, false);
+ builder.addFunction("foo", kSig_i_v)
.addBody([kExprI8Const, 11])
.exportAs("blarg");
- var buffer = module.toBuffer(debug);
- var instance = Wasm.instantiateModule(buffer);
+ var buffer = builder.toBuffer(debug);
+ var instance = instantiate(buffer);
assertEquals(11, instance.exports.blarg());
})();
(function ImportTest() {
- var module = new WasmModuleBuilder();
- var index = module.addImport("print", makeSig_v_x(kAstI32));
- module.addFunction("foo", kSig_v_v)
+ let builder = new WasmModuleBuilder();
+ var index = builder.addImport("print", makeSig_v_x(kAstI32));
+ builder.addFunction("foo", kSig_v_v)
.addBody([kExprI8Const, 13, kExprCallFunction, index])
.exportAs("main");
- var buffer = module.toBuffer(debug);
- var instance = Wasm.instantiateModule(buffer, {print: print});
+ var buffer = builder.toBuffer(debug);
+ var instance = instantiate(buffer, {print: print});
print("should print 13! ");
instance.exports.main();
})();
(function LocalsTest() {
- var module = new WasmModuleBuilder();
- module.addFunction(undefined, kSig_i_i)
+ let builder = new WasmModuleBuilder();
+ builder.addFunction(undefined, kSig_i_i)
.addLocals({i32_count: 1})
.addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
.exportAs("main");
- var buffer = module.toBuffer(debug);
- var instance = Wasm.instantiateModule(buffer);
+ var buffer = builder.toBuffer(debug);
+ var instance = instantiate(buffer);
assertEquals(19, instance.exports.main(19));
assertEquals(27777, instance.exports.main(27777));
})();
@@ -57,72 +61,72 @@ var debug = true;
];
for (p of types) {
- var module = new WasmModuleBuilder();
- module.addFunction(undefined, makeSig_r_x(p.type, p.type))
+ let builder = new WasmModuleBuilder();
+ builder.addFunction(undefined, makeSig_r_x(p.type, p.type))
.addLocals(p.locals)
.addBody([kExprGetLocal, 0, kExprSetLocal, 1, kExprGetLocal, 1])
.exportAs("main");
- var buffer = module.toBuffer(debug);
- var instance = Wasm.instantiateModule(buffer);
+ var buffer = builder.toBuffer(debug);
+ var instance = instantiate(buffer);
assertEquals(19, instance.exports.main(19));
assertEquals(27777, instance.exports.main(27777));
}
})();
(function CallTest() {
- var module = new WasmModuleBuilder();
- module.addFunction("add", kSig_i_ii)
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("add", kSig_i_ii)
.addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
- module.addFunction("main", kSig_i_ii)
+ builder.addFunction("main", kSig_i_ii)
.addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprCallFunction, 0])
.exportAs("main");
- var instance = module.instantiate();
+ var instance = builder.instantiate();
assertEquals(44, instance.exports.main(11, 33));
assertEquals(7777, instance.exports.main(2222, 5555));
})();
(function IndirectCallTest() {
- var module = new WasmModuleBuilder();
- module.addFunction("add", kSig_i_ii)
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("add", kSig_i_ii)
.addBody([kExprGetLocal, 0, kExprGetLocal, 1, kExprI32Add]);
- module.addFunction("main", kSig_i_iii)
+ builder.addFunction("main", kSig_i_iii)
.addBody([kExprGetLocal,
- 1, kExprGetLocal, 2, kExprGetLocal, 0, kExprCallIndirect, 0])
+ 1, kExprGetLocal, 2, kExprGetLocal, 0, kExprCallIndirect, 0, kTableZero])
.exportAs("main");
- module.appendToTable([0]);
+ builder.appendToTable([0]);
- var instance = module.instantiate();
+ var instance = builder.instantiate();
assertEquals(44, instance.exports.main(0, 11, 33));
assertEquals(7777, instance.exports.main(0, 2222, 5555));
assertThrows(function() { instance.exports.main(1, 1, 1); });
})();
(function DataSegmentTest() {
- var module = new WasmModuleBuilder();
- module.addMemory(1, 1, false);
- module.addFunction("load", kSig_i_i)
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 1, false);
+ builder.addFunction("load", kSig_i_i)
.addBody([kExprGetLocal, 0, kExprI32LoadMem, 0, 0])
.exportAs("load");
- module.addDataSegment(0, [9, 9, 9, 9], true);
+ builder.addDataSegment(0, [9, 9, 9, 9]);
- var buffer = module.toBuffer(debug);
- var instance = Wasm.instantiateModule(buffer);
+ var buffer = builder.toBuffer(debug);
+ var instance = instantiate(buffer);
assertEquals(151587081, instance.exports.load(0));
})();
(function BasicTestWithUint8Array() {
- var module = new WasmModuleBuilder();
- module.addMemory(1, 2, false);
- module.addFunction("foo", kSig_i)
+ let builder = new WasmModuleBuilder();
+ builder.addMemory(1, 2, false);
+ builder.addFunction("foo", kSig_i_v)
.addBody([kExprI8Const, 17])
.exportAs("blarg");
- var buffer = module.toBuffer(debug);
+ var buffer = builder.toBuffer(debug);
var array = new Uint8Array(buffer);
- var instance = Wasm.instantiateModule(array);
+ var instance = instantiate(array);
assertEquals(17, instance.exports.blarg());
var kPad = 5;
@@ -135,19 +139,19 @@ var debug = true;
for (var i = 0; i < array2.byteLength; i++) {
array2[i] = array[i];
}
- var instance = Wasm.instantiateModule(array2);
+ var instance = instantiate(array2);
assertEquals(17, instance.exports.blarg());
})();
(function ImportTestTwoLevel() {
- var module = new WasmModuleBuilder();
- var index = module.addImportWithModule("mod", "print", makeSig_v_x(kAstI32));
- module.addFunction("foo", kSig_v_v)
+ let builder = new WasmModuleBuilder();
+ var index = builder.addImportWithModule("mod", "print", makeSig_v_x(kAstI32));
+ builder.addFunction("foo", kSig_v_v)
.addBody([kExprI8Const, 19, kExprCallFunction, index])
.exportAs("main");
- var buffer = module.toBuffer(debug);
- var instance = Wasm.instantiateModule(buffer, {mod: {print: print}});
+ var buffer = builder.toBuffer(debug);
+ var instance = instantiate(buffer, {mod: {print: print}});
print("should print 19! ");
instance.exports.main();
})();
diff --git a/deps/v8/test/mjsunit/wasm/trap-location.js b/deps/v8/test/mjsunit/wasm/trap-location.js
index bc8214f9b2..5e14584224 100644
--- a/deps/v8/test/mjsunit/wasm/trap-location.js
+++ b/deps/v8/test/mjsunit/wasm/trap-location.js
@@ -52,10 +52,11 @@ builder.addFunction("main", kSig_i_i)
kExprEnd,
// offset 30
kExprGetLocal, 0,
- kExprCallIndirect, sig_index,
+ kExprCallIndirect, sig_index, kTableZero,
kExprEnd,
])
.exportAs("main");
+builder.appendToTable([0]);
var module = builder.instantiate();
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js b/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
deleted file mode 100644
index 74c9a96d68..0000000000
--- a/deps/v8/test/mjsunit/wasm/verify-function-basic-errors.js
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm
-
-function Foo() { }
-
-assertThrows(function() { Wasm.verifyFunction(); })
-assertThrows(function() { Wasm.verifyFunction(0); })
-assertThrows(function() { Wasm.verifyFunction("s"); })
-assertThrows(function() { Wasm.verifyFunction(undefined); })
-assertThrows(function() { Wasm.verifyFunction(1.1); })
-assertThrows(function() { Wasm.verifyFunction(1/0); })
-assertThrows(function() { Wasm.verifyFunction(null); })
-assertThrows(function() { Wasm.verifyFunction(new Foo()); })
-assertThrows(function() { Wasm.verifyFunction(new ArrayBuffer(0)); })
-assertThrows(function() { Wasm.verifyFunction(new ArrayBuffer(140000)); })
diff --git a/deps/v8/test/mjsunit/wasm/verify-function-simple.js b/deps/v8/test/mjsunit/wasm/verify-function-simple.js
deleted file mode 100644
index 1ac25143d7..0000000000
--- a/deps/v8/test/mjsunit/wasm/verify-function-simple.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-wasm
-
-load("test/mjsunit/wasm/wasm-constants.js");
-
-try {
- var data = bytes(
- kWasmFunctionTypeForm, 0, kAstStmt, // signature
- kDeclNoLocals, // --
- kExprNop // body
- );
-
- Wasm.verifyFunction(data);
- print("ok");
-} catch (e) {
- assertTrue(false);
-}
-
-
-var threw = false;
-try {
- var data = bytes(
- kWasmFunctionTypeForm, 0, 1, kAstI32, // signature
- kDeclNoLocals, // --
- kExprBlock, kAstStmt, kExprNop, kExprNop, kExprEnd // body
- );
-
- Wasm.verifyFunction(data);
- print("not ok");
-} catch (e) {
- print("ok: " + e);
- threw = true;
-}
-
-assertTrue(threw);
diff --git a/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js b/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
index 29ef2aa611..c289dea105 100644
--- a/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
+++ b/deps/v8/test/mjsunit/wasm/verify-module-basic-errors.js
@@ -6,13 +6,13 @@
function Foo() { }
-assertThrows(function() { Wasm.verifyModule(); })
-assertThrows(function() { Wasm.verifyModule(0); })
-assertThrows(function() { Wasm.verifyModule("s"); })
-assertThrows(function() { Wasm.verifyModule(undefined); })
-assertThrows(function() { Wasm.verifyModule(1.1); })
-assertThrows(function() { Wasm.verifyModule(1/0); })
-assertThrows(function() { Wasm.verifyModule(null); })
-assertThrows(function() { Wasm.verifyModule(new Foo()); })
-assertThrows(function() { Wasm.verifyModule(new ArrayBuffer(0)); })
-assertThrows(function() { Wasm.verifyModule(new ArrayBuffer(7)); })
+assertThrows(function() { new WebAssembly.Module(); })
+assertThrows(function() { new WebAssembly.Module(0); })
+assertThrows(function() { new WebAssembly.Module("s"); })
+assertThrows(function() { new WebAssembly.Module(undefined); })
+assertThrows(function() { new WebAssembly.Module(1.1); })
+assertThrows(function() { new WebAssembly.Module(1/0); })
+assertThrows(function() { new WebAssembly.Module(null); })
+assertThrows(function() { new WebAssembly.Module(new Foo()); })
+assertThrows(function() { new WebAssembly.Module(new ArrayBuffer(0)); })
+assertThrows(function() { new WebAssembly.Module(new ArrayBuffer(7)); })
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index 388e5f5015..a064d55aaf 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -21,7 +21,7 @@ var kWasmH1 = 0x61;
var kWasmH2 = 0x73;
var kWasmH3 = 0x6d;
-var kWasmV0 = 0xC;
+var kWasmV0 = 0xD;
var kWasmV1 = 0;
var kWasmV2 = 0;
var kWasmV3 = 0;
@@ -48,64 +48,72 @@ function bytesWithHeader() {
return buffer;
}
-var kDeclNoLocals = 0;
+let kDeclNoLocals = 0;
// Section declaration constants
-var kUnknownSectionCode = 0;
-var kTypeSectionCode = 1; // Function signature declarations
-var kImportSectionCode = 2; // Import declarations
-var kFunctionSectionCode = 3; // Function declarations
-var kTableSectionCode = 4; // Indirect function table and other tables
-var kMemorySectionCode = 5; // Memory attributes
-var kGlobalSectionCode = 6; // Global declarations
-var kExportSectionCode = 7; // Exports
-var kStartSectionCode = 8; // Start function declaration
-var kElementSectionCode = 9; // Elements section
-var kCodeSectionCode = 10; // Function code
-var kDataSectionCode = 11; // Data segments
-var kNameSectionCode = 12; // Name section (encoded as string)
+let kUnknownSectionCode = 0;
+let kTypeSectionCode = 1; // Function signature declarations
+let kImportSectionCode = 2; // Import declarations
+let kFunctionSectionCode = 3; // Function declarations
+let kTableSectionCode = 4; // Indirect function table and other tables
+let kMemorySectionCode = 5; // Memory attributes
+let kGlobalSectionCode = 6; // Global declarations
+let kExportSectionCode = 7; // Exports
+let kStartSectionCode = 8; // Start function declaration
+let kElementSectionCode = 9; // Elements section
+let kCodeSectionCode = 10; // Function code
+let kDataSectionCode = 11; // Data segments
+let kNameSectionCode = 12; // Name section (encoded as string)
-var kWasmFunctionTypeForm = 0x40;
-var kWasmAnyFunctionTypeForm = 0x20;
+let kWasmFunctionTypeForm = 0x60;
+let kWasmAnyFunctionTypeForm = 0x70;
-var kResizableMaximumFlag = 1;
+let kResizableMaximumFlag = 1;
// Function declaration flags
-var kDeclFunctionName = 0x01;
-var kDeclFunctionImport = 0x02;
-var kDeclFunctionLocals = 0x04;
-var kDeclFunctionExport = 0x08;
+let kDeclFunctionName = 0x01;
+let kDeclFunctionImport = 0x02;
+let kDeclFunctionLocals = 0x04;
+let kDeclFunctionExport = 0x08;
// Local types
-var kAstStmt = 0;
-var kAstI32 = 1;
-var kAstI64 = 2;
-var kAstF32 = 3;
-var kAstF64 = 4;
+let kAstStmt = 0x40;
+let kAstI32 = 0x7f;
+let kAstI64 = 0x7e;
+let kAstF32 = 0x7d;
+let kAstF64 = 0x7c;
+let kAstS128 = 0x7b;
-var kExternalFunction = 0;
-var kExternalTable = 1;
-var kExternalMemory = 2;
-var kExternalGlobal = 3;
+let kExternalFunction = 0;
+let kExternalTable = 1;
+let kExternalMemory = 2;
+let kExternalGlobal = 3;
+
+let kTableZero = 0;
+let kMemoryZero = 0;
// Useful signatures
-var kSig_i = makeSig([], [kAstI32]);
-var kSig_d = makeSig([], [kAstF64]);
-var kSig_i_i = makeSig([kAstI32], [kAstI32]);
-var kSig_i_l = makeSig([kAstI64], [kAstI32]);
-var kSig_i_ii = makeSig([kAstI32, kAstI32], [kAstI32]);
-var kSig_i_iii = makeSig([kAstI32, kAstI32, kAstI32], [kAstI32]);
-var kSig_d_dd = makeSig([kAstF64, kAstF64], [kAstF64]);
-var kSig_l_ll = makeSig([kAstI64, kAstI64], [kAstI64]);
-var kSig_i_dd = makeSig([kAstF64, kAstF64], [kAstI32]);
-var kSig_v_v = makeSig([], []);
-var kSig_i_v = makeSig([], [kAstI32]);
-var kSig_v_i = makeSig([kAstI32], []);
-var kSig_v_ii = makeSig([kAstI32, kAstI32], []);
-var kSig_v_iii = makeSig([kAstI32, kAstI32, kAstI32], []);
-var kSig_v_d = makeSig([kAstF64], []);
-var kSig_v_dd = makeSig([kAstF64, kAstF64], []);
-var kSig_v_ddi = makeSig([kAstF64, kAstF64, kAstI32], []);
+let kSig_i_i = makeSig([kAstI32], [kAstI32]);
+let kSig_l_l = makeSig([kAstI64], [kAstI64]);
+let kSig_i_l = makeSig([kAstI64], [kAstI32]);
+let kSig_i_ii = makeSig([kAstI32, kAstI32], [kAstI32]);
+let kSig_i_iii = makeSig([kAstI32, kAstI32, kAstI32], [kAstI32]);
+let kSig_d_dd = makeSig([kAstF64, kAstF64], [kAstF64]);
+let kSig_l_ll = makeSig([kAstI64, kAstI64], [kAstI64]);
+let kSig_i_dd = makeSig([kAstF64, kAstF64], [kAstI32]);
+let kSig_v_v = makeSig([], []);
+let kSig_i_v = makeSig([], [kAstI32]);
+let kSig_l_v = makeSig([], [kAstI64]);
+let kSig_f_v = makeSig([], [kAstF64]);
+let kSig_d_v = makeSig([], [kAstF64]);
+let kSig_v_i = makeSig([kAstI32], []);
+let kSig_v_ii = makeSig([kAstI32, kAstI32], []);
+let kSig_v_iii = makeSig([kAstI32, kAstI32, kAstI32], []);
+let kSig_v_l = makeSig([kAstI64], []);
+let kSig_v_d = makeSig([kAstF64], []);
+let kSig_v_dd = makeSig([kAstF64, kAstF64], []);
+let kSig_v_ddi = makeSig([kAstF64, kAstF64, kAstI32], []);
+let kSig_s_v = makeSig([], [kAstS128]);
function makeSig(params, results) {
return {params: params, results: results};
@@ -132,198 +140,194 @@ function makeSig_r_xx(r, x) {
}
// Opcodes
-var kExprUnreachable = 0x00;
-var kExprNop = 0x0a;
-var kExprBlock = 0x01;
-var kExprLoop = 0x02;
-var kExprIf = 0x03;
-var kExprElse = 0x04;
-var kExprSelect = 0x05;
-var kExprBr = 0x06;
-var kExprBrIf = 0x07;
-var kExprBrTable = 0x08;
-var kExprReturn = 0x09;
-var kExprThrow = 0xfa;
-var kExprTry = 0xfb;
-var kExprCatch = 0xfe;
-var kExprEnd = 0x0f;
-var kExprTeeLocal = 0x19;
-var kExprDrop = 0x0b;
-
-var kExprI32Const = 0x10;
-var kExprI64Const = 0x11;
-var kExprF64Const = 0x12;
-var kExprF32Const = 0x13;
-var kExprGetLocal = 0x14;
-var kExprSetLocal = 0x15;
-var kExprCallFunction = 0x16;
-var kExprCallIndirect = 0x17;
-var kExprI8Const = 0xcb;
-var kExprGetGlobal = 0xbb;
-var kExprSetGlobal = 0xbc;
-
-var kExprI32LoadMem8S = 0x20;
-var kExprI32LoadMem8U = 0x21;
-var kExprI32LoadMem16S = 0x22;
-var kExprI32LoadMem16U = 0x23;
-var kExprI64LoadMem8S = 0x24;
-var kExprI64LoadMem8U = 0x25;
-var kExprI64LoadMem16S = 0x26;
-var kExprI64LoadMem16U = 0x27;
-var kExprI64LoadMem32S = 0x28;
-var kExprI64LoadMem32U = 0x29;
-var kExprI32LoadMem = 0x2a;
-var kExprI64LoadMem = 0x2b;
-var kExprF32LoadMem = 0x2c;
-var kExprF64LoadMem = 0x2d;
-
-var kExprI32StoreMem8 = 0x2e;
-var kExprI32StoreMem16 = 0x2f;
-var kExprI64StoreMem8 = 0x30;
-var kExprI64StoreMem16 = 0x31;
-var kExprI64StoreMem32 = 0x32;
-var kExprI32StoreMem = 0x33;
-var kExprI64StoreMem = 0x34;
-var kExprF32StoreMem = 0x35;
-var kExprF64StoreMem = 0x36;
-
-var kExprMemorySize = 0x3b;
-var kExprGrowMemory = 0x39;
-
-var kExprI32Add = 0x40;
-var kExprI32Sub = 0x41;
-var kExprI32Mul = 0x42;
-var kExprI32DivS = 0x43;
-var kExprI32DivU = 0x44;
-var kExprI32RemS = 0x45;
-var kExprI32RemU = 0x46;
-var kExprI32And = 0x47;
-var kExprI32Ior = 0x48;
-var kExprI32Xor = 0x49;
-var kExprI32Shl = 0x4a;
-var kExprI32ShrU = 0x4b;
-var kExprI32ShrS = 0x4c;
-var kExprI32Eq = 0x4d;
-var kExprI32Ne = 0x4e;
-var kExprI32LtS = 0x4f;
-var kExprI32LeS = 0x50;
-var kExprI32LtU = 0x51;
-var kExprI32LeU = 0x52;
-var kExprI32GtS = 0x53;
-var kExprI32GeS = 0x54;
-var kExprI32GtU = 0x55;
-var kExprI32GeU = 0x56;
-var kExprI32Clz = 0x57;
-var kExprI32Ctz = 0x58;
-var kExprI32Popcnt = 0x59;
-var kExprI32Eqz = 0x5a;
-var kExprI64Add = 0x5b;
-var kExprI64Sub = 0x5c;
-var kExprI64Mul = 0x5d;
-var kExprI64DivS = 0x5e;
-var kExprI64DivU = 0x5f;
-var kExprI64RemS = 0x60;
-var kExprI64RemU = 0x61;
-var kExprI64And = 0x62;
-var kExprI64Ior = 0x63;
-var kExprI64Xor = 0x64;
-var kExprI64Shl = 0x65;
-var kExprI64ShrU = 0x66;
-var kExprI64ShrS = 0x67;
-var kExprI64Eq = 0x68;
-var kExprI64Ne = 0x69;
-var kExprI64LtS = 0x6a;
-var kExprI64LeS = 0x6b;
-var kExprI64LtU = 0x6c;
-var kExprI64LeU = 0x6d;
-var kExprI64GtS = 0x6e;
-var kExprI64GeS = 0x6f;
-var kExprI64GtU = 0x70;
-var kExprI64GeU = 0x71;
-var kExprI64Clz = 0x72;
-var kExprI64Ctz = 0x73;
-var kExprI64Popcnt = 0x74;
-var kExprF32Add = 0x75;
-var kExprF32Sub = 0x76;
-var kExprF32Mul = 0x77;
-var kExprF32Div = 0x78;
-var kExprF32Min = 0x79;
-var kExprF32Max = 0x7a;
-var kExprF32Abs = 0x7b;
-var kExprF32Neg = 0x7c;
-var kExprF32CopySign = 0x7d;
-var kExprF32Ceil = 0x7e;
-var kExprF32Floor = 0x7f;
-var kExprF32Trunc = 0x80;
-var kExprF32NearestInt = 0x81;
-var kExprF32Sqrt = 0x82;
-var kExprF32Eq = 0x83;
-var kExprF32Ne = 0x84;
-var kExprF32Lt = 0x85;
-var kExprF32Le = 0x86;
-var kExprF32Gt = 0x87;
-var kExprF32Ge = 0x88;
-var kExprF64Add = 0x89;
-var kExprF64Sub = 0x8a;
-var kExprF64Mul = 0x8b;
-var kExprF64Div = 0x8c;
-var kExprF64Min = 0x8d;
-var kExprF64Max = 0x8e;
-var kExprF64Abs = 0x8f;
-var kExprF64Neg = 0x90;
-var kExprF64CopySign = 0x91;
-var kExprF64Ceil = 0x92;
-var kExprF64Floor = 0x93;
-var kExprF64Trunc = 0x94;
-var kExprF64NearestInt = 0x95;
-var kExprF64Sqrt = 0x96;
-var kExprF64Eq = 0x97;
-var kExprF64Ne = 0x98;
-var kExprF64Lt = 0x99;
-var kExprF64Le = 0x9a;
-var kExprF64Gt = 0x9b;
-var kExprF64Ge = 0x9c;
-var kExprI32SConvertF32 = 0x9d;
-var kExprI32SConvertF64 = 0x9e;
-var kExprI32UConvertF32 = 0x9f;
-var kExprI32UConvertF64 = 0xa0;
-var kExprI32ConvertI64 = 0xa1;
-var kExprI64SConvertF32 = 0xa2;
-var kExprI64SConvertF64 = 0xa3;
-var kExprI64UConvertF32 = 0xa4;
-var kExprI64UConvertF64 = 0xa5;
-var kExprI64SConvertI32 = 0xa6;
-var kExprI64UConvertI32 = 0xa7;
-var kExprF32SConvertI32 = 0xa8;
-var kExprF32UConvertI32 = 0xa9;
-var kExprF32SConvertI64 = 0xaa;
-var kExprF32UConvertI64 = 0xab;
-var kExprF32ConvertF64 = 0xac;
-var kExprF32ReinterpretI32 = 0xad;
-var kExprF64SConvertI32 = 0xae;
-var kExprF64UConvertI32 = 0xaf;
-var kExprF64SConvertI64 = 0xb0;
-var kExprF64UConvertI64 = 0xb1;
-var kExprF64ConvertF32 = 0xb2;
-var kExprF64ReinterpretI64 = 0xb3;
-var kExprI32ReinterpretF32 = 0xb4;
-var kExprI64ReinterpretF64 = 0xb5;
-var kExprI32Ror = 0xb6;
-var kExprI32Rol = 0xb7;
-var kExprI64Ror = 0xb8;
-var kExprI64Rol = 0xb9;
+let kExprUnreachable = 0x00;
+let kExprNop = 0x01;
+let kExprBlock = 0x02;
+let kExprLoop = 0x03;
+let kExprIf = 0x04;
+let kExprElse = 0x05;
+let kExprTry = 0x06;
+let kExprCatch = 0x07;
+let kExprThrow = 0x08;
+let kExprEnd = 0x0b;
+let kExprBr = 0x0c;
+let kExprBrIf = 0x0d;
+let kExprBrTable = 0x0e;
+let kExprReturn = 0x0f;
+let kExprCallFunction = 0x10;
+let kExprCallIndirect = 0x11;
+let kExprDrop = 0x1a;
+let kExprSelect = 0x1b;
+let kExprGetLocal = 0x20;
+let kExprSetLocal = 0x21;
+let kExprTeeLocal = 0x22;
+let kExprGetGlobal = 0x23;
+let kExprSetGlobal = 0x24;
+let kExprI32Const = 0x41;
+let kExprI64Const = 0x42;
+let kExprF32Const = 0x43;
+let kExprF64Const = 0x44;
+let kExprI8Const = 0xcb;
+let kExprI32LoadMem = 0x28;
+let kExprI64LoadMem = 0x29;
+let kExprF32LoadMem = 0x2a;
+let kExprF64LoadMem = 0x2b;
+let kExprI32LoadMem8S = 0x2c;
+let kExprI32LoadMem8U = 0x2d;
+let kExprI32LoadMem16S = 0x2e;
+let kExprI32LoadMem16U = 0x2f;
+let kExprI64LoadMem8S = 0x30;
+let kExprI64LoadMem8U = 0x31;
+let kExprI64LoadMem16S = 0x32;
+let kExprI64LoadMem16U = 0x33;
+let kExprI64LoadMem32S = 0x34;
+let kExprI64LoadMem32U = 0x35;
+let kExprI32StoreMem = 0x36;
+let kExprI64StoreMem = 0x37;
+let kExprF32StoreMem = 0x38;
+let kExprF64StoreMem = 0x39;
+let kExprI32StoreMem8 = 0x3a;
+let kExprI32StoreMem16 = 0x3b;
+let kExprI64StoreMem8 = 0x3c;
+let kExprI64StoreMem16 = 0x3d;
+let kExprI64StoreMem32 = 0x3e;
+let kExprMemorySize = 0x3f;
+let kExprGrowMemory = 0x40;
+let kExprI32Eqz = 0x45;
+let kExprI32Eq = 0x46;
+let kExprI32Ne = 0x47;
+let kExprI32LtS = 0x48;
+let kExprI32LtU = 0x49;
+let kExprI32GtS = 0x4a;
+let kExprI32GtU = 0x4b;
+let kExprI32LeS = 0x4c;
+let kExprI32LeU = 0x4d;
+let kExprI32GeS = 0x4e;
+let kExprI32GeU = 0x4f;
+let kExprI64Eqz = 0x50;
+let kExprI64Eq = 0x51;
+let kExprI64Ne = 0x52;
+let kExprI64LtS = 0x53;
+let kExprI64LtU = 0x54;
+let kExprI64GtS = 0x55;
+let kExprI64GtU = 0x56;
+let kExprI64LeS = 0x57;
+let kExprI64LeU = 0x58;
+let kExprI64GeS = 0x59;
+let kExprI64GeU = 0x5a;
+let kExprF32Eq = 0x5b;
+let kExprF32Ne = 0x5c;
+let kExprF32Lt = 0x5d;
+let kExprF32Gt = 0x5e;
+let kExprF32Le = 0x5f;
+let kExprF32Ge = 0x60;
+let kExprF64Eq = 0x61;
+let kExprF64Ne = 0x62;
+let kExprF64Lt = 0x63;
+let kExprF64Gt = 0x64;
+let kExprF64Le = 0x65;
+let kExprF64Ge = 0x66;
+let kExprI32Clz = 0x67;
+let kExprI32Ctz = 0x68;
+let kExprI32Popcnt = 0x69;
+let kExprI32Add = 0x6a;
+let kExprI32Sub = 0x6b;
+let kExprI32Mul = 0x6c;
+let kExprI32DivS = 0x6d;
+let kExprI32DivU = 0x6e;
+let kExprI32RemS = 0x6f;
+let kExprI32RemU = 0x70;
+let kExprI32And = 0x71;
+let kExprI32Ior = 0x72;
+let kExprI32Xor = 0x73;
+let kExprI32Shl = 0x74;
+let kExprI32ShrS = 0x75;
+let kExprI32ShrU = 0x76;
+let kExprI32Rol = 0x77;
+let kExprI32Ror = 0x78;
+let kExprI64Clz = 0x79;
+let kExprI64Ctz = 0x7a;
+let kExprI64Popcnt = 0x7b;
+let kExprI64Add = 0x7c;
+let kExprI64Sub = 0x7d;
+let kExprI64Mul = 0x7e;
+let kExprI64DivS = 0x7f;
+let kExprI64DivU = 0x80;
+let kExprI64RemS = 0x81;
+let kExprI64RemU = 0x82;
+let kExprI64And = 0x83;
+let kExprI64Ior = 0x84;
+let kExprI64Xor = 0x85;
+let kExprI64Shl = 0x86;
+let kExprI64ShrS = 0x87;
+let kExprI64ShrU = 0x88;
+let kExprI64Rol = 0x89;
+let kExprI64Ror = 0x8a;
+let kExprF32Abs = 0x8b;
+let kExprF32Neg = 0x8c;
+let kExprF32Ceil = 0x8d;
+let kExprF32Floor = 0x8e;
+let kExprF32Trunc = 0x8f;
+let kExprF32NearestInt = 0x90;
+let kExprF32Sqrt = 0x91;
+let kExprF32Add = 0x92;
+let kExprF32Sub = 0x93;
+let kExprF32Mul = 0x94;
+let kExprF32Div = 0x95;
+let kExprF32Min = 0x96;
+let kExprF32Max = 0x97;
+let kExprF32CopySign = 0x98;
+let kExprF64Abs = 0x99;
+let kExprF64Neg = 0x9a;
+let kExprF64Ceil = 0x9b;
+let kExprF64Floor = 0x9c;
+let kExprF64Trunc = 0x9d;
+let kExprF64NearestInt = 0x9e;
+let kExprF64Sqrt = 0x9f;
+let kExprF64Add = 0xa0;
+let kExprF64Sub = 0xa1;
+let kExprF64Mul = 0xa2;
+let kExprF64Div = 0xa3;
+let kExprF64Min = 0xa4;
+let kExprF64Max = 0xa5;
+let kExprF64CopySign = 0xa6;
+let kExprI32ConvertI64 = 0xa7;
+let kExprI32SConvertF32 = 0xa8;
+let kExprI32UConvertF32 = 0xa9;
+let kExprI32SConvertF64 = 0xaa;
+let kExprI32UConvertF64 = 0xab;
+let kExprI64SConvertI32 = 0xac;
+let kExprI64UConvertI32 = 0xad;
+let kExprI64SConvertF32 = 0xae;
+let kExprI64UConvertF32 = 0xaf;
+let kExprI64SConvertF64 = 0xb0;
+let kExprI64UConvertF64 = 0xb1;
+let kExprF32SConvertI32 = 0xb2;
+let kExprF32UConvertI32 = 0xb3;
+let kExprF32SConvertI64 = 0xb4;
+let kExprF32UConvertI64 = 0xb5;
+let kExprF32ConvertF64 = 0xb6;
+let kExprF64SConvertI32 = 0xb7;
+let kExprF64UConvertI32 = 0xb8;
+let kExprF64SConvertI64 = 0xb9;
+let kExprF64UConvertI64 = 0xba;
+let kExprF64ConvertF32 = 0xbb;
+let kExprI32ReinterpretF32 = 0xbc;
+let kExprI64ReinterpretF64 = 0xbd;
+let kExprF32ReinterpretI32 = 0xbe;
+let kExprF64ReinterpretI64 = 0xbf;
-var kTrapUnreachable = 0;
-var kTrapMemOutOfBounds = 1;
-var kTrapDivByZero = 2;
-var kTrapDivUnrepresentable = 3;
-var kTrapRemByZero = 4;
-var kTrapFloatUnrepresentable = 5;
-var kTrapFuncInvalid = 6;
-var kTrapFuncSigMismatch = 7;
-var kTrapInvalidIndex = 8;
+let kTrapUnreachable = 0;
+let kTrapMemOutOfBounds = 1;
+let kTrapDivByZero = 2;
+let kTrapDivUnrepresentable = 3;
+let kTrapRemByZero = 4;
+let kTrapFloatUnrepresentable = 5;
+let kTrapFuncInvalid = 6;
+let kTrapFuncSigMismatch = 7;
+let kTrapInvalidIndex = 8;
-var kTrapMsgs = [
+let kTrapMsgs = [
"unreachable",
"memory access out of bounds",
"divide by zero",
diff --git a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
index 7b77a8c9b1..900198d7d0 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-module-builder.js
@@ -2,6 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Used for encoding f32 and double constants to bits.
+let __buffer = new ArrayBuffer(8);
+let byte_view = new Int8Array(__buffer);
+let f32_view = new Float32Array(__buffer);
+let f64_view = new Float64Array(__buffer);
+
class Binary extends Array {
emit_u8(val) {
this.push(val);
@@ -19,7 +25,7 @@ class Binary extends Array {
this.push((val >> 24) & 0xff);
}
- emit_varint(val) {
+ emit_u32v(val) {
while (true) {
let v = val & 0xff;
val = val >>> 7;
@@ -40,7 +46,7 @@ class Binary extends Array {
emit_string(string) {
// When testing illegal names, we pass a byte array directly.
if (string instanceof Array) {
- this.emit_varint(string.length);
+ this.emit_u32v(string.length);
this.emit_bytes(string);
return;
}
@@ -48,7 +54,7 @@ class Binary extends Array {
// This is the hacky way to convert a JavaScript string to a UTF8 encoded
// string only containing single-byte characters.
let string_utf8 = unescape(encodeURIComponent(string));
- this.emit_varint(string_utf8.length);
+ this.emit_u32v(string_utf8.length);
for (let i = 0; i < string_utf8.length; i++) {
this.emit_u8(string_utf8.charCodeAt(i));
}
@@ -66,26 +72,27 @@ class Binary extends Array {
let section = new Binary;
content_generator(section);
// Emit section length.
- this.emit_varint(section.length);
+ this.emit_u32v(section.length);
// Copy the temporary buffer.
this.push(...section);
}
}
class WasmFunctionBuilder {
- constructor(name, type_index) {
+ constructor(module, name, type_index) {
+ this.module = module;
this.name = name;
this.type_index = type_index;
- this.exports = [];
+ this.body = [];
}
exportAs(name) {
- this.exports.push(name);
+ this.module.addExport(name, this.index);
return this;
}
exportFunc() {
- this.exports.push(this.name);
+ this.exportAs(this.name);
return this;
}
@@ -98,24 +105,47 @@ class WasmFunctionBuilder {
this.locals = locals;
return this;
}
+
+ end() {
+ return this.module;
+ }
+}
+
+class WasmGlobalBuilder {
+ constructor(module, type, mutable) {
+ this.module = module;
+ this.type = type;
+ this.mutable = mutable;
+ this.init = 0;
+ }
+
+ exportAs(name) {
+ this.module.exports.push({name: name, kind: kExternalGlobal,
+ index: this.index});
+ return this;
+ }
}
class WasmModuleBuilder {
constructor() {
this.types = [];
this.imports = [];
+ this.exports = [];
this.globals = [];
this.functions = [];
- this.exports = [];
- this.table = [];
+ this.function_table = [];
+ this.function_table_length = 0;
+ this.function_table_inits = [];
this.segments = [];
this.explicit = [];
- this.pad = null;
+ this.num_imported_funcs = 0;
+ this.num_imported_globals = 0;
return this;
}
addStart(start_index) {
this.start_index = start_index;
+ return this;
}
addMemory(min, max, exp) {
@@ -123,11 +153,6 @@ class WasmModuleBuilder {
return this;
}
- addPadFunctionTable(size) {
- this.pad = size;
- return this;
- }
-
addExplicitSection(bytes) {
this.explicit.push(bytes);
return this;
@@ -139,40 +164,93 @@ class WasmModuleBuilder {
return this.types.length - 1;
}
- addGlobal(local_type) {
- this.globals.push(local_type);
- return this.globals.length - 1;
+ addGlobal(local_type, mutable) {
+ let glob = new WasmGlobalBuilder(this, local_type, mutable);
+ glob.index = this.globals.length + this.num_imported_globals;
+ this.globals.push(glob);
+ return glob;
}
addFunction(name, type) {
let type_index = (typeof type) == "number" ? type : this.addType(type);
- let func = new WasmFunctionBuilder(name, type_index);
- func.index = this.functions.length + this.imports.length;
+ let func = new WasmFunctionBuilder(this, name, type_index);
+ func.index = this.functions.length + this.num_imported_funcs;
this.functions.push(func);
return func;
}
addImportWithModule(module, name, type) {
let type_index = (typeof type) == "number" ? type : this.addType(type);
- this.imports.push({module: module, name: name, type: type_index});
- return this.imports.length - 1;
+ this.imports.push({module: module, name: name, kind: kExternalFunction,
+ type: type_index});
+ return this.num_imported_funcs++;
}
addImport(name, type) {
return this.addImportWithModule(name, undefined, type);
}
- addDataSegment(addr, data, init) {
- this.segments.push({addr: addr, data: data, init: init});
+ addImportedGlobal(module, name, type) {
+ let o = {module: module, name: name, kind: kExternalGlobal, type: type,
+ mutable: false}
+ this.imports.push(o);
+ return this.num_imported_globals++;
+ }
+
+ addImportedMemory(module, name, initial = 0, maximum) {
+ let o = {module: module, name: name, kind: kExternalMemory,
+ initial: initial, maximum: maximum};
+ this.imports.push(o);
+ return this;
+ }
+
+ addImportedTable(module, name, initial, maximum) {
+ let o = {module: module, name: name, kind: kExternalTable, initial: initial,
+ maximum: maximum};
+ this.imports.push(o);
+ }
+
+ addExport(name, index) {
+ this.exports.push({name: name, kind: kExternalFunction, index: index});
+ return this;
+ }
+
+ addExportOfKind(name, kind, index) {
+ this.exports.push({name: name, kind: kind, index: index});
+ return this;
+ }
+
+ addDataSegment(addr, data, is_global = false) {
+ this.segments.push({addr: addr, data: data, is_global: is_global});
return this.segments.length - 1;
}
+ exportMemoryAs(name) {
+ this.exports.push({name: name, kind: kExternalMemory, index: 0});
+ }
+
+ addFunctionTableInit(base, is_global, array) {
+ this.function_table_inits.push({base: base, is_global: is_global,
+ array: array});
+ if (!is_global) {
+ var length = base + array.length;
+ if (length > this.function_table_length) {
+ this.function_table_length = length;
+ }
+ }
+ return this;
+ }
+
appendToTable(array) {
- this.table.push(...array);
+ return this.addFunctionTableInit(this.function_table.length, false, array);
+ }
+
+ setFunctionTableLength(length) {
+ this.function_table_length = length;
return this;
}
- toArray(debug) {
+ toArray(debug = false) {
let binary = new Binary;
let wasm = this;
@@ -183,14 +261,14 @@ class WasmModuleBuilder {
if (wasm.types.length > 0) {
if (debug) print("emitting types @ " + binary.length);
binary.emit_section(kTypeSectionCode, section => {
- section.emit_varint(wasm.types.length);
+ section.emit_u32v(wasm.types.length);
for (let type of wasm.types) {
section.emit_u8(kWasmFunctionTypeForm);
- section.emit_varint(type.params.length);
+ section.emit_u32v(type.params.length);
for (let param of type.params) {
section.emit_u8(param);
}
- section.emit_varint(type.results.length);
+ section.emit_u32v(type.results.length);
for (let result of type.results) {
section.emit_u8(result);
}
@@ -202,12 +280,30 @@ class WasmModuleBuilder {
if (wasm.imports.length > 0) {
if (debug) print("emitting imports @ " + binary.length);
binary.emit_section(kImportSectionCode, section => {
- section.emit_varint(wasm.imports.length);
+ section.emit_u32v(wasm.imports.length);
for (let imp of wasm.imports) {
section.emit_string(imp.module);
section.emit_string(imp.name || '');
- section.emit_u8(kExternalFunction);
- section.emit_varint(imp.type);
+ section.emit_u8(imp.kind);
+ if (imp.kind == kExternalFunction) {
+ section.emit_u32v(imp.type);
+ } else if (imp.kind == kExternalGlobal) {
+ section.emit_u32v(imp.type);
+ section.emit_u8(imp.mutable);
+ } else if (imp.kind == kExternalMemory) {
+ var has_max = (typeof imp.maximum) != "undefined";
+ section.emit_u8(has_max ? 1 : 0); // flags
+ section.emit_u32v(imp.initial); // initial
+ if (has_max) section.emit_u32v(imp.maximum); // maximum
+ } else if (imp.kind == kExternalTable) {
+ section.emit_u8(kWasmAnyFunctionTypeForm);
+ var has_max = (typeof imp.maximum) != "undefined";
+ section.emit_u8(has_max ? 1 : 0); // flags
+ section.emit_u32v(imp.initial); // initial
+ if (has_max) section.emit_u32v(imp.maximum); // maximum
+ } else {
+ throw new Error("unknown/unsupported import kind " + imp.kind);
+ }
}
});
}
@@ -215,29 +311,27 @@ class WasmModuleBuilder {
// Add functions declarations
let has_names = false;
let names = false;
- let exports = 0;
if (wasm.functions.length > 0) {
if (debug) print("emitting function decls @ " + binary.length);
binary.emit_section(kFunctionSectionCode, section => {
- section.emit_varint(wasm.functions.length);
+ section.emit_u32v(wasm.functions.length);
for (let func of wasm.functions) {
has_names = has_names || (func.name != undefined &&
func.name.length > 0);
- exports += func.exports.length;
- section.emit_varint(func.type_index);
+ section.emit_u32v(func.type_index);
}
});
}
- // Add table.
- if (wasm.table.length > 0) {
+ // Add function_table.
+ if (wasm.function_table_length > 0) {
if (debug) print("emitting table @ " + binary.length);
binary.emit_section(kTableSectionCode, section => {
section.emit_u8(1); // one table entry
section.emit_u8(kWasmAnyFunctionTypeForm);
section.emit_u8(1);
- section.emit_varint(wasm.table.length);
- section.emit_varint(wasm.table.length);
+ section.emit_u32v(wasm.function_table_length);
+ section.emit_u32v(wasm.function_table_length);
});
}
@@ -246,9 +340,9 @@ class WasmModuleBuilder {
if (debug) print("emitting memory @ " + binary.length);
binary.emit_section(kMemorySectionCode, section => {
section.emit_u8(1); // one memory entry
- section.emit_varint(kResizableMaximumFlag);
- section.emit_varint(wasm.memory.min);
- section.emit_varint(wasm.memory.max);
+ section.emit_u32v(kResizableMaximumFlag);
+ section.emit_u32v(wasm.memory.min);
+ section.emit_u32v(wasm.memory.max);
});
}
@@ -256,28 +350,46 @@ class WasmModuleBuilder {
if (wasm.globals.length > 0) {
if (debug) print ("emitting globals @ " + binary.length);
binary.emit_section(kGlobalSectionCode, section => {
- section.emit_varint(wasm.globals.length);
- for (let global_type of wasm.globals) {
- section.emit_u8(global_type);
- section.emit_u8(true); // mutable
- switch (global_type) {
+ section.emit_u32v(wasm.globals.length);
+ for (let global of wasm.globals) {
+ section.emit_u8(global.type);
+ section.emit_u8(global.mutable);
+ if ((typeof global.init_index) == "undefined") {
+ // Emit a constant initializer.
+ switch (global.type) {
case kAstI32:
section.emit_u8(kExprI32Const);
- section.emit_u8(0);
+ section.emit_u32v(global.init);
break;
case kAstI64:
section.emit_u8(kExprI64Const);
- section.emit_u8(0);
+ section.emit_u8(global.init);
break;
case kAstF32:
section.emit_u8(kExprF32Const);
- section.emit_u32(0);
+ f32_view[0] = global.init;
+ section.emit_u8(byte_view[0]);
+ section.emit_u8(byte_view[1]);
+ section.emit_u8(byte_view[2]);
+ section.emit_u8(byte_view[3]);
break;
case kAstF64:
- section.emit_u8(kExprI32Const);
- section.emit_u32(0);
- section.emit_u32(0);
+ section.emit_u8(kExprF64Const);
+ f64_view[0] = global.init;
+ section.emit_u8(byte_view[0]);
+ section.emit_u8(byte_view[1]);
+ section.emit_u8(byte_view[2]);
+ section.emit_u8(byte_view[3]);
+ section.emit_u8(byte_view[4]);
+ section.emit_u8(byte_view[5]);
+ section.emit_u8(byte_view[6]);
+ section.emit_u8(byte_view[7]);
break;
+ }
+ } else {
+ // Emit a global-index initializer.
+ section.emit_u8(kExprGetGlobal);
+ section.emit_u32v(global.init_index);
}
section.emit_u8(kExprEnd); // end of init expression
}
@@ -286,16 +398,15 @@ class WasmModuleBuilder {
// Add export table.
var mem_export = (wasm.memory != undefined && wasm.memory.exp);
- if (exports > 0 || mem_export) {
+ var exports_count = wasm.exports.length + (mem_export ? 1 : 0);
+ if (exports_count > 0) {
if (debug) print("emitting exports @ " + binary.length);
binary.emit_section(kExportSectionCode, section => {
- section.emit_varint(exports + (mem_export ? 1 : 0));
- for (let func of wasm.functions) {
- for (let exp of func.exports) {
- section.emit_string(exp);
- section.emit_u8(kExternalFunction);
- section.emit_varint(func.index);
- }
+ section.emit_u32v(exports_count);
+ for (let exp of wasm.exports) {
+ section.emit_string(exp.name);
+ section.emit_u8(exp.kind);
+ section.emit_u32v(exp.index);
}
if (mem_export) {
section.emit_string("memory");
@@ -309,22 +420,30 @@ class WasmModuleBuilder {
if (wasm.start_index != undefined) {
if (debug) print("emitting start function @ " + binary.length);
binary.emit_section(kStartSectionCode, section => {
- section.emit_varint(wasm.start_index);
+ section.emit_u32v(wasm.start_index);
});
}
// Add table elements.
- if (wasm.table.length > 0) {
+ if (wasm.function_table_inits.length > 0) {
if (debug) print("emitting table @ " + binary.length);
binary.emit_section(kElementSectionCode, section => {
- section.emit_u8(1);
+ var inits = wasm.function_table_inits;
+ section.emit_u32v(inits.length);
section.emit_u8(0); // table index
- section.emit_u8(kExprI32Const);
- section.emit_u8(0);
- section.emit_u8(kExprEnd);
- section.emit_varint(wasm.table.length);
- for (let index of wasm.table) {
- section.emit_varint(index);
+
+ for (let init of inits) {
+ if (init.is_global) {
+ section.emit_u8(kExprGetGlobal);
+ } else {
+ section.emit_u8(kExprI32Const);
+ }
+ section.emit_u32v(init.base);
+ section.emit_u8(kExprEnd);
+ section.emit_u32v(init.array.length);
+ for (let index of init.array) {
+ section.emit_u32v(index);
+ }
}
});
}
@@ -334,7 +453,7 @@ class WasmModuleBuilder {
// emit function bodies
if (debug) print("emitting code @ " + binary.length);
binary.emit_section(kCodeSectionCode, section => {
- section.emit_varint(wasm.functions.length);
+ section.emit_u32v(wasm.functions.length);
for (let func of wasm.functions) {
// Function body length will be patched later.
let local_decls = [];
@@ -356,13 +475,13 @@ class WasmModuleBuilder {
}
let header = new Binary;
- header.emit_varint(local_decls.length);
+ header.emit_u32v(local_decls.length);
for (let decl of local_decls) {
- header.emit_varint(decl.count);
+ header.emit_u32v(decl.count);
header.emit_u8(decl.type);
}
- section.emit_varint(header.length + func.body.length);
+ section.emit_u32v(header.length + func.body.length);
section.emit_bytes(header);
section.emit_bytes(func.body);
}
@@ -373,13 +492,20 @@ class WasmModuleBuilder {
if (wasm.segments.length > 0) {
if (debug) print("emitting data segments @ " + binary.length);
binary.emit_section(kDataSectionCode, section => {
- section.emit_varint(wasm.segments.length);
+ section.emit_u32v(wasm.segments.length);
for (let seg of wasm.segments) {
section.emit_u8(0); // linear memory index 0
- section.emit_u8(kExprI32Const);
- section.emit_varint(seg.addr);
+ if (seg.is_global) {
+ // initializer is a global variable
+ section.emit_u8(kExprGetGlobal);
+ section.emit_u32v(seg.addr);
+ } else {
+ // initializer is a constant
+ section.emit_u8(kExprI32Const);
+ section.emit_u32v(seg.addr);
+ }
section.emit_u8(kExprEnd);
- section.emit_varint(seg.data.length);
+ section.emit_u32v(seg.data.length);
section.emit_bytes(seg.data);
}
});
@@ -396,7 +522,12 @@ class WasmModuleBuilder {
if (debug) print("emitting names @ " + binary.length);
binary.emit_section(kUnknownSectionCode, section => {
section.emit_string("name");
- section.emit_varint(wasm.functions.length);
+ var count = wasm.functions.length + wasm.num_imported_funcs;
+ section.emit_u32v(count);
+ for (var i = 0; i < wasm.num_imported_funcs; i++) {
+ section.emit_u8(0); // empty string
+ section.emit_u8(0); // local names count == 0
+ }
for (let func of wasm.functions) {
var name = func.name == undefined ? "" : func.name;
section.emit_string(name);
@@ -408,7 +539,7 @@ class WasmModuleBuilder {
return binary;
}
- toBuffer(debug) {
+ toBuffer(debug = false) {
let bytes = this.toArray(debug);
let buffer = new ArrayBuffer(bytes.length);
let view = new Uint8Array(buffer);
diff --git a/deps/v8/test/mjsunit/wasm/wasm-object-api.js b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
index b8663b3b29..3888e3638b 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-object-api.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
@@ -4,13 +4,10 @@
// Flags: --expose-wasm
-assertFalse(undefined === Wasm);
-assertFalse(undefined == Wasm);
-assertEquals("function", typeof Wasm.verifyModule);
-assertEquals("function", typeof Wasm.verifyFunction);
-assertEquals("function", typeof Wasm.instantiateModule);
-assertFalse(undefined == Wasm.experimentalVersion);
+// Old API should be gone.
+assertEquals("undefined", typeof Wasm);
+// New API should rule.
assertEquals('object', typeof WebAssembly);
assertEquals('function', typeof WebAssembly.Module);
assertEquals('function', typeof WebAssembly.Instance);
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 26503bfa6e..d5b967408b 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -870,6 +870,18 @@
}], # ALWAYS
+['variant == turbofan_opt', {
+ # These timeout flakily under CFI/turbofan_opt
+ 'js1_5/Regress/regress-360969-05': [SKIP],
+ 'js1_5/Regress/regress-360969-06': [SKIP],
+
+ # These timeout under CFI/turbofan_opt
+ 'ecma/FunctionObjects/15.3.1.1-3': [SKIP],
+ 'ecma/FunctionObjects/15.3.2.1-3': [SKIP],
+ 'ecma/FunctionObjects/15.3.5-1': [SKIP],
+}], # variant == turbofan_opt
+
+
['no_i18n == True and mode == debug', {
# Tests too slow for no18n debug.
'ecma_3/Statements/regress-302439': [PASS, FAST_VARIANTS],
diff --git a/deps/v8/test/optimize_for_size.isolate b/deps/v8/test/optimize_for_size.isolate
index 16b93157d3..6f3313e868 100644
--- a/deps/v8/test/optimize_for_size.isolate
+++ b/deps/v8/test/optimize_for_size.isolate
@@ -9,6 +9,8 @@
},
'includes': [
'cctest/cctest.isolate',
+ 'debugger/debugger.isolate',
+ 'inspector/inspector.isolate',
'intl/intl.isolate',
'mjsunit/mjsunit.isolate',
'webkit/webkit.isolate',
diff --git a/deps/v8/test/perf.isolate b/deps/v8/test/perf.isolate
index 77f66cc67c..6142152658 100644
--- a/deps/v8/test/perf.isolate
+++ b/deps/v8/test/perf.isolate
@@ -8,6 +8,11 @@
],
'files': [
'../tools/run_perf.py',
+ # TODO(machenbach): These files are referenced by the perf runner.
+ # They should be transformed into a proper python module.
+ '../tools/testrunner/local/commands.py',
+ '../tools/testrunner/local/utils.py',
+ '../tools/testrunner/objects/output.py',
# This is often used to trigger performance bots. We include it in the
# isolate to not get these builds deduped.
'../tools/whitespace.txt',
diff --git a/deps/v8/test/test262/list.py b/deps/v8/test/test262/list.py
index 0e82cb59ad..9b36ce789c 100755
--- a/deps/v8/test/test262/list.py
+++ b/deps/v8/test/test262/list.py
@@ -4,6 +4,7 @@
# found in the LICENSE file.
import os
+import sys
import tarfile
from itertools import chain
@@ -13,4 +14,8 @@ for root, dirs, files in chain(os.walk("data"), os.walk("harness")):
dirs[:] = [d for d in dirs if not d.endswith('.git')]
for name in files:
# These names are for gyp, which expects slashes on all platforms.
- print('/'.join(root.split(os.sep) + [name]))
+ pathname = '/'.join(root.split(os.sep) + [name])
+ # For gyp, quote the name in case it includes spaces
+ if len(sys.argv) > 1 and sys.argv[1] == '--quoted':
+ pathname = '"' + pathname + '"'
+ print(pathname)
diff --git a/deps/v8/test/test262/test262.gyp b/deps/v8/test/test262/test262.gyp
index 2bdc6cefe5..eb14da4010 100644
--- a/deps/v8/test/test262/test262.gyp
+++ b/deps/v8/test/test262/test262.gyp
@@ -22,7 +22,7 @@
'actions': [
{
'action_name': 'archive_test262',
- 'inputs': ['archive.py', '<!@(python list.py)'],
+ 'inputs': ['archive.py', '<!@(python list.py --quoted)'],
'outputs': ['data.tar'],
'action': ['python', 'archive.py'],
},
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 479e2cb198..17148cd0ba 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -30,11 +30,6 @@
[ALWAYS, {
###################### NEEDS INVESTIGATION #######################
- # This is an incompatibility between ES5 and V8 on enumerating
- # shadowed elements in a for..in loop.
- # https://code.google.com/p/v8/issues/detail?id=705
- 'language/statements/for-in/12.6.4-2': [PASS, FAIL_OK],
-
# Date tests that fail in CE(S)T timezone.
# https://bugs.chromium.org/p/v8/issues/detail?id=5449
'built-ins/Date/prototype/setFullYear/new-value-time-clip': [PASS, FAIL],
@@ -110,21 +105,8 @@
'built-ins/RegExp/prototype/Symbol.replace/y-init-lastindex': [FAIL],
'built-ins/RegExp/prototype/Symbol.replace/y-set-lastindex': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5360
- 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex-err': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-restore-err': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=5123
- 'built-ins/RegExp/prototype/Symbol.replace/coerce-global': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.replace/coerce-unicode': [FAIL],
-
###### END REGEXP SUBCLASSING SECTION ######
- # https://bugs.chromium.org/p/v8/issues/detail?id=5360
- 'built-ins/RegExp/prototype/Symbol.match/builtin-coerce-lastindex-err': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/Symbol.search/set-lastindex-restore': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4360
'intl402/Collator/10.1.1_1': [FAIL],
'intl402/DateTimeFormat/12.1.1_1': [FAIL],
@@ -198,7 +180,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=4648
'built-ins/TypedArray/prototype/copyWithin/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/entries/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/every/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/fill/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/filter/detached-buffer': [FAIL],
@@ -208,7 +189,6 @@
'built-ins/TypedArray/prototype/includes/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/indexOf/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/join/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/keys/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/lastIndexOf/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/map/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/reverse/detached-buffer': [FAIL],
@@ -218,11 +198,21 @@
'built-ins/TypedArray/prototype/subarray/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/toLocaleString/detached-buffer': [FAIL],
'built-ins/TypedArray/prototype/toString/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/values/detached-buffer': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4034
'built-ins/ThrowTypeError/unique-per-realm-function-proto': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5535
+ 'built-ins/ThrowTypeError/unique-per-realm-non-simple': [FAIL],
+ 'built-ins/ThrowTypeError/unique-per-realm-unmapped-args': [FAIL],
+ 'language/arguments-object/10.6-13-b-1-s': [FAIL],
+ 'language/arguments-object/10.6-13-b-2-s': [FAIL],
+ 'language/arguments-object/10.6-13-b-3-s': [FAIL],
+ 'language/arguments-object/10.6-14-1-s': [FAIL],
+ 'language/arguments-object/10.6-14-b-1-s': [FAIL],
+ 'language/arguments-object/10.6-14-b-4-s': [FAIL],
+ 'language/statements/class/strict-mode/arguments-caller': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=4231
'language/eval-code/direct/var-env-lower-lex-catch-non-strict': [FAIL],
@@ -312,9 +302,6 @@
'built-ins/Function/prototype/toString/setter-object': [FAIL],
'built-ins/Function/prototype/toString/unicode': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5012
- # http://bugs.icu-project.org/trac/ticket/12671
- 'intl402/Intl/getCanonicalLocales/weird-cases': [FAIL],
# https://github.com/tc39/test262/issues/743
'intl402/Intl/getCanonicalLocales/main': [FAIL],
@@ -383,12 +370,6 @@
'annexB/language/eval-code/direct/func-switch-case-eval-func-block-scoping': [FAIL],
'annexB/language/eval-code/direct/func-switch-dflt-eval-func-block-scoping': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5136
- 'annexB/language/comments/multi-line-html-close': [FAIL],
- 'annexB/language/comments/single-line-html-close': [FAIL],
- 'annexB/language/comments/single-line-html-close-asi': [FAIL],
- 'annexB/language/comments/single-line-html-open': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5137
'annexB/built-ins/RegExp/prototype/compile/flags-undefined': [FAIL],
'annexB/built-ins/RegExp/prototype/compile/pattern-regexp-distinct': [FAIL],
@@ -412,26 +393,9 @@
'built-ins/Number/S9.3.1_A2_U180E': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=5051
- 'language/expressions/arrow-function/params-trailing-comma': [FAIL],
- 'language/expressions/arrow-function/params-trailing-comma-length': [FAIL],
- 'language/expressions/function/params-trailing-comma': [FAIL],
- 'language/expressions/function/params-trailing-comma-arguments': [FAIL],
- 'language/expressions/function/params-trailing-comma-length': [FAIL],
- 'language/expressions/generators/params-trailing-comma': [FAIL],
- 'language/expressions/generators/params-trailing-comma-arguments': [FAIL],
- 'language/expressions/generators/params-trailing-comma-length': [FAIL],
- 'language/expressions/object/method-definition/params-trailing-comma': [FAIL],
- 'language/expressions/object/method-definition/params-trailing-comma-arguments': [FAIL],
- 'language/expressions/object/method-definition/params-trailing-comma-length': [FAIL],
+ 'language/expressions/call/trailing-comma': [FAIL],
'language/statements/class/definition/params-trailing-comma': [FAIL],
'language/statements/class/definition/params-trailing-comma-arguments': [FAIL],
- 'language/statements/class/definition/params-trailing-comma-length': [FAIL],
- 'language/statements/function/params-trailing-comma': [FAIL],
- 'language/statements/function/params-trailing-comma-arguments': [FAIL],
- 'language/statements/function/params-trailing-comma-length': [FAIL],
- 'language/statements/generators/params-trailing-comma': [FAIL],
- 'language/statements/generators/params-trailing-comma-arguments': [FAIL],
- 'language/statements/generators/params-trailing-comma-length': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=5326
'language/expressions/super/call-new-target-undef': [FAIL],
@@ -474,14 +438,14 @@
'annexB/language/eval-code/direct/func-switch-case-eval-func-no-skip-try': [FAIL],
'annexB/language/eval-code/direct/func-switch-dflt-eval-func-no-skip-try': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5334
- 'built-ins/Proxy/setPrototypeOf/internals-call-order': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=5336
'language/expressions/super/call-proto-not-ctor': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5337
- 'language/expressions/arrow-function/dstr-dflt-ary-ptrn-rest-ary-rest': [SKIP],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5546
+ 'language/expressions/tagged-template/invalid-escape-sequences': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=5537
+ 'built-ins/global/*': [SKIP],
######################## NEEDS INVESTIGATION ###########################
@@ -628,8 +592,10 @@
'*': [SKIP],
}], # variant == asm_wasm
+
# Module-related tests
# https://bugs.chromium.org/p/v8/issues/detail?id=1569
+
['variant != ignition and variant != ignition_staging and variant != ignition_turbofan', {
'language/eval-code/direct/export': [SKIP],
'language/eval-code/direct/import': [SKIP],
@@ -637,11 +603,23 @@
'language/eval-code/indirect/import': [SKIP],
'language/module-code/*': [SKIP],
}], # variant != ignition and variant != ignition_staging and variant != ignition_turbofan
+
['variant == ignition or variant == ignition_staging or variant == ignition_turbofan', {
- 'language/module-code/comment-*': [SKIP],
- 'language/module-code/eval-*': [SKIP],
- 'language/module-code/instn-*': [SKIP],
- 'language/module-code/namespace/*': [SKIP],
+ # v8:5485
+ 'language/module-code/comment-multi-line-html*': [FAIL],
+ 'language/module-code/comment-single-line-html*': [FAIL],
+
+ # v8:5486
+ 'language/module-code/instn-iee-star-cycle': [FAIL],
+ 'language/module-code/instn-named-star-cycle': [FAIL],
+ 'language/module-code/instn-star-star-cycle': [FAIL],
+
+ # v8:5487
+ 'language/module-code/namespace/internals/get-own-property-str-found-uninit': [FAIL],
+
+ # v8:5401
+ 'language/module-code/namespace/internals/set': [FAIL],
+ 'language/module-code/namespace/internals/define-own-property': [FAIL],
}], # variant == ignition or variant == ignition_staging or variant == ignition_turbofan
]
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index 391b0e8c4f..6319feb13c 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -28,6 +28,7 @@
import imp
import os
+import re
import sys
import tarfile
@@ -120,7 +121,7 @@ class Test262TestSuite(testsuite.TestSuite):
dirs.sort()
files.sort()
for filename in files:
- if filename.endswith(".js"):
+ if filename.endswith(".js") and not filename.endswith("_FIXTURE.js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.testroot) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
@@ -184,10 +185,9 @@ class Test262TestSuite(testsuite.TestSuite):
return f.read()
def _ParseException(self, str):
- for line in str.split("\n")[::-1]:
- if line and not line[0].isspace() and ":" in line:
- return line.split(":")[0]
-
+ # somefile:somelinenumber: someerror[: sometext]
+ match = re.search('^[^: ]*:[0-9]+: ([^ ]+?)($|: )', str, re.MULTILINE)
+ return match.group(1)
def IsFailureOutput(self, testcase):
output = testcase.output
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index c5a5c702f4..2e13f04b57 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -29,6 +29,7 @@ v8_executable("unittests") {
"cancelable-tasks-unittest.cc",
"char-predicates-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-job-unittest.cc",
+ "compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc",
"compiler/branch-elimination-unittest.cc",
"compiler/checkpoint-elimination-unittest.cc",
"compiler/common-operator-reducer-unittest.cc",
@@ -49,6 +50,7 @@ v8_executable("unittests") {
"compiler/instruction-selector-unittest.h",
"compiler/instruction-sequence-unittest.cc",
"compiler/instruction-sequence-unittest.h",
+ "compiler/instruction-unittest.cc",
"compiler/int64-lowering-unittest.cc",
"compiler/js-builtin-reducer-unittest.cc",
"compiler/js-create-lowering-unittest.cc",
@@ -82,7 +84,7 @@ v8_executable("unittests") {
"compiler/typed-optimization-unittest.cc",
"compiler/typer-unittest.cc",
"compiler/value-numbering-reducer-unittest.cc",
- "compiler/zone-pool-unittest.cc",
+ "compiler/zone-stats-unittest.cc",
"counters-unittest.cc",
"eh-frame-iterator-unittest.cc",
"eh-frame-writer-unittest.cc",
@@ -129,6 +131,8 @@ v8_executable("unittests") {
"wasm/switch-logic-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
+ "zone/segmentpool-unittest.cc",
+ "zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
]
@@ -161,6 +165,8 @@ v8_executable("unittests") {
#}],
deps = [
+ "../..:v8",
+ "../..:v8_libbase",
"../..:v8_libplatform",
"//build/config/sanitizers:deps",
"//build/win:default_exe_manifest",
@@ -168,12 +174,8 @@ v8_executable("unittests") {
"//testing/gtest",
]
- if (is_component_build) {
- # compiler-unittests can't be built against a shared library, so we
- # need to depend on the underlying static target in that case.
- deps += [ "../..:v8_maybe_snapshot" ]
- } else {
- deps += [ "../..:v8" ]
+ if (v8_enable_i18n_support) {
+ deps += [ "//third_party/icu" ]
}
if (is_win) {
@@ -181,13 +183,5 @@ v8_executable("unittests") {
# bytecode-utils.h.
# C4309: 'static_cast': truncation of constant value
cflags = [ "/wd4309" ]
-
- # Suppress warnings about importing locally defined symbols.
- if (is_component_build) {
- ldflags = [
- "/ignore:4049",
- "/ignore:4217",
- ]
- }
}
}
diff --git a/deps/v8/test/unittests/base/atomic-utils-unittest.cc b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
index 8e90c423e2..48711a2f98 100644
--- a/deps/v8/test/unittests/base/atomic-utils-unittest.cc
+++ b/deps/v8/test/unittests/base/atomic-utils-unittest.cc
@@ -16,7 +16,7 @@ TEST(AtomicNumber, Constructor) {
AtomicNumber<size_t> zero_size_t;
AtomicNumber<intptr_t> zero_intptr_t;
EXPECT_EQ(0, zero_int.Value());
- EXPECT_EQ(0U, zero_size_t.Value());
+ EXPECT_EQ(0u, zero_size_t.Value());
EXPECT_EQ(0, zero_intptr_t.Value());
}
@@ -27,7 +27,7 @@ TEST(AtomicNumber, Value) {
AtomicNumber<int> b(-1);
EXPECT_EQ(-1, b.Value());
AtomicNumber<size_t> c(1);
- EXPECT_EQ(1U, c.Value());
+ EXPECT_EQ(1u, c.Value());
AtomicNumber<size_t> d(static_cast<size_t>(-1));
EXPECT_EQ(std::numeric_limits<size_t>::max(), d.Value());
}
@@ -53,7 +53,7 @@ TEST(AtomicNumber, Increment) {
// Should work as decrement as well.
AtomicNumber<size_t> c(1);
c.Increment(-1);
- EXPECT_EQ(0U, c.Value());
+ EXPECT_EQ(0u, c.Value());
c.Increment(-1);
EXPECT_EQ(std::numeric_limits<size_t>::max(), c.Value());
}
@@ -61,7 +61,7 @@ TEST(AtomicNumber, Increment) {
TEST(AtomicNumber, Decrement) {
AtomicNumber<size_t> a(std::numeric_limits<size_t>::max());
a.Increment(1);
- EXPECT_EQ(0, a.Value());
+ EXPECT_EQ(0u, a.Value());
a.Decrement(1);
EXPECT_EQ(std::numeric_limits<size_t>::max(), a.Value());
}
@@ -84,7 +84,7 @@ TEST(AtomicNumber, OperatorSubtractionAssignment) {
namespace {
-enum TestFlag {
+enum TestFlag : base::AtomicWord {
kA,
kB,
kC,
@@ -125,6 +125,40 @@ TEST(AtomicValue, WithVoidStar) {
EXPECT_EQ(&dummy, a.Value());
}
+TEST(NoBarrierAtomicValue, Initial) {
+ NoBarrierAtomicValue<TestFlag> a(kA);
+ EXPECT_EQ(TestFlag::kA, a.Value());
+}
+
+TEST(NoBarrierAtomicValue, SetValue) {
+ NoBarrierAtomicValue<TestFlag> a(kB);
+ a.SetValue(kC);
+ EXPECT_EQ(TestFlag::kC, a.Value());
+}
+
+TEST(NoBarrierAtomicValue, WithVoidStar) {
+ NoBarrierAtomicValue<void*> a(nullptr);
+ NoBarrierAtomicValue<void*> dummy(nullptr);
+ EXPECT_EQ(nullptr, a.Value());
+ a.SetValue(&a);
+ EXPECT_EQ(&a, a.Value());
+}
+
+TEST(NoBarrierAtomicValue, Construction) {
+ NoBarrierAtomicValue<TestFlag> a(kA);
+ TestFlag b = kA;
+ NoBarrierAtomicValue<TestFlag>* ptr =
+ NoBarrierAtomicValue<TestFlag>::FromAddress(&b);
+ EXPECT_EQ(ptr->Value(), a.Value());
+}
+
+TEST(NoBarrierAtomicValue, ConstructionVoidStar) {
+ NoBarrierAtomicValue<void*> a(nullptr);
+ void* b = nullptr;
+ NoBarrierAtomicValue<void*>* ptr =
+ NoBarrierAtomicValue<void*>::FromAddress(&b);
+ EXPECT_EQ(ptr->Value(), a.Value());
+}
namespace {
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
new file mode 100644
index 0000000000..997765ff83
--- /dev/null
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(CompilerDispatcherTracerTest, EstimateZeroWithoutSamples) {
+ CompilerDispatcherTracer tracer(nullptr);
+
+ EXPECT_EQ(0.0, tracer.EstimatePrepareToParseInMs());
+ EXPECT_EQ(0.0, tracer.EstimateParseInMs(0));
+ EXPECT_EQ(0.0, tracer.EstimateParseInMs(42));
+ EXPECT_EQ(0.0, tracer.EstimateFinalizeParsingInMs());
+ EXPECT_EQ(0.0, tracer.EstimatePrepareToCompileInMs());
+ EXPECT_EQ(0.0, tracer.EstimateCompileInMs(0));
+ EXPECT_EQ(0.0, tracer.EstimateCompileInMs(42));
+ EXPECT_EQ(0.0, tracer.EstimateFinalizeCompilingInMs());
+}
+
+TEST(CompilerDispatcherTracerTest, Average) {
+ CompilerDispatcherTracer tracer(nullptr);
+
+ EXPECT_EQ(0.0, tracer.EstimatePrepareToParseInMs());
+
+ tracer.RecordPrepareToParse(1.0);
+ tracer.RecordPrepareToParse(2.0);
+ tracer.RecordPrepareToParse(3.0);
+
+ EXPECT_EQ((1.0 + 2.0 + 3.0) / 3, tracer.EstimatePrepareToParseInMs());
+}
+
+TEST(CompilerDispatcherTracerTest, SizeBasedAverage) {
+ CompilerDispatcherTracer tracer(nullptr);
+
+ EXPECT_EQ(0.0, tracer.EstimateParseInMs(100));
+
+ // All three samples parse 100 units/ms.
+ tracer.RecordParse(1.0, 100);
+ tracer.RecordParse(2.0, 200);
+ tracer.RecordParse(3.0, 300);
+
+ EXPECT_EQ(1.0, tracer.EstimateParseInMs(100));
+ EXPECT_EQ(5.0, tracer.EstimateParseInMs(500));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index 9486d1fe6e..9fcaedc0f8 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -65,8 +65,9 @@ TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
inner_phi, Int32Constant(3), outer_merge);
- Node* ret = graph()->NewNode(common()->Return(), outer_phi, graph()->start(),
- outer_merge);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, outer_phi,
+ graph()->start(), outer_merge);
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
Reduce();
@@ -106,8 +107,9 @@ TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
Int32Constant(1), inner_phi, outer_merge);
- Node* ret = graph()->NewNode(common()->Return(), outer_phi, graph()->start(),
- outer_merge);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, outer_phi,
+ graph()->start(), outer_merge);
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
Reduce();
@@ -144,8 +146,9 @@ TEST_F(BranchEliminationTest, BranchAfterDiamond) {
Node* add = graph()->NewNode(machine()->Int32Add(), phi1, phi2);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
Node* ret =
- graph()->NewNode(common()->Return(), add, graph()->start(), merge2);
+ graph()->NewNode(common()->Return(), zero, add, graph()->start(), merge2);
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
Reduce();
@@ -176,8 +179,9 @@ TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
Node* inner_branch = graph()->NewNode(common()->Branch(), condition, loop);
Node* inner_if_true = graph()->NewNode(common()->IfTrue(), inner_branch);
- Node* ret1 = graph()->NewNode(common()->Return(), Int32Constant(2), effect,
- inner_if_true);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret1 = graph()->NewNode(common()->Return(), zero, Int32Constant(2),
+ effect, inner_if_true);
Node* inner_if_false = graph()->NewNode(common()->IfFalse(), inner_branch);
loop->AppendInput(zone(), inner_if_false);
@@ -191,7 +195,7 @@ TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
Node* outer_ephi = graph()->NewNode(common()->EffectPhi(2), effect,
graph()->start(), outer_merge);
- Node* ret2 = graph()->NewNode(common()->Return(), Int32Constant(1),
+ Node* ret2 = graph()->NewNode(common()->Return(), zero, Int32Constant(1),
outer_ephi, outer_merge);
Node* terminate = graph()->NewNode(common()->Terminate(), effect, loop);
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index d284772395..f294a30596 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -361,7 +361,9 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) {
Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
vtrue, vfalse, merge);
- Node* ret = graph()->NewNode(common()->Return(), phi, ephi, merge);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, ephi, merge);
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
StrictMock<MockAdvancedReducerEditor> editor;
EXPECT_CALL(editor, Replace(merge, IsDead()));
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 787dae01dd..7718e51e82 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -192,10 +192,10 @@ TEST_F(CommonOperatorTest, Return) {
const Operator* const op = common()->Return(input_count);
EXPECT_EQ(IrOpcode::kReturn, op->opcode());
EXPECT_EQ(Operator::kNoThrow, op->properties());
- EXPECT_EQ(input_count, op->ValueInputCount());
+ EXPECT_EQ(input_count + 1, op->ValueInputCount());
EXPECT_EQ(1, op->EffectInputCount());
EXPECT_EQ(1, op->ControlInputCount());
- EXPECT_EQ(2 + input_count, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(3 + input_count, OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(0, op->ValueOutputCount());
EXPECT_EQ(0, op->EffectOutputCount());
EXPECT_EQ(1, op->ControlOutputCount());
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index 6534e90ccc..093b46abdb 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -4,9 +4,9 @@
#include "src/compiler/control-equivalence.h"
#include "src/bit-vector.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/source-position.h"
#include "src/zone/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 0a12ea371a..3aee3a832f 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/access-builder.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
@@ -29,16 +30,20 @@ class EffectControlLinearizerTest : public GraphTest {
javascript_(zone()),
simplified_(zone()),
jsgraph_(isolate(), graph(), common(), &javascript_, &simplified_,
- &machine_) {}
+ &machine_) {
+ source_positions_ = new (zone()) SourcePositionTable(graph());
+ }
JSGraph* jsgraph() { return &jsgraph_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ SourcePositionTable* source_positions() { return source_positions_; }
private:
MachineOperatorBuilder machine_;
JSOperatorBuilder javascript_;
SimplifiedOperatorBuilder simplified_;
JSGraph jsgraph_;
+ SourcePositionTable* source_positions_;
};
namespace {
@@ -60,7 +65,8 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) {
Node* load = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
graph()->start(), graph()->start());
- Node* ret = graph()->NewNode(common()->Return(), load, graph()->start(),
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, load, graph()->start(),
graph()->start());
// Build the basic block structure.
@@ -75,7 +81,8 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) {
schedule.AddReturn(start, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
+ source_positions());
introducer.Run();
EXPECT_THAT(load,
@@ -105,8 +112,9 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) {
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
Node* ret =
- graph()->NewNode(common()->Return(), phi, graph()->start(), merge);
+ graph()->NewNode(common()->Return(), zero, phi, graph()->start(), merge);
// Build the basic block structure.
BasicBlock* start = schedule.start();
@@ -135,7 +143,8 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) {
schedule.AddReturn(mblock, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
+ source_positions());
introducer.Run();
// The effect input to the return should be an effect phi with the
@@ -206,8 +215,9 @@ TEST_F(EffectControlLinearizerTest, FloatingDiamondsControlWiring) {
Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
Node* merge2 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- Node* ret =
- graph()->NewNode(common()->Return(), call, graph()->start(), if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, call, graph()->start(),
+ if_success);
// Build the basic block structure.
BasicBlock* start = schedule.start();
@@ -252,7 +262,8 @@ TEST_F(EffectControlLinearizerTest, FloatingDiamondsControlWiring) {
schedule.AddReturn(m2block, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
+ source_positions());
introducer.Run();
// The effect input to the return should be an effect phi with the
@@ -289,7 +300,9 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), heap_number,
graph()->start(), loop);
- Node* ret = graph()->NewNode(common()->Return(), load, effect_phi, if_true);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, load, effect_phi, if_true);
// Build the basic block structure.
BasicBlock* start = schedule.start();
@@ -318,7 +331,8 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
schedule.AddReturn(rblock, ret);
// Run the state effect introducer.
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
+ source_positions());
introducer.Run();
ASSERT_THAT(ret, IsReturn(load, load, if_true));
@@ -380,7 +394,8 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
schedule.AddNode(mblock, merge);
schedule.AddNode(mblock, graph()->end());
- EffectControlLinearizer introducer(jsgraph(), &schedule, zone());
+ EffectControlLinearizer introducer(jsgraph(), &schedule, zone(),
+ source_positions());
introducer.Run();
Capture<Node *> branch1_capture, branch2_capture;
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
index 3a233d6872..9cce5475fd 100644
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -119,8 +119,9 @@ class EscapeAnalysisTest : public TypedGraphTest {
if (!control) {
control = control_;
}
- return control_ =
- graph()->NewNode(common()->Return(), value, effect, control);
+ Node* zero = graph()->NewNode(common()->NumberConstant(0));
+ return control_ = graph()->NewNode(common()->Return(), zero, value, effect,
+ control);
}
void EndGraph() {
@@ -224,7 +225,7 @@ TEST_F(EscapeAnalysisTest, StraightNonEscape) {
Transformation();
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
}
@@ -250,7 +251,7 @@ TEST_F(EscapeAnalysisTest, StraightNonEscapeNonConstStore) {
Transformation();
- ASSERT_EQ(load, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(load, NodeProperties::GetValueInput(result, 1));
}
@@ -272,7 +273,7 @@ TEST_F(EscapeAnalysisTest, StraightEscape) {
Transformation();
- ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 1));
}
@@ -300,7 +301,7 @@ TEST_F(EscapeAnalysisTest, StoreLoadEscape) {
Transformation();
- ASSERT_EQ(finish1, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(finish1, NodeProperties::GetValueInput(result, 1));
}
@@ -333,7 +334,7 @@ TEST_F(EscapeAnalysisTest, BranchNonEscape) {
Transformation();
- ASSERT_EQ(replacement_phi, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(replacement_phi, NodeProperties::GetValueInput(result, 1));
}
@@ -365,7 +366,7 @@ TEST_F(EscapeAnalysisTest, BranchEscapeOne) {
Transformation();
- ASSERT_EQ(load, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(load, NodeProperties::GetValueInput(result, 1));
}
@@ -400,7 +401,7 @@ TEST_F(EscapeAnalysisTest, BranchEscapeThroughStore) {
Transformation();
- ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 1));
}
@@ -425,7 +426,7 @@ TEST_F(EscapeAnalysisTest, DanglingLoadOrder) {
Transformation();
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
}
@@ -461,7 +462,7 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
Transformation();
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
ASSERT_EQ(1, object_state->op()->ValueInputCount());
@@ -501,7 +502,7 @@ TEST_F(EscapeAnalysisTest, DISABLED_DeoptReplacementIdentity) {
Transformation();
- ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 1));
Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index 7d94793459..ca814e5af8 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -290,12 +290,13 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ValueUse) {
CommonOperatorBuilder common(zone());
Node* node = graph()->NewNode(&kMockOperator);
Node* start = graph()->NewNode(common.Start(1));
- Node* use_value = graph()->NewNode(common.Return(), node, start, start);
+ Node* zero = graph()->NewNode(common.Int32Constant(0));
+ Node* use_value = graph()->NewNode(common.Return(), zero, node, start, start);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), nullptr);
ReplaceWithValueReducer r(&graph_reducer);
r.ReplaceWithValue(node, replacement);
- EXPECT_EQ(replacement, use_value->InputAt(0));
+ EXPECT_EQ(replacement, use_value->InputAt(1));
EXPECT_EQ(0, node->UseCount());
EXPECT_EQ(1, replacement->UseCount());
EXPECT_THAT(replacement->uses(), ElementsAre(use_value));
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 399f985370..dc2ba7814b 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -13,7 +13,11 @@ namespace v8 {
namespace internal {
namespace compiler {
-GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
+GraphTest::GraphTest(int num_parameters)
+ : TestWithNativeContext(),
+ TestWithIsolateAndZone(),
+ common_(zone()),
+ graph_(zone()) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
}
@@ -54,7 +58,7 @@ Node* GraphTest::NumberConstant(volatile double value) {
Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
Node* node = graph()->NewNode(common()->HeapConstant(value));
- Type* type = Type::Constant(value, zone());
+ Type* type = Type::NewConstant(value, zone());
NodeProperties::SetType(node, type);
return node;
}
@@ -99,10 +103,8 @@ Matcher<Node*> GraphTest::IsUndefinedConstant() {
return IsHeapConstant(factory()->undefined_value());
}
-
TypedGraphTest::TypedGraphTest(int num_parameters)
- : GraphTest(num_parameters), typer_(isolate(), graph()) {}
-
+ : GraphTest(num_parameters), typer_(isolate(), Typer::kNoFlags, graph()) {}
TypedGraphTest::~TypedGraphTest() {}
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index d4248e422b..2542e68a91 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -23,8 +23,8 @@ namespace compiler {
using ::testing::Matcher;
-
-class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
+class GraphTest : public virtual TestWithNativeContext,
+ public virtual TestWithIsolateAndZone {
public:
explicit GraphTest(int num_parameters = 1);
~GraphTest() override;
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 18ccaaaea5..5c67a1ece0 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -5,6 +5,7 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
#include "src/code-factory.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
#include "src/flags.h"
@@ -166,7 +167,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
ASSERT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
EXPECT_FLOAT_EQ(kValue, s.ToFloat32(s[0]->OutputAt(0)));
EXPECT_EQ(kArchRet, s[1]->arch_opcode());
- EXPECT_EQ(1U, s[1]->InputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
}
@@ -178,7 +179,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kArchRet, s[1]->arch_opcode());
- EXPECT_EQ(1U, s[1]->InputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
}
@@ -192,7 +193,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
EXPECT_EQ(kArchRet, s[1]->arch_opcode());
- EXPECT_EQ(1U, s[1]->InputCount());
+ EXPECT_EQ(2U, s[1]->InputCount());
}
@@ -251,7 +252,7 @@ TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
ASSERT_TRUE(s[0]->Output()->IsUnallocated());
EXPECT_EQ(kArchRet, s[1]->arch_opcode());
EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[1]->InputAt(1)));
EXPECT_TRUE(s.IsReference(finish));
}
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index 93cef0544e..cdc14a37c2 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -49,7 +49,8 @@ class InstructionSelectorTest : public TestWithContext,
test->isolate(), new (test->zone()) Graph(test->zone()),
MakeCallDescriptor(test->zone(), return_type, parameter0_type),
MachineType::PointerRepresentation(),
- MachineOperatorBuilder::kAllOptionalOps),
+ MachineOperatorBuilder::kAllOptionalOps,
+ InstructionSelector::AlignmentRequirements()),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type)
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index a0a86e043a..9d17c26a56 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -19,14 +19,6 @@ static const char*
static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
RegisterConfiguration::kMaxFPRegisters)];
-namespace {
-static int allocatable_codes[InstructionSequenceTest::kDefaultNRegs] = {
- 0, 1, 2, 3, 4, 5, 6, 7};
-static int allocatable_double_codes[InstructionSequenceTest::kDefaultNRegs] = {
- 0, 1, 2, 3, 4, 5, 6, 7};
-}
-
-
static void InitializeRegisterNames() {
char* loc = register_names_;
for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
@@ -41,7 +33,6 @@ static void InitializeRegisterNames() {
}
}
-
InstructionSequenceTest::InstructionSequenceTest()
: sequence_(nullptr),
num_general_registers_(kDefaultNRegs),
@@ -62,21 +53,35 @@ void InstructionSequenceTest::SetNumRegs(int num_general_registers,
num_double_registers_ = num_double_registers;
}
+int InstructionSequenceTest::GetNumRegs(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return config()->num_float_registers();
+ case MachineRepresentation::kFloat64:
+ return config()->num_double_registers();
+ case MachineRepresentation::kSimd128:
+ return config()->num_simd128_registers();
+ default:
+ return config()->num_general_registers();
+ }
+}
-RegisterConfiguration* InstructionSequenceTest::config() {
- if (!config_) {
- config_.reset(new RegisterConfiguration(
- num_general_registers_, num_double_registers_, num_general_registers_,
- num_double_registers_, num_double_registers_, allocatable_codes,
- allocatable_double_codes,
- kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
- : RegisterConfiguration::COMBINE,
- general_register_names_,
- double_register_names_, // float register names
- double_register_names_,
- double_register_names_)); // SIMD 128 register names
+int InstructionSequenceTest::GetAllocatableCode(int index,
+ MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return config()->GetAllocatableFloatCode(index);
+ case MachineRepresentation::kFloat64:
+ return config()->GetAllocatableDoubleCode(index);
+ case MachineRepresentation::kSimd128:
+ return config()->GetAllocatableSimd128Code(index);
+ default:
+ return config()->GetAllocatableGeneralCode(index);
}
- return config_.get();
+}
+
+const RegisterConfiguration* InstructionSequenceTest::config() {
+ return sequence()->GetRegisterConfigurationForTesting();
}
@@ -149,13 +154,12 @@ InstructionSequenceTest::TestOperand InstructionSequenceTest::Imm(int32_t imm) {
InstructionSequenceTest::VReg InstructionSequenceTest::Define(
TestOperand output_op) {
- VReg vreg = NewReg();
+ VReg vreg = NewReg(output_op);
InstructionOperand outputs[1]{ConvertOutputOp(vreg, output_op)};
Emit(kArchNop, 1, outputs);
return vreg;
}
-
Instruction* InstructionSequenceTest::Return(TestOperand input_op_0) {
block_returns_ = true;
InstructionOperand inputs[1]{ConvertInputOp(input_op_0)};
@@ -240,7 +244,7 @@ Instruction* InstructionSequenceTest::EmitI(TestOperand input_op_0,
InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
- VReg output_vreg = NewReg();
+ VReg output_vreg = NewReg(output_op);
InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
Emit(kArchNop, 1, outputs, input_size, mapped_inputs);
@@ -259,7 +263,8 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
TestOperand output_op_0, TestOperand output_op_1, size_t input_size,
TestOperand* inputs) {
- VRegPair output_vregs = std::make_pair(NewReg(), NewReg());
+ VRegPair output_vregs =
+ std::make_pair(NewReg(output_op_0), NewReg(output_op_1));
InstructionOperand outputs[2]{
ConvertOutputOp(output_vregs.first, output_op_0),
ConvertOutputOp(output_vregs.second, output_op_1)};
@@ -280,7 +285,7 @@ InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
- VReg output_vreg = NewReg();
+ VReg output_vreg = NewReg(output_op);
InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
CHECK(UnallocatedOperand::cast(outputs[0]).HasFixedPolicy());
InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
@@ -387,11 +392,25 @@ InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
case kSlot:
return Unallocated(op, UnallocatedOperand::MUST_HAVE_SLOT,
UnallocatedOperand::USED_AT_START);
- case kFixedRegister:
- CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
- return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
+ case kFixedRegister: {
+ MachineRepresentation rep = GetCanonicalRep(op);
+ CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
+ if (DoesRegisterAllocation()) {
+ auto extended_policy = IsFloatingPoint(rep)
+ ? UnallocatedOperand::FIXED_FP_REGISTER
+ : UnallocatedOperand::FIXED_REGISTER;
+ return Unallocated(op, extended_policy, op.value_);
+ } else {
+ return AllocatedOperand(LocationOperand::REGISTER, rep, op.value_);
+ }
+ }
case kFixedSlot:
- return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+ if (DoesRegisterAllocation()) {
+ return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+ } else {
+ return AllocatedOperand(LocationOperand::STACK_SLOT,
+ GetCanonicalRep(op), op.value_);
+ }
default:
break;
}
@@ -410,10 +429,24 @@ InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
case kRegister:
return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER);
case kFixedSlot:
- return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
- case kFixedRegister:
- CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
- return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
+ if (DoesRegisterAllocation()) {
+ return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+ } else {
+ return AllocatedOperand(LocationOperand::STACK_SLOT,
+ GetCanonicalRep(op), op.value_);
+ }
+ case kFixedRegister: {
+ MachineRepresentation rep = GetCanonicalRep(op);
+ CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
+ if (DoesRegisterAllocation()) {
+ auto extended_policy = IsFloatingPoint(rep)
+ ? UnallocatedOperand::FIXED_FP_REGISTER
+ : UnallocatedOperand::FIXED_REGISTER;
+ return Unallocated(op, extended_policy, op.value_);
+ } else {
+ return AllocatedOperand(LocationOperand::REGISTER, rep, op.value_);
+ }
+ }
default:
break;
}
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
index 956f5d55b9..2c4df038fb 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -19,14 +19,20 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
public:
static const int kDefaultNRegs = 8;
static const int kNoValue = kMinInt;
+ static const MachineRepresentation kNoRep = MachineRepresentation::kNone;
+ static const MachineRepresentation kFloat32 = MachineRepresentation::kFloat32;
+ static const MachineRepresentation kFloat64 = MachineRepresentation::kFloat64;
+ static const MachineRepresentation kSimd128 = MachineRepresentation::kSimd128;
typedef RpoNumber Rpo;
struct VReg {
VReg() : value_(kNoValue) {}
VReg(PhiInstruction* phi) : value_(phi->virtual_register()) {} // NOLINT
- explicit VReg(int value) : value_(value) {}
+ explicit VReg(int value, MachineRepresentation rep = kNoRep)
+ : value_(value), rep_(rep) {}
int value_;
+ MachineRepresentation rep_ = kNoRep;
};
typedef std::pair<VReg, VReg> VRegPair;
@@ -47,43 +53,64 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
};
struct TestOperand {
- TestOperand() : type_(kInvalid), vreg_(), value_(kNoValue) {}
- TestOperand(TestOperandType type, int imm)
- : type_(type), vreg_(), value_(imm) {}
+ TestOperand() : type_(kInvalid), vreg_(), value_(kNoValue), rep_(kNoRep) {}
+ explicit TestOperand(TestOperandType type)
+ : type_(type), vreg_(), value_(kNoValue), rep_(kNoRep) {}
+ // For tests that do register allocation.
TestOperand(TestOperandType type, VReg vreg, int value = kNoValue)
- : type_(type), vreg_(vreg), value_(value) {}
+ : type_(type), vreg_(vreg), value_(value), rep_(vreg.rep_) {}
+ // For immediates, constants, and tests that don't do register allocation.
+ TestOperand(TestOperandType type, int value,
+ MachineRepresentation rep = kNoRep)
+ : type_(type), vreg_(), value_(value), rep_(rep) {}
TestOperandType type_;
VReg vreg_;
int value_;
+ MachineRepresentation rep_;
};
- static TestOperand Same() { return TestOperand(kSameAsFirst, VReg()); }
+ static TestOperand Same() { return TestOperand(kSameAsFirst); }
static TestOperand ExplicitReg(int index) {
TestOperandType type = kExplicit;
- return TestOperand(type, VReg(), index);
+ return TestOperand(type, index);
+ }
+
+ static TestOperand ExplicitFPReg(int index,
+ MachineRepresentation rep = kFloat64) {
+ TestOperandType type = kExplicit;
+ return TestOperand(type, index, rep);
}
static TestOperand Reg(VReg vreg, int index = kNoValue) {
- TestOperandType type = kRegister;
- if (index != kNoValue) type = kFixedRegister;
+ TestOperandType type = (index == kNoValue) ? kRegister : kFixedRegister;
return TestOperand(type, vreg, index);
}
- static TestOperand Reg(int index = kNoValue) { return Reg(VReg(), index); }
+ static TestOperand Reg(int index = kNoValue,
+ MachineRepresentation rep = kNoRep) {
+ return Reg(VReg(kNoValue, rep), index);
+ }
+
+ static TestOperand FPReg(int index = kNoValue,
+ MachineRepresentation rep = kFloat64) {
+ return Reg(index, rep);
+ }
static TestOperand Slot(VReg vreg, int index = kNoValue) {
- TestOperandType type = kSlot;
- if (index != kNoValue) type = kFixedSlot;
+ TestOperandType type = (index == kNoValue) ? kSlot : kFixedSlot;
return TestOperand(type, vreg, index);
}
- static TestOperand Slot(int index = kNoValue) { return Slot(VReg(), index); }
+ static TestOperand Slot(int index = kNoValue,
+ MachineRepresentation rep = kNoRep) {
+ return Slot(VReg(kNoValue, rep), index);
+ }
static TestOperand Const(int index) {
CHECK_NE(kNoValue, index);
- return TestOperand(kConstant, VReg(), index);
+ return TestOperand(kConstant, index);
}
static TestOperand Use(VReg vreg) { return TestOperand(kNone, vreg); }
@@ -129,7 +156,9 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
InstructionSequenceTest();
void SetNumRegs(int num_general_registers, int num_double_registers);
- RegisterConfiguration* config();
+ int GetNumRegs(MachineRepresentation rep);
+ int GetAllocatableCode(int index, MachineRepresentation rep = kNoRep);
+ const RegisterConfiguration* config();
InstructionSequence* sequence();
void StartLoop(int loop_blocks);
@@ -140,6 +169,14 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
TestOperand Imm(int32_t imm = 0);
VReg Define(TestOperand output_op);
VReg Parameter(TestOperand output_op = Reg()) { return Define(output_op); }
+ VReg FPParameter(MachineRepresentation rep = kFloat64) {
+ return Parameter(FPReg(kNoValue, rep));
+ }
+
+ MachineRepresentation GetCanonicalRep(TestOperand op) {
+ return IsFloatingPoint(op.rep_) ? op.rep_
+ : sequence()->DefaultRepresentation();
+ }
Instruction* Return(TestOperand input_op_0);
Instruction* Return(VReg vreg) { return Return(Reg(vreg, 0)); }
@@ -177,16 +214,21 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
TestOperand input_op_3 = TestOperand());
InstructionBlock* current_block() const { return current_block_; }
- int num_general_registers() const { return num_general_registers_; }
- int num_double_registers() const { return num_double_registers_; }
// Called after all instructions have been inserted.
void WireBlocks();
private:
- VReg NewReg() { return VReg(sequence()->NextVirtualRegister()); }
+ virtual bool DoesRegisterAllocation() const { return true; }
+
+ VReg NewReg(TestOperand op = TestOperand()) {
+ int vreg = sequence()->NextVirtualRegister();
+ if (IsFloatingPoint(op.rep_))
+ sequence()->MarkAsRepresentation(op.rep_, vreg);
+ return VReg(vreg, op.rep_);
+ }
- static TestOperand Invalid() { return TestOperand(kInvalid, VReg()); }
+ static TestOperand Invalid() { return TestOperand(kInvalid); }
Instruction* EmitBranch(TestOperand input_op);
Instruction* EmitFallThrough();
diff --git a/deps/v8/test/unittests/compiler/instruction-unittest.cc b/deps/v8/test/unittests/compiler/instruction-unittest.cc
new file mode 100644
index 0000000000..443c42b62a
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/instruction-unittest.cc
@@ -0,0 +1,175 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+#include "src/register-configuration.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const MachineRepresentation kWord = MachineRepresentation::kWord32;
+const MachineRepresentation kFloat = MachineRepresentation::kFloat32;
+const MachineRepresentation kDouble = MachineRepresentation::kFloat64;
+
+bool Interfere(LocationOperand::LocationKind kind, MachineRepresentation rep1,
+ int index1, MachineRepresentation rep2, int index2) {
+ return AllocatedOperand(kind, rep1, index1)
+ .InterferesWith(AllocatedOperand(kind, rep2, index2));
+}
+
+bool Contains(const ZoneVector<MoveOperands*>* moves,
+ const InstructionOperand& to, const InstructionOperand& from) {
+ for (auto move : *moves) {
+ if (move->destination().Equals(to) && move->source().Equals(from)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+class InstructionTest : public TestWithZone {
+ public:
+ InstructionTest() {}
+ virtual ~InstructionTest() {}
+
+ ParallelMove* CreateParallelMove(
+ const std::vector<InstructionOperand>& operand_pairs) {
+ ParallelMove* parallel_move = new (zone()) ParallelMove(zone());
+ for (size_t i = 0; i < operand_pairs.size(); i += 2)
+ parallel_move->AddMove(operand_pairs[i + 1], operand_pairs[i]);
+ return parallel_move;
+ }
+};
+
+TEST_F(InstructionTest, OperandInterference) {
+ // All general registers and slots interfere only with themselves.
+ for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
+ EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kWord, i, kWord, i));
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kWord, i, kWord, i));
+ for (int j = i + 1; j < RegisterConfiguration::kMaxGeneralRegisters; ++j) {
+ EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kWord, i, kWord, j));
+ EXPECT_FALSE(Interfere(LocationOperand::STACK_SLOT, kWord, i, kWord, j));
+ }
+ }
+
+ // All FP registers interfere with themselves.
+ for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
+ EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kFloat, i, kFloat, i));
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kFloat, i, kFloat, i));
+ EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kDouble, i, kDouble, i));
+ EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i));
+ }
+
+ if (kSimpleFPAliasing) {
+ // Simple FP aliasing: interfering registers of different reps have the same
+ // index.
+ for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
+ EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kFloat, i, kDouble, i));
+ EXPECT_TRUE(Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, i));
+ for (int j = i + 1; j < RegisterConfiguration::kMaxFPRegisters; ++j) {
+ EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kWord, i, kWord, j));
+ EXPECT_FALSE(
+ Interfere(LocationOperand::STACK_SLOT, kWord, i, kWord, j));
+ }
+ }
+ } else {
+ // Complex FP aliasing: sub-registers interfere with containing registers.
+ // Test sub-register indices which may not exist on the platform. This is
+ // necessary since the GapResolver may split large moves into smaller ones.
+ for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
+ EXPECT_TRUE(
+ Interfere(LocationOperand::REGISTER, kFloat, i * 2, kDouble, i));
+ EXPECT_TRUE(
+ Interfere(LocationOperand::REGISTER, kFloat, i * 2 + 1, kDouble, i));
+ EXPECT_TRUE(
+ Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, i * 2));
+ EXPECT_TRUE(
+ Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, i * 2 + 1));
+
+ for (int j = i + 1; j < RegisterConfiguration::kMaxFPRegisters; ++j) {
+ EXPECT_FALSE(
+ Interfere(LocationOperand::REGISTER, kFloat, i * 2, kDouble, j));
+ EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kFloat, i * 2 + 1,
+ kDouble, j));
+ EXPECT_FALSE(
+ Interfere(LocationOperand::REGISTER, kDouble, i, kFloat, j * 2));
+ EXPECT_FALSE(Interfere(LocationOperand::REGISTER, kDouble, i, kFloat,
+ j * 2 + 1));
+ }
+ }
+ }
+}
+
+TEST_F(InstructionTest, PrepareInsertAfter) {
+ InstructionOperand r0 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, 0);
+ InstructionOperand r1 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, 1);
+ InstructionOperand r2 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, 2);
+
+ InstructionOperand d0 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat64, 0);
+ InstructionOperand d1 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat64, 1);
+ InstructionOperand d2 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat64, 2);
+
+ {
+ // Moves inserted after should pick up assignments to their sources.
+ // Moves inserted after should cause interfering moves to be eliminated.
+ ZoneVector<MoveOperands*> to_eliminate(zone());
+ std::vector<InstructionOperand> moves = {
+ r1, r0, // r1 <- r0
+ r2, r0, // r2 <- r0
+ d1, d0, // d1 <- d0
+ d2, d0 // d2 <- d0
+ };
+
+ ParallelMove* pm = CreateParallelMove(moves);
+ MoveOperands m1(r1, r2); // r2 <- r1
+ pm->PrepareInsertAfter(&m1, &to_eliminate);
+ CHECK(m1.source().Equals(r0));
+ CHECK(Contains(&to_eliminate, r2, r0));
+ MoveOperands m2(d1, d2); // d2 <- d1
+ pm->PrepareInsertAfter(&m2, &to_eliminate);
+ CHECK(m2.source().Equals(d0));
+ CHECK(Contains(&to_eliminate, d2, d0));
+ }
+
+ if (!kSimpleFPAliasing) {
+ // Moves inserted after should cause all interfering moves to be eliminated.
+ auto s0 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 0);
+ auto s1 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 1);
+ auto s2 = AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kFloat32, 2);
+
+ {
+ ZoneVector<MoveOperands*> to_eliminate(zone());
+ std::vector<InstructionOperand> moves = {
+ s0, s2, // s0 <- s2
+ s1, s2 // s1 <- s2
+ };
+
+ ParallelMove* pm = CreateParallelMove(moves);
+ MoveOperands m1(d1, d0); // d0 <- d1
+ pm->PrepareInsertAfter(&m1, &to_eliminate);
+ CHECK(Contains(&to_eliminate, s0, s2));
+ CHECK(Contains(&to_eliminate, s1, s2));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 06ac524111..400eafb4dc 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -40,8 +40,9 @@ class Int64LoweringTest : public GraphTest {
MachineOperatorBuilder* machine() { return &machine_; }
void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) {
- Node* ret = graph()->NewNode(common()->Return(), node, graph()->start(),
- graph()->start());
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, node,
+ graph()->start(), graph()->start());
NodeProperties::MergeControlToEnd(graph(), common(), ret);
Int64Lowering lowering(graph(), machine(), common(), zone(), signature);
@@ -216,7 +217,8 @@ TEST_F(Int64LoweringTest, UnalignedInt64Load) {
Int32Constant(base), Int32Constant(index), \
Int64Constant(value(0)), start(), start()); \
\
- Node* ret = graph()->NewNode(common()->Return(), \
+ Node* zero = graph()->NewNode(common()->Int32Constant(0)); \
+ Node* ret = graph()->NewNode(common()->Return(), zero, \
Int32Constant(return_value), store, start()); \
\
NodeProperties::MergeControlToEnd(graph(), common(), ret); \
@@ -313,7 +315,7 @@ TEST_F(Int64LoweringTest, CallI64Return) {
CompareCallDescriptors(
OpParameter<const CallDescriptor*>(
- graph()->end()->InputAt(1)->InputAt(0)->InputAt(0)),
+ graph()->end()->InputAt(1)->InputAt(1)->InputAt(0)),
wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
}
@@ -347,7 +349,7 @@ TEST_F(Int64LoweringTest, CallI64Parameter) {
CompareCallDescriptors(
OpParameter<const CallDescriptor*>(
- graph()->end()->InputAt(1)->InputAt(0)),
+ graph()->end()->InputAt(1)->InputAt(1)),
wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
}
@@ -849,6 +851,30 @@ TEST_F(Int64LoweringTest, I64ReverseBytes) {
IsWord32ReverseBytes(IsInt32Constant(low_word_value(0))),
start(), start()));
}
+
+TEST_F(Int64LoweringTest, EffectPhiLoop) {
+ // Construct a cycle consisting of an EffectPhi, a Store, and a Load.
+ Node* eff_phi = graph()->NewNode(common()->EffectPhi(1), graph()->start(),
+ graph()->start());
+
+ StoreRepresentation store_rep(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier);
+ LoadRepresentation load_rep(MachineType::Int64());
+
+ Node* load =
+ graph()->NewNode(machine()->Load(load_rep), Int64Constant(value(0)),
+ Int64Constant(value(1)), eff_phi, graph()->start());
+
+ Node* store =
+ graph()->NewNode(machine()->Store(store_rep), Int64Constant(value(0)),
+ Int64Constant(value(1)), load, load, graph()->start());
+
+ eff_phi->InsertInput(zone(), 1, store);
+ NodeProperties::ChangeOp(eff_phi,
+ common()->ResizeMergeOrPhi(eff_phi->op(), 2));
+
+ LowerGraph(load, MachineRepresentation::kWord64);
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 48debc368c..e3857ef526 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -33,8 +33,10 @@ class JSBuiltinReducerTest : public TypedGraphTest {
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
+
JSBuiltinReducer reducer(&graph_reducer, &jsgraph,
- JSBuiltinReducer::kNoFlags, nullptr);
+ JSBuiltinReducer::kNoFlags, nullptr,
+ native_context());
return reducer.Reduce(node);
}
@@ -1517,7 +1519,7 @@ TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32) {
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberToInt32(p0));
+ EXPECT_EQ(p0, r.replacement());
}
}
@@ -1537,7 +1539,7 @@ TEST_F(JSBuiltinReducerTest, NumberParseIntWithIntegral32AndUndefined) {
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberToInt32(p0));
+ EXPECT_EQ(p0, r.replacement());
}
}
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index ebb1633401..f4a1192abf 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -40,8 +40,8 @@ class JSCreateLoweringTest : public TypedGraphTest {
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph,
- MaybeHandle<LiteralsArray>(),
- MaybeHandle<Context>(), zone());
+ MaybeHandle<LiteralsArray>(), native_context(),
+ zone());
return reducer.Reduce(node);
}
@@ -65,7 +65,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
TEST_F(JSCreateLoweringTest, JSCreate) {
Handle<JSFunction> function = isolate()->object_function();
- Node* const target = Parameter(Type::Constant(function, graph()->zone()));
+ Node* const target = Parameter(Type::HeapConstant(function, graph()->zone()));
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Reduction r = Reduce(graph()->NewNode(javascript()->Create(), target, target,
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index ec1ff19880..6883052abb 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -70,11 +70,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
return buffer;
}
- Matcher<Node*> IsIntPtrConstant(intptr_t value) {
- return sizeof(value) == 4 ? IsInt32Constant(static_cast<int32_t>(value))
- : IsInt64Constant(static_cast<int64_t>(value));
- }
-
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -126,8 +121,42 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
// -----------------------------------------------------------------------------
-// JSToNumber
+// JSToName
+TEST_F(JSTypedLoweringTest, JSToNameWithString) {
+ Node* const input = Parameter(Type::String(), 0);
+ Node* const context = Parameter(Type::Any(), 1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToName(), input, context,
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(input, r.replacement());
+}
+
+TEST_F(JSTypedLoweringTest, JSToNameWithSymbol) {
+ Node* const input = Parameter(Type::Symbol(), 0);
+ Node* const context = Parameter(Type::Any(), 1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToName(), input, context,
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(input, r.replacement());
+}
+
+TEST_F(JSTypedLoweringTest, JSToNameWithAny) {
+ Node* const input = Parameter(Type::Any(), 0);
+ Node* const context = Parameter(Type::Any(), 1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToName(), input, context,
+ EmptyFrameState(), effect, control));
+ ASSERT_FALSE(r.Changed());
+}
+
+// -----------------------------------------------------------------------------
+// JSToNumber
TEST_F(JSTypedLoweringTest, JSToNumberWithPlainPrimitive) {
Node* const input = Parameter(Type::PlainPrimitive(), 0);
@@ -569,7 +598,7 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
EXPECT_THAT(
r.replacement(),
IsLoadBuffer(BufferAccess(type),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ IsPointerConstant(bit_cast<intptr_t>(&backing_store[0])),
offset_matcher,
IsNumberConstant(array->byte_length()->Number()), effect,
control));
@@ -605,7 +634,7 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
EXPECT_THAT(
r.replacement(),
IsLoadElement(access,
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ IsPointerConstant(bit_cast<intptr_t>(&backing_store[0])),
key, effect, control));
}
}
@@ -650,11 +679,11 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsStoreBuffer(BufferAccess(type),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- offset_matcher,
- IsNumberConstant(array->byte_length()->Number()), value,
- effect, control));
+ IsStoreBuffer(
+ BufferAccess(type),
+ IsPointerConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher, IsNumberConstant(array->byte_length()->Number()),
+ value, effect, control));
}
}
}
@@ -703,11 +732,11 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsStoreBuffer(BufferAccess(type),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- offset_matcher,
- IsNumberConstant(array->byte_length()->Number()),
- value_matcher, effect_matcher, control_matcher));
+ IsStoreBuffer(
+ BufferAccess(type),
+ IsPointerConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher, IsNumberConstant(array->byte_length()->Number()),
+ value_matcher, effect_matcher, control_matcher));
}
}
}
@@ -744,7 +773,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
EXPECT_THAT(
r.replacement(),
IsStoreElement(
- access, IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ access, IsPointerConstant(bit_cast<intptr_t>(&backing_store[0])),
key, value, effect, control));
}
}
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
index efc823dba6..f11d6dff18 100644
--- a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -27,21 +27,20 @@ class LivenessAnalysisTest : public GraphTest {
javascript_(zone()),
jsgraph_(isolate(), graph(), common(), &javascript_, nullptr,
&machine_),
- analyzer_(locals_count, zone()),
+ analyzer_(locals_count, false, zone()),
empty_values_(graph()->NewNode(common()->StateValues(0), 0, nullptr)),
next_checkpoint_id_(0),
current_block_(nullptr) {}
-
protected:
JSGraph* jsgraph() { return &jsgraph_; }
LivenessAnalyzer* analyzer() { return &analyzer_; }
void Run() {
StateValuesCache cache(jsgraph());
- NonLiveFrameStateSlotReplacer replacer(&cache,
- jsgraph()->UndefinedConstant(),
- analyzer()->local_count(), zone());
+ NonLiveFrameStateSlotReplacer replacer(
+ &cache, jsgraph()->UndefinedConstant(), analyzer()->local_count(),
+ false, zone());
analyzer()->Run(&replacer);
}
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 56691fdeef..969f253ae1 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -66,7 +66,7 @@ class LoopPeelingTest : public GraphTest {
OFStream os(stdout);
os << AsRPO(*graph());
}
- Zone zone(isolate()->allocator());
+ Zone zone(isolate()->allocator(), ZONE_NAME);
return LoopFinder::BuildLoopTree(graph(), &zone);
}
@@ -90,7 +90,8 @@ class LoopPeelingTest : public GraphTest {
}
Node* InsertReturn(Node* val, Node* effect, Node* control) {
- Node* r = graph()->NewNode(common()->Return(), val, effect, control);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* r = graph()->NewNode(common()->Return(), zero, val, effect, control);
graph()->SetEnd(r);
return r;
}
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index dc14b85361..1698614760 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -859,6 +859,18 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
return os << acc.type;
}
+struct MemoryAccessImm2 {
+ MachineType type;
+ ArchOpcode store_opcode;
+ ArchOpcode store_opcode_unaligned;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm2& acc) {
+ return os << acc.type;
+}
// ----------------------------------------------------------------------------
// Loads and stores immediate values.
@@ -916,14 +928,13 @@ const MemoryAccessImm kMemoryAccessesImm[] = {
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
-
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
{MachineType::Int8(),
kMipsLb,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {MachineType::Int8(),
+ {MachineType::Uint8(),
kMipsLbu,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
@@ -933,7 +944,7 @@ const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {MachineType::Int16(),
+ {MachineType::Uint16(),
kMipsLhu,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
@@ -954,6 +965,40 @@ const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}}};
+const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
+ {MachineType::Int16(),
+ kMipsUsh,
+ kMipsSh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int32(),
+ kMipsUsw,
+ kMipsSw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float32(),
+ kMipsUswc1,
+ kMipsSwc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float64(),
+ kMipsUsdc1,
+ kMipsSdc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
} // namespace
@@ -1043,11 +1088,60 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
}
}
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmTest,
::testing::ValuesIn(kMemoryAccessesImm));
+typedef InstructionSelectorTestWithParam<MemoryAccessImm2>
+ InstructionSelectorMemoryAccessUnalignedImmTest;
+
+TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
+ const MemoryAccessImm2 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ bool unaligned_store_supported = m.machine()->UnalignedStoreSupported(
+ MachineType::TypeForRepresentation(memacc.type.representation()), 1);
+ m.UnalignedStore(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(unaligned_store_supported ? memacc.store_opcode_unaligned
+ : memacc.store_opcode,
+ s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessUnalignedImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
// ----------------------------------------------------------------------------
// Load/store offsets more than 16 bits.
@@ -1065,11 +1159,9 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
- ASSERT_EQ(2U, s.size());
- // kMipsAdd is expected opcode.
- // size more than 16 bits wide.
- EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
- EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -1086,13 +1178,11 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
- ASSERT_EQ(2U, s.size());
- // kMipsAdd is expected opcode
- // size more than 16 bits wide
- EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
- EXPECT_EQ(kMode_None, s[0]->addressing_mode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
}
}
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index be77126688..b0e82e4316 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -286,6 +286,15 @@ const Conversion kFloat32RoundInstructions[] = {
kMips64TruncWS, MachineType::Int32()},
MachineType::Float32()}};
+// MIPS64 instructions that clear the top 32 bits of the destination.
+const MachInst2 kCanElideChangeUint32ToUint64[] = {
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kMips64MulHighU,
+ MachineType::Uint32()}};
+
} // namespace
@@ -320,10 +329,40 @@ TEST_P(InstructionSelectorCmpTest, Parameter) {
StreamBuilder m(this, type, type, type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
- ASSERT_EQ(cmp.expected_size, s.size());
- EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(1U, s[0]->OutputCount());
+
+ if (FLAG_debug_code &&
+ type.representation() == MachineRepresentation::kWord32) {
+ ASSERT_EQ(6U, s.size());
+
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+
+ EXPECT_EQ(kMips64Dshl, s[1]->arch_opcode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+
+ EXPECT_EQ(kMips64Dshl, s[2]->arch_opcode());
+ EXPECT_EQ(2U, s[2]->InputCount());
+ EXPECT_EQ(1U, s[2]->OutputCount());
+
+ EXPECT_EQ(cmp.mi.arch_opcode, s[3]->arch_opcode());
+ EXPECT_EQ(2U, s[3]->InputCount());
+ EXPECT_EQ(1U, s[3]->OutputCount());
+
+ EXPECT_EQ(kMips64AssertEqual, s[4]->arch_opcode());
+ EXPECT_EQ(3U, s[4]->InputCount());
+ EXPECT_EQ(0U, s[4]->OutputCount());
+
+ EXPECT_EQ(cmp.mi.arch_opcode, s[5]->arch_opcode());
+ EXPECT_EQ(2U, s[5]->InputCount());
+ EXPECT_EQ(1U, s[5]->OutputCount());
+ } else {
+ ASSERT_EQ(cmp.expected_size, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
@@ -1114,6 +1153,83 @@ TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
}
}
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorElidedChangeUint32ToUint64Test;
+
+TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
+ const MachInst2 binop = GetParam();
+ StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
+ binop.machine_type);
+ m.Return(m.ChangeUint32ToUint64(
+ (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ // Make sure the `ChangeUint32ToUint64` node turned into a no-op.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
+ // For each case, make sure the `ChangeUint32ToUint64` node turned into a
+ // no-op.
+
+ // Lbu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kMips64Lbu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // Lhu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kMips64Lhu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+ // Lwu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kMips64Lwu, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[1]->addressing_mode());
+ EXPECT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+ }
+}
// ----------------------------------------------------------------------------
// Loads and stores.
@@ -1168,6 +1284,18 @@ std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
return os << acc.type;
}
+struct MemoryAccessImm2 {
+ MachineType type;
+ ArchOpcode store_opcode;
+ ArchOpcode store_opcode_unaligned;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm2& acc) {
+ return os << acc.type;
+}
// ----------------------------------------------------------------------------
// Loads and stores immediate values
@@ -1275,6 +1403,48 @@ const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}}};
+const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = {
+ {MachineType::Int16(),
+ kMips64Ush,
+ kMips64Sh,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int32(),
+ kMips64Usw,
+ kMips64Sw,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int64(),
+ kMips64Usd,
+ kMips64Sd,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float32(),
+ kMips64Uswc1,
+ kMips64Swc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float64(),
+ kMips64Usdc1,
+ kMips64Sdc1,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
} // namespace
@@ -1361,10 +1531,60 @@ TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
}
}
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessImmTest,
::testing::ValuesIn(kMemoryAccessesImm));
+typedef InstructionSelectorTestWithParam<MemoryAccessImm2>
+ InstructionSelectorMemoryAccessUnalignedImmTest;
+
+TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) {
+ const MemoryAccessImm2 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ bool unaligned_store_supported = m.machine()->UnalignedStoreSupported(
+ MachineType::TypeForRepresentation(memacc.type.representation()), 1);
+ m.UnalignedStore(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0));
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(unaligned_store_supported ? memacc.store_opcode_unaligned
+ : memacc.store_opcode,
+ s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessUnalignedImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImmUnaligned));
// ----------------------------------------------------------------------------
// Load/store offsets more than 16 bits.
@@ -1765,6 +1985,36 @@ TEST_F(InstructionSelectorTest, Float64Min) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
+ {
+ int32_t immediates[] = {-256, -255, -3, -2, -1, 0, 1,
+ 2, 3, 255, 256, 260, 4096, 4100,
+ 8192, 8196, 3276, 3280, 16376, 16380};
+ TRACED_FOREACH(int32_t, index, immediates) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer());
+ Node* const load =
+ m.Load(MachineType::Uint64(), m.Parameter(0), m.Int32Constant(index));
+ Node* const sar = m.Word64Sar(load, m.Int32Constant(32));
+ // Make sure we don't fold the shift into the following add:
+ m.Return(m.Int64Add(sar, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kMips64Lw, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ EXPECT_EQ(index + 4, s.ToInt32(s[0]->InputAt(1)));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+#endif
+
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index 4c69384667..71571488e1 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -12,6 +12,14 @@ namespace compiler {
class MoveOptimizerTest : public InstructionSequenceTest {
public:
+ // FP register indices which don't interfere under simple or complex aliasing.
+ static const int kF64_1 = 0;
+ static const int kF64_2 = 1;
+ static const int kF32_1 = 4;
+ static const int kF32_2 = 5;
+ static const int kS128_1 = 2;
+ static const int kS128_2 = 3;
+
Instruction* LastInstruction() { return sequence()->instructions().back(); }
void AddMove(Instruction* instr, TestOperand from, TestOperand to,
@@ -61,6 +69,8 @@ class MoveOptimizerTest : public InstructionSequenceTest {
}
private:
+ bool DoesRegisterAllocation() const override { return false; }
+
InstructionOperand ConvertMoveArg(TestOperand op) {
CHECK_EQ(kNoValue, op.vreg_.value_);
CHECK_NE(kNoValue, op.value_);
@@ -70,14 +80,16 @@ class MoveOptimizerTest : public InstructionSequenceTest {
case kFixedSlot:
return AllocatedOperand(LocationOperand::STACK_SLOT,
MachineRepresentation::kWord32, op.value_);
- case kFixedRegister:
- CHECK(0 <= op.value_ && op.value_ < num_general_registers());
- return AllocatedOperand(LocationOperand::REGISTER,
- MachineRepresentation::kWord32, op.value_);
- case kExplicit:
- CHECK(0 <= op.value_ && op.value_ < num_general_registers());
- return ExplicitOperand(LocationOperand::REGISTER,
- MachineRepresentation::kWord32, op.value_);
+ case kFixedRegister: {
+ MachineRepresentation rep = GetCanonicalRep(op);
+ CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
+ return AllocatedOperand(LocationOperand::REGISTER, rep, op.value_);
+ }
+ case kExplicit: {
+ MachineRepresentation rep = GetCanonicalRep(op);
+ CHECK(0 <= op.value_ && op.value_ < GetNumRegs(rep));
+ return ExplicitOperand(LocationOperand::REGISTER, rep, op.value_);
+ }
default:
break;
}
@@ -90,39 +102,69 @@ class MoveOptimizerTest : public InstructionSequenceTest {
TEST_F(MoveOptimizerTest, RemovesRedundant) {
StartBlock();
auto first_instr = EmitNop();
- AddMove(first_instr, Reg(0), Reg(1));
auto last_instr = EmitNop();
+
+ AddMove(first_instr, Reg(0), Reg(1));
AddMove(last_instr, Reg(1), Reg(0));
+
+ AddMove(first_instr, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128));
+ AddMove(last_instr, FPReg(kS128_2, kSimd128), FPReg(kS128_1, kSimd128));
+ AddMove(first_instr, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64));
+ AddMove(last_instr, FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64));
+ AddMove(first_instr, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32));
+ AddMove(last_instr, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32));
+
EndBlock(Last());
Optimize();
CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
auto move = last_instr->parallel_moves()[0];
- CHECK_EQ(1, NonRedundantSize(move));
+ CHECK_EQ(4, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
+ CHECK(Contains(move, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128)));
+ CHECK(Contains(move, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)));
+ CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)));
}
TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
- int first_reg_index =
- RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(0);
- int second_reg_index =
- RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(1);
+ int index1 = GetAllocatableCode(0);
+ int index2 = GetAllocatableCode(1);
+ int s128_1 = GetAllocatableCode(kS128_1, kSimd128);
+ int s128_2 = GetAllocatableCode(kS128_2, kSimd128);
+ int f64_1 = GetAllocatableCode(kF64_1, kFloat64);
+ int f64_2 = GetAllocatableCode(kF64_2, kFloat64);
+ int f32_1 = GetAllocatableCode(kF32_1, kFloat32);
+ int f32_2 = GetAllocatableCode(kF32_2, kFloat32);
StartBlock();
auto first_instr = EmitNop();
- AddMove(first_instr, Reg(first_reg_index), ExplicitReg(second_reg_index));
auto last_instr = EmitNop();
- AddMove(last_instr, Reg(second_reg_index), Reg(first_reg_index));
+
+ AddMove(first_instr, Reg(index1), ExplicitReg(index2));
+ AddMove(last_instr, Reg(index2), Reg(index1));
+
+ AddMove(first_instr, FPReg(s128_1, kSimd128),
+ ExplicitFPReg(s128_2, kSimd128));
+ AddMove(last_instr, FPReg(s128_2, kSimd128), FPReg(s128_1, kSimd128));
+ AddMove(first_instr, FPReg(f64_1, kFloat64), ExplicitFPReg(f64_2, kFloat64));
+ AddMove(last_instr, FPReg(f64_2, kFloat64), FPReg(f64_1, kFloat64));
+ AddMove(first_instr, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32));
+ AddMove(last_instr, FPReg(f32_2, kFloat32), FPReg(f32_1, kFloat32));
+
EndBlock(Last());
Optimize();
CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
auto move = last_instr->parallel_moves()[0];
- CHECK_EQ(1, NonRedundantSize(move));
- CHECK(Contains(move, Reg(first_reg_index), ExplicitReg(second_reg_index)));
+ CHECK_EQ(4, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(index1), ExplicitReg(index2)));
+ CHECK(
+ Contains(move, FPReg(s128_1, kSimd128), ExplicitFPReg(s128_2, kSimd128)));
+ CHECK(Contains(move, FPReg(f64_1, kFloat64), ExplicitFPReg(f64_2, kFloat64)));
+ CHECK(Contains(move, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32)));
}
@@ -157,10 +199,18 @@ TEST_F(MoveOptimizerTest, SimpleMerge) {
StartBlock();
EndBlock(Jump(2));
AddMove(LastInstruction(), Reg(0), Reg(1));
+ AddMove(LastInstruction(), FPReg(kS128_1, kSimd128),
+ FPReg(kS128_2, kSimd128));
+ AddMove(LastInstruction(), FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64));
+ AddMove(LastInstruction(), FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32));
StartBlock();
EndBlock(Jump(1));
AddMove(LastInstruction(), Reg(0), Reg(1));
+ AddMove(LastInstruction(), FPReg(kS128_1, kSimd128),
+ FPReg(kS128_2, kSimd128));
+ AddMove(LastInstruction(), FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64));
+ AddMove(LastInstruction(), FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32));
StartBlock();
EndBlock(Last());
@@ -170,8 +220,11 @@ TEST_F(MoveOptimizerTest, SimpleMerge) {
Optimize();
auto move = last->parallel_moves()[0];
- CHECK_EQ(1, NonRedundantSize(move));
+ CHECK_EQ(4, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
+ CHECK(Contains(move, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128)));
+ CHECK(Contains(move, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)));
+ CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)));
}
@@ -185,11 +238,25 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
AddMove(gap_0, Reg(0), Reg(1));
AddMove(LastInstruction(), Reg(1), Reg(0));
+ AddMove(gap_0, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128));
+ AddMove(LastInstruction(), FPReg(kS128_2, kSimd128),
+ FPReg(kS128_1, kSimd128));
+ AddMove(gap_0, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64));
+ AddMove(LastInstruction(), FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64));
+ AddMove(gap_0, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32));
+ AddMove(LastInstruction(), FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32));
+
StartBlock();
EndBlock(Jump(1));
auto gap_1 = LastInstruction();
AddMove(gap_1, Reg(0), Reg(1));
AddMove(gap_1, Reg(1), Reg(0));
+ AddMove(gap_1, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128));
+ AddMove(gap_1, FPReg(kS128_2, kSimd128), FPReg(kS128_1, kSimd128));
+ AddMove(gap_1, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64));
+ AddMove(gap_1, FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64));
+ AddMove(gap_1, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32));
+ AddMove(gap_1, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32));
StartBlock();
EndBlock(Last());
@@ -201,9 +268,15 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
CHECK(gap_0->AreMovesRedundant());
CHECK(gap_1->AreMovesRedundant());
auto move = last->parallel_moves()[0];
- CHECK_EQ(2, NonRedundantSize(move));
+ CHECK_EQ(8, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
CHECK(Contains(move, Reg(1), Reg(0)));
+ CHECK(Contains(move, FPReg(kS128_1, kSimd128), FPReg(kS128_2, kSimd128)));
+ CHECK(Contains(move, FPReg(kS128_2, kSimd128), FPReg(kS128_1, kSimd128)));
+ CHECK(Contains(move, FPReg(kF64_1, kFloat64), FPReg(kF64_2, kFloat64)));
+ CHECK(Contains(move, FPReg(kF64_2, kFloat64), FPReg(kF64_1, kFloat64)));
+ CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)));
+ CHECK(Contains(move, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32)));
}
@@ -337,6 +410,30 @@ TEST_F(MoveOptimizerTest, ClobberedDestinationsAreEliminated) {
CHECK_EQ(0, NonRedundantSize(last_move));
}
+TEST_F(MoveOptimizerTest, ClobberedFPDestinationsAreEliminated) {
+ StartBlock();
+ EmitNop();
+ Instruction* first_instr = LastInstruction();
+ AddMove(first_instr, FPReg(4, kFloat64), FPReg(1, kFloat64));
+ if (!kSimpleFPAliasing) {
+ // We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3.
+ // Add moves to registers s2 and s3.
+ AddMove(first_instr, FPReg(10, kFloat32), FPReg(0, kFloat32));
+ AddMove(first_instr, FPReg(11, kFloat32), FPReg(1, kFloat32));
+ }
+ // Clobbers output register 0.
+ EmitOI(FPReg(0, kSimd128), 0, nullptr);
+ Instruction* last_instr = LastInstruction();
+ EndBlock();
+ Optimize();
+
+ ParallelMove* first_move = first_instr->parallel_moves()[0];
+ CHECK_EQ(0, NonRedundantSize(first_move));
+
+ ParallelMove* last_move = last_instr->parallel_moves()[0];
+ CHECK_EQ(0, NonRedundantSize(last_move));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 3a5b2c3aeb..8352691644 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -356,10 +356,10 @@ class IsReturnMatcher final : public NodeMatcher {
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
"value", value_matcher_, listener) &&
(!has_second_return_value_ ||
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
"value2", value2_matcher_, listener)) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
effect_matcher_, listener) &&
@@ -1790,6 +1790,10 @@ Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
new IsConstantMatcher<double>(IrOpcode::kNumberConstant, value_matcher));
}
+Matcher<Node*> IsPointerConstant(const Matcher<intptr_t>& value_matcher) {
+ return MakeMatcher(new IsConstantMatcher<intptr_t>(IrOpcode::kPointerConstant,
+ value_matcher));
+}
Matcher<Node*> IsSelect(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 3afe2adf14..fa5ae02dea 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -93,6 +93,7 @@ Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsPointerConstant(const Matcher<intptr_t>& value_matcher);
Matcher<Node*> IsSelect(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
index 71a726f167..0533ee7406 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -101,6 +101,18 @@ TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
Allocate();
}
+TEST_F(RegisterAllocatorTest, CanAllocateFPRegisters) {
+ StartBlock();
+ TestOperand inputs[] = {
+ Reg(FPParameter(kFloat64)), Reg(FPParameter(kFloat64)),
+ Reg(FPParameter(kFloat32)), Reg(FPParameter(kFloat32)),
+ Reg(FPParameter(kSimd128)), Reg(FPParameter(kSimd128))};
+ VReg out1 = EmitOI(FPReg(1, kFloat64), arraysize(inputs), inputs);
+ Return(out1);
+ EndBlock(Last());
+
+ Allocate();
+}
TEST_F(RegisterAllocatorTest, SimpleLoop) {
// i = K;
@@ -309,11 +321,11 @@ TEST_F(RegisterAllocatorTest, SpillPhi) {
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
- auto left = Define(Reg(0));
+ auto left = Define(Reg(GetAllocatableCode(0)));
EndBlock(Jump(2));
StartBlock();
- auto right = Define(Reg(0));
+ auto right = Define(Reg(GetAllocatableCode(0)));
EndBlock();
StartBlock();
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index 1b57e5f483..92c17b73cc 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/compiler/scheduler.h"
+#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/source-position.h"
#include "src/compiler/verifier.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/test-utils.h"
@@ -96,7 +96,8 @@ TEST_F(SchedulerTest, BuildScheduleOneParameter) {
graph()->SetStart(graph()->NewNode(common()->Start(0)));
Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
- Node* ret = graph()->NewNode(common()->Return(), p1, graph()->start(),
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, p1, graph()->start(),
graph()->start());
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
@@ -128,12 +129,13 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond1) {
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
Node* d1 = CreateDiamond(graph(), common(), p0);
- Node* ret = graph()->NewNode(common()->Return(), d1, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, d1, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(13);
+ ComputeAndVerifySchedule(14);
}
TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond1) {
@@ -143,12 +145,13 @@ TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond1) {
Node* p0 = graph()->NewNode(common()->Parameter(0), start);
Node* d1 = CreateDiamond(graph(), common(), p0);
USE(d1);
- Node* ret = graph()->NewNode(common()->Return(), p0, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, p0, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(4);
+ ComputeAndVerifySchedule(5);
}
TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond2) {
@@ -162,9 +165,10 @@ TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond2) {
Node* n3 = g->NewNode(common()->IfTrue(), n2);
Node* n4 = g->NewNode(common()->IfFalse(), n2);
Node* n5 = g->NewNode(common()->Int32Constant(-100));
- Node* n6 = g->NewNode(common()->Return(), n5, start, n4);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* n6 = g->NewNode(common()->Return(), zero, n5, start, n4);
Node* n7 = g->NewNode(common()->Int32Constant(0));
- Node* n8 = g->NewNode(common()->Return(), n7, start, n3);
+ Node* n8 = g->NewNode(common()->Return(), zero, n7, start, n3);
Node* n9 = g->NewNode(common()->End(2), n6, n8);
// Dead nodes
@@ -179,7 +183,7 @@ TARGET_TEST_F(SchedulerTest, FloatingDeadDiamond2) {
g->SetEnd(n9);
- ComputeAndVerifySchedule(10);
+ ComputeAndVerifySchedule(11);
}
TARGET_TEST_F(SchedulerTest, FloatingDiamond2) {
@@ -191,12 +195,13 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond2) {
Node* d1 = CreateDiamond(graph(), common(), p0);
Node* d2 = CreateDiamond(graph(), common(), p1);
Node* add = graph()->NewNode(&kIntAdd, d1, d2);
- Node* ret = graph()->NewNode(common()->Return(), add, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, add, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(24);
+ ComputeAndVerifySchedule(25);
}
@@ -210,12 +215,13 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond3) {
Node* d2 = CreateDiamond(graph(), common(), p1);
Node* add = graph()->NewNode(&kIntAdd, d1, d2);
Node* d3 = CreateDiamond(graph(), common(), add);
- Node* ret = graph()->NewNode(common()->Return(), d3, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, d3, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(33);
+ ComputeAndVerifySchedule(34);
}
@@ -248,12 +254,13 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamonds) {
fv, phi1, m);
Node* ephi1 = graph()->NewNode(common()->EffectPhi(2), start, map, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, ephi1, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, ephi1, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(23);
+ ComputeAndVerifySchedule(24);
}
@@ -294,12 +301,13 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithChain) {
common()->Phi(MachineRepresentation::kTagged, 2), phiA1, c, mB2);
Node* add = graph()->NewNode(&kIntAdd, phiA2, phiB2);
- Node* ret = graph()->NewNode(common()->Return(), add, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, add, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(36);
+ ComputeAndVerifySchedule(37);
}
@@ -330,12 +338,13 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithLoop) {
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
fv, ind, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(20);
+ ComputeAndVerifySchedule(21);
}
@@ -365,12 +374,13 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond1) {
loop->ReplaceInput(1, t); // close loop.
ind->ReplaceInput(1, phi1); // close induction variable.
- Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, ind, start, f);
Node* end = graph()->NewNode(common()->End(2), ret, f);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(20);
+ ComputeAndVerifySchedule(21);
}
@@ -401,12 +411,13 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond2) {
loop->ReplaceInput(1, t); // close loop.
ind->ReplaceInput(1, add); // close induction variable.
- Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, ind, start, f);
Node* end = graph()->NewNode(common()->End(2), ret, f);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(20);
+ ComputeAndVerifySchedule(21);
}
@@ -450,12 +461,13 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond3) {
loop->ReplaceInput(1, t); // close loop.
ind->ReplaceInput(1, add); // close induction variable.
- Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, ind, start, f);
Node* end = graph()->NewNode(common()->End(2), ret, f);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(28);
+ ComputeAndVerifySchedule(29);
}
@@ -486,12 +498,13 @@ TARGET_TEST_F(SchedulerTest, PhisPushedDownToDifferentBranches) {
Node* phi3 = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), phi, phi2, m2);
- Node* ret = graph()->NewNode(common()->Return(), phi3, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi3, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(24);
+ ComputeAndVerifySchedule(25);
}
@@ -508,12 +521,13 @@ TARGET_TEST_F(SchedulerTest, BranchHintTrue) {
Node* m = graph()->NewNode(common()->Merge(2), t, f);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
tv, fv, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- Schedule* schedule = ComputeAndVerifySchedule(13);
+ Schedule* schedule = ComputeAndVerifySchedule(14);
// Make sure the false block is marked as deferred.
EXPECT_FALSE(schedule->block(t)->deferred());
EXPECT_TRUE(schedule->block(f)->deferred());
@@ -533,12 +547,13 @@ TARGET_TEST_F(SchedulerTest, BranchHintFalse) {
Node* m = graph()->NewNode(common()->Merge(2), t, f);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
tv, fv, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- Schedule* schedule = ComputeAndVerifySchedule(13);
+ Schedule* schedule = ComputeAndVerifySchedule(14);
// Make sure the true block is marked as deferred.
EXPECT_TRUE(schedule->block(t)->deferred());
EXPECT_FALSE(schedule->block(f)->deferred());
@@ -560,12 +575,13 @@ TARGET_TEST_F(SchedulerTest, CallException) {
Node* m = graph()->NewNode(common()->Merge(2), ok2, hdl);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
c2, p0, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, start, m);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- Schedule* schedule = ComputeAndVerifySchedule(17);
+ Schedule* schedule = ComputeAndVerifySchedule(18);
// Make sure the exception blocks as well as the handler are deferred.
EXPECT_TRUE(schedule->block(ex1)->deferred());
EXPECT_TRUE(schedule->block(ex2)->deferred());
@@ -603,12 +619,13 @@ TARGET_TEST_F(SchedulerTest, Switch) {
Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 3),
v0, v1, vd, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, start, m);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(16);
+ ComputeAndVerifySchedule(17);
}
@@ -627,12 +644,13 @@ TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 3),
v0, v1, vd, m);
- Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret = graph()->NewNode(common()->Return(), zero, phi, start, start);
Node* end = graph()->NewNode(common()->End(1), ret);
graph()->SetEnd(end);
- ComputeAndVerifySchedule(16);
+ ComputeAndVerifySchedule(17);
}
diff --git a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
index 56fedeeb09..127f269960 100644
--- a/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/tail-call-optimization-unittest.cc
@@ -39,7 +39,9 @@ TEST_F(TailCallOptimizationTest, CallCodeObject0) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, call, call, if_success);
Reduction r = Reduce(ret);
ASSERT_FALSE(r.Changed());
}
@@ -60,7 +62,9 @@ TEST_F(TailCallOptimizationTest, CallCodeObject1) {
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
Node* if_exception = graph()->NewNode(common()->IfException(), call, call);
- Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, call, call, if_success);
Node* end = graph()->NewNode(common()->End(1), if_exception);
graph()->SetEnd(end);
Reduction r = Reduce(ret);
@@ -82,7 +86,9 @@ TEST_F(TailCallOptimizationTest, CallCodeObject2) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, call, call, if_success);
Reduction r = Reduce(ret);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTailCall(kCallDescriptor, p0, p1,
@@ -104,7 +110,9 @@ TEST_F(TailCallOptimizationTest, CallJSFunction0) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, call, call, if_success);
Reduction r = Reduce(ret);
ASSERT_FALSE(r.Changed());
}
@@ -125,7 +133,9 @@ TEST_F(TailCallOptimizationTest, CallJSFunction1) {
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
Node* if_exception = graph()->NewNode(common()->IfException(), call, call);
- Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, call, call, if_success);
Node* end = graph()->NewNode(common()->End(1), if_exception);
graph()->SetEnd(end);
Reduction r = Reduce(ret);
@@ -146,7 +156,9 @@ TEST_F(TailCallOptimizationTest, CallJSFunction2) {
Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
graph()->start(), graph()->start());
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
- Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(), zero, call, call, if_success);
Reduction r = Reduce(ret);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTailCall(kCallDescriptor, p0, p1,
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index d73c72d4e0..4b583fd461 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -88,7 +88,7 @@ class TypedOptimizationTest : public TypedGraphTest {
TEST_F(TypedOptimizationTest, ParameterWithMinusZero) {
{
Reduction r = Reduce(
- Parameter(Type::Constant(factory()->minus_zero_value(), zone())));
+ Parameter(Type::NewConstant(factory()->minus_zero_value(), zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
}
@@ -98,9 +98,9 @@ TEST_F(TypedOptimizationTest, ParameterWithMinusZero) {
EXPECT_THAT(r.replacement(), IsNumberConstant(-0.0));
}
{
- Reduction r = Reduce(Parameter(
- Type::Union(Type::MinusZero(),
- Type::Constant(factory()->NewNumber(0), zone()), zone())));
+ Reduction r = Reduce(Parameter(Type::Union(
+ Type::MinusZero(), Type::NewConstant(factory()->NewNumber(0), zone()),
+ zone())));
EXPECT_FALSE(r.Changed());
}
}
@@ -108,7 +108,7 @@ TEST_F(TypedOptimizationTest, ParameterWithMinusZero) {
TEST_F(TypedOptimizationTest, ParameterWithNull) {
Handle<HeapObject> null = factory()->null_value();
{
- Reduction r = Reduce(Parameter(Type::Constant(null, zone())));
+ Reduction r = Reduce(Parameter(Type::NewConstant(null, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsHeapConstant(null));
}
@@ -125,13 +125,13 @@ TEST_F(TypedOptimizationTest, ParameterWithNaN) {
std::numeric_limits<double>::signaling_NaN()};
TRACED_FOREACH(double, nan, kNaNs) {
Handle<Object> constant = factory()->NewNumber(nan);
- Reduction r = Reduce(Parameter(Type::Constant(constant, zone())));
+ Reduction r = Reduce(Parameter(Type::NewConstant(constant, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
}
{
Reduction r =
- Reduce(Parameter(Type::Constant(factory()->nan_value(), zone())));
+ Reduce(Parameter(Type::NewConstant(factory()->nan_value(), zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(IsNaN()));
}
@@ -145,7 +145,7 @@ TEST_F(TypedOptimizationTest, ParameterWithNaN) {
TEST_F(TypedOptimizationTest, ParameterWithPlainNumber) {
TRACED_FOREACH(double, value, kFloat64Values) {
Handle<Object> constant = factory()->NewNumber(value);
- Reduction r = Reduce(Parameter(Type::Constant(constant, zone())));
+ Reduction r = Reduce(Parameter(Type::NewConstant(constant, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(value));
}
@@ -164,7 +164,7 @@ TEST_F(TypedOptimizationTest, ParameterWithUndefined) {
EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
{
- Reduction r = Reduce(Parameter(Type::Constant(undefined, zone())));
+ Reduction r = Reduce(Parameter(Type::NewConstant(undefined, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
@@ -182,9 +182,9 @@ TEST_F(TypedOptimizationTest, JSToBooleanWithFalsish) {
Type::Undefined(),
Type::Union(
Type::Undetectable(),
- Type::Union(
- Type::Constant(factory()->false_value(), zone()),
- Type::Range(0.0, 0.0, zone()), zone()),
+ Type::Union(Type::NewConstant(
+ factory()->false_value(), zone()),
+ Type::Range(0.0, 0.0, zone()), zone()),
zone()),
zone()),
zone()),
@@ -201,7 +201,7 @@ TEST_F(TypedOptimizationTest, JSToBooleanWithFalsish) {
TEST_F(TypedOptimizationTest, JSToBooleanWithTruish) {
Node* input = Parameter(
Type::Union(
- Type::Constant(factory()->true_value(), zone()),
+ Type::NewConstant(factory()->true_value(), zone()),
Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
zone()),
0);
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index ca5c1cae45..3ef436e5c8 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -135,7 +135,7 @@ class TyperTest : public TypedGraphTest {
for (int x1 = lmin; x1 < lmin + width; x1++) {
for (int x2 = rmin; x2 < rmin + width; x2++) {
double result_value = opfun(x1, x2);
- Type* result_type = Type::Constant(
+ Type* result_type = Type::NewConstant(
isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type->Is(expected_type));
}
@@ -156,7 +156,7 @@ class TyperTest : public TypedGraphTest {
double x1 = RandomInt(r1->AsRange());
double x2 = RandomInt(r2->AsRange());
double result_value = opfun(x1, x2);
- Type* result_type = Type::Constant(
+ Type* result_type = Type::NewConstant(
isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type->Is(expected_type));
}
@@ -173,10 +173,10 @@ class TyperTest : public TypedGraphTest {
double x1 = RandomInt(r1->AsRange());
double x2 = RandomInt(r2->AsRange());
bool result_value = opfun(x1, x2);
- Type* result_type =
- Type::Constant(result_value ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value(),
- zone());
+ Type* result_type = Type::NewConstant(
+ result_value ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value(),
+ zone());
EXPECT_TRUE(result_type->Is(expected_type));
}
}
@@ -192,7 +192,7 @@ class TyperTest : public TypedGraphTest {
int32_t x1 = static_cast<int32_t>(RandomInt(r1->AsRange()));
int32_t x2 = static_cast<int32_t>(RandomInt(r2->AsRange()));
double result_value = opfun(x1, x2);
- Type* result_type = Type::Constant(
+ Type* result_type = Type::NewConstant(
isolate()->factory()->NewNumber(result_value), zone());
EXPECT_TRUE(result_type->Is(expected_type));
}
@@ -373,20 +373,6 @@ TEST_BINARY_MONOTONICITY(Divide)
TEST_BINARY_MONOTONICITY(Modulus)
#undef TEST_BINARY_MONOTONICITY
-
-//------------------------------------------------------------------------------
-// Regression tests
-
-
-TEST_F(TyperTest, TypeRegressInt32Constant) {
- int values[] = {-5, 10};
- for (auto i : values) {
- Node* c = graph()->NewNode(common()->Int32Constant(i));
- Type* type = NodeProperties::GetType(c);
- EXPECT_TRUE(type->Is(NewRange(i, i)));
- }
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 540c5e71c2..074d0c8677 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -241,6 +241,146 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorChangeUint32ToUint64Test,
::testing::ValuesIn(kWord32BinaryOperations));
+// -----------------------------------------------------------------------------
+// CanElideChangeUint32ToUint64
+
+namespace {
+
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+// X64 instructions that clear the top 32 bits of the destination.
+const MachInst2 kCanElideChangeUint32ToUint64[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kX64And32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kX64Or32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kX64Xor32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kX64Shl32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kX64Shr32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kX64Sar32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kX64Ror32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kX64Cmp32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kX64Lea32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kX64Sub32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kX64Imul32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32MulHigh, "Int32MulHigh", kX64ImulHigh32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kX64Idiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32LessThan, "Int32LessThan", kX64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kX64Cmp32, MachineType::Int32()},
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kX64Idiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kX64Udiv32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kX64Cmp32,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kX64Cmp32, MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kX64Udiv32,
+ MachineType::Uint32()},
+};
+
+} // namespace
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+ InstructionSelectorElidedChangeUint32ToUint64Test;
+
+TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
+ const MachInst2 binop = GetParam();
+ StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
+ binop.machine_type);
+ m.Return(m.ChangeUint32ToUint64(
+ (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ // Make sure the `ChangeUint32ToUint64` node turned into a no-op.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+
+// ChangeUint32ToUint64AfterLoad
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
+ // For each case, make sure the `ChangeUint32ToUint64` node turned into a
+ // no-op.
+
+ // movzxbl
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // movsxbl
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxbl, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // movzxwl
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // movsxwl
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movsxwl, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
// -----------------------------------------------------------------------------
// TruncateInt64ToInt32.
diff --git a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc b/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
index 5bbdbfd45d..a643d1480c 100644
--- a/deps/v8/test/unittests/compiler/zone-pool-unittest.cc
+++ b/deps/v8/test/unittests/compiler/zone-stats-unittest.cc
@@ -2,28 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/zone-stats.h"
#include "src/base/utils/random-number-generator.h"
-#include "src/compiler/zone-pool.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace compiler {
-class ZonePoolTest : public TestWithIsolate {
+class ZoneStatsTest : public TestWithIsolate {
public:
- ZonePoolTest() : zone_pool_(&allocator_) {}
+ ZoneStatsTest() : zone_stats_(&allocator_) {}
protected:
- ZonePool* zone_pool() { return &zone_pool_; }
+ ZoneStats* zone_stats() { return &zone_stats_; }
void ExpectForPool(size_t current, size_t max, size_t total) {
- ASSERT_EQ(current, zone_pool()->GetCurrentAllocatedBytes());
- ASSERT_EQ(max, zone_pool()->GetMaxAllocatedBytes());
- ASSERT_EQ(total, zone_pool()->GetTotalAllocatedBytes());
+ ASSERT_EQ(current, zone_stats()->GetCurrentAllocatedBytes());
+ ASSERT_EQ(max, zone_stats()->GetMaxAllocatedBytes());
+ ASSERT_EQ(total, zone_stats()->GetTotalAllocatedBytes());
}
- void Expect(ZonePool::StatsScope* stats, size_t current, size_t max,
+ void Expect(ZoneStats::StatsScope* stats, size_t current, size_t max,
size_t total) {
ASSERT_EQ(current, stats->GetCurrentAllocatedBytes());
ASSERT_EQ(max, stats->GetMaxAllocatedBytes());
@@ -39,41 +39,39 @@ class ZonePoolTest : public TestWithIsolate {
private:
v8::internal::AccountingAllocator allocator_;
- ZonePool zone_pool_;
+ ZoneStats zone_stats_;
base::RandomNumberGenerator rng;
};
-
-TEST_F(ZonePoolTest, Empty) {
+TEST_F(ZoneStatsTest, Empty) {
ExpectForPool(0, 0, 0);
{
- ZonePool::StatsScope stats(zone_pool());
+ ZoneStats::StatsScope stats(zone_stats());
Expect(&stats, 0, 0, 0);
}
ExpectForPool(0, 0, 0);
{
- ZonePool::Scope scope(zone_pool());
+ ZoneStats::Scope scope(zone_stats(), ZONE_NAME);
scope.zone();
}
ExpectForPool(0, 0, 0);
}
-
-TEST_F(ZonePoolTest, MultipleZonesWithDeletion) {
+TEST_F(ZoneStatsTest, MultipleZonesWithDeletion) {
static const size_t kArraySize = 10;
- ZonePool::Scope* scopes[kArraySize];
+ ZoneStats::Scope* scopes[kArraySize];
// Initialize.
size_t before_stats = 0;
for (size_t i = 0; i < kArraySize; ++i) {
- scopes[i] = new ZonePool::Scope(zone_pool());
+ scopes[i] = new ZoneStats::Scope(zone_stats(), ZONE_NAME);
before_stats += Allocate(scopes[i]->zone()); // Add some stuff.
}
ExpectForPool(before_stats, before_stats, before_stats);
- ZonePool::StatsScope stats(zone_pool());
+ ZoneStats::StatsScope stats(zone_stats());
size_t before_deletion = 0;
for (size_t i = 0; i < kArraySize; ++i) {
@@ -87,7 +85,7 @@ TEST_F(ZonePoolTest, MultipleZonesWithDeletion) {
// Delete the scopes and create new ones.
for (size_t i = 0; i < kArraySize; ++i) {
delete scopes[i];
- scopes[i] = new ZonePool::Scope(zone_pool());
+ scopes[i] = new ZoneStats::Scope(zone_stats(), ZONE_NAME);
}
Expect(&stats, 0, before_deletion, before_deletion);
@@ -116,14 +114,13 @@ TEST_F(ZonePoolTest, MultipleZonesWithDeletion) {
before_stats + before_deletion + after_deletion);
}
-
-TEST_F(ZonePoolTest, SimpleAllocationLoop) {
+TEST_F(ZoneStatsTest, SimpleAllocationLoop) {
int runs = 20;
size_t total_allocated = 0;
size_t max_loop_allocation = 0;
- ZonePool::StatsScope outer_stats(zone_pool());
+ ZoneStats::StatsScope outer_stats(zone_stats());
{
- ZonePool::Scope outer_scope(zone_pool());
+ ZoneStats::Scope outer_scope(zone_stats(), ZONE_NAME);
size_t outer_allocated = 0;
for (int i = 0; i < runs; ++i) {
{
@@ -131,10 +128,10 @@ TEST_F(ZonePoolTest, SimpleAllocationLoop) {
outer_allocated += bytes;
total_allocated += bytes;
}
- ZonePool::StatsScope inner_stats(zone_pool());
+ ZoneStats::StatsScope inner_stats(zone_stats());
size_t allocated = 0;
{
- ZonePool::Scope inner_scope(zone_pool());
+ ZoneStats::Scope inner_scope(zone_stats(), ZONE_NAME);
for (int j = 0; j < 20; ++j) {
size_t bytes = Allocate(inner_scope.zone());
allocated += bytes;
diff --git a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
index 27485db67e..1b8adddaee 100644
--- a/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-iterator-unittest.cc
@@ -40,7 +40,7 @@ TEST_F(EhFrameIteratorTest, Skip) {
TEST_F(EhFrameIteratorTest, ULEB128Decoding) {
static const byte kEncoded[] = {0xe5, 0x8e, 0x26};
EhFrameIterator iterator(&kEncoded[0], &kEncoded[0] + sizeof(kEncoded));
- EXPECT_EQ(624485, iterator.GetNextULeb128());
+ EXPECT_EQ(624485u, iterator.GetNextULeb128());
EXPECT_TRUE(iterator.Done());
}
diff --git a/deps/v8/test/unittests/eh-frame-writer-unittest.cc b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
index 98503986aa..13b970f448 100644
--- a/deps/v8/test/unittests/eh-frame-writer-unittest.cc
+++ b/deps/v8/test/unittests/eh-frame-writer-unittest.cc
@@ -75,7 +75,7 @@ TEST_F(EhFrameWriterTest, FDEHeader) {
}
TEST_F(EhFrameWriterTest, SetOffset) {
- static const int kOffset = 0x0badc0de;
+ static const uint32_t kOffset = 0x0badc0de;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -91,8 +91,8 @@ TEST_F(EhFrameWriterTest, SetOffset) {
}
TEST_F(EhFrameWriterTest, IncreaseOffset) {
- static const int kFirstOffset = 121;
- static const int kSecondOffset = 16;
+ static const uint32_t kFirstOffset = 121;
+ static const uint32_t kSecondOffset = 16;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -125,12 +125,13 @@ TEST_F(EhFrameWriterTest, SetRegister) {
EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfaRegister,
iterator.GetNextOpcode());
- EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(static_cast<uint32_t>(kTestRegisterCode),
+ iterator.GetNextULeb128());
}
TEST_F(EhFrameWriterTest, SetRegisterAndOffset) {
Register test_register = Register::from_code(kTestRegisterCode);
- static const int kOffset = 0x0badc0de;
+ static const uint32_t kOffset = 0x0badc0de;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -141,7 +142,8 @@ TEST_F(EhFrameWriterTest, SetRegisterAndOffset) {
iterator.SkipToFdeDirectives();
EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kDefCfa, iterator.GetNextOpcode());
- EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(static_cast<uint32_t>(kTestRegisterCode),
+ iterator.GetNextULeb128());
EXPECT_EQ(kOffset, iterator.GetNextULeb128());
}
@@ -261,7 +263,7 @@ TEST_F(EhFrameWriterTest, PcOffsetEncoding16bitDelta) {
}
TEST_F(EhFrameWriterTest, PcOffsetEncoding32bit) {
- static const int kOffset = kMaxUInt16 + 42;
+ static const uint32_t kOffset = kMaxUInt16 + 42;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -277,8 +279,8 @@ TEST_F(EhFrameWriterTest, PcOffsetEncoding32bit) {
}
TEST_F(EhFrameWriterTest, PcOffsetEncoding32bitDelta) {
- static const int kFirstOffset = kMaxUInt16 + 0x42;
- static const int kSecondOffset = kMaxUInt16 + 0x67;
+ static const uint32_t kFirstOffset = kMaxUInt16 + 0x42;
+ static const uint32_t kSecondOffset = kMaxUInt16 + 0x67;
EhFrameWriter writer(zone());
writer.Initialize();
@@ -311,8 +313,9 @@ TEST_F(EhFrameWriterTest, SaveRegisterUnsignedOffset) {
iterator.SkipToFdeDirectives();
EXPECT_EQ((2 << 6) | kTestRegisterCode, iterator.GetNextByte());
- EXPECT_EQ(kOffset / EhFrameConstants::kDataAlignmentFactor,
- iterator.GetNextULeb128());
+ EXPECT_EQ(
+ static_cast<uint32_t>(kOffset / EhFrameConstants::kDataAlignmentFactor),
+ iterator.GetNextULeb128());
}
TEST_F(EhFrameWriterTest, SaveRegisterSignedOffset) {
@@ -332,7 +335,8 @@ TEST_F(EhFrameWriterTest, SaveRegisterSignedOffset) {
EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kOffsetExtendedSf,
iterator.GetNextOpcode());
- EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(static_cast<uint32_t>(kTestRegisterCode),
+ iterator.GetNextULeb128());
EXPECT_EQ(kOffset / EhFrameConstants::kDataAlignmentFactor,
iterator.GetNextSLeb128());
}
@@ -350,7 +354,8 @@ TEST_F(EhFrameWriterTest, RegisterNotModified) {
EXPECT_EQ(EhFrameConstants::DwarfOpcodes::kSameValue,
iterator.GetNextOpcode());
- EXPECT_EQ(kTestRegisterCode, iterator.GetNextULeb128());
+ EXPECT_EQ(static_cast<uint32_t>(kTestRegisterCode),
+ iterator.GetNextULeb128());
}
TEST_F(EhFrameWriterTest, RegisterFollowsInitialRule) {
diff --git a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
index 677da0eb0b..e7702fda75 100644
--- a/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-tracer-unittest.cc
@@ -17,7 +17,7 @@ namespace internal {
typedef TestWithContext GCTracerTest;
TEST(GCTracer, AverageSpeed) {
- RingBuffer<BytesAndDuration> buffer;
+ base::RingBuffer<BytesAndDuration> buffer;
EXPECT_EQ(100 / 2,
GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 0));
buffer.Push(MakeBytesAndDuration(100, 8));
@@ -263,7 +263,7 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(300, tracer->incremental_marking_duration_);
- EXPECT_EQ(3000000, tracer->incremental_marking_bytes_);
+ EXPECT_EQ(3000000u, tracer->incremental_marking_bytes_);
EXPECT_EQ(1000000 / 100,
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
tracer->Start(MARK_COMPACTOR, GarbageCollectionReason::kTesting,
@@ -273,12 +273,12 @@ TEST_F(GCTracerTest, IncrementalMarkingSpeed) {
// 1000000 bytes in 100ms.
tracer->AddIncrementalMarkingStep(100, 1000000);
EXPECT_EQ(400, tracer->incremental_marking_duration_);
- EXPECT_EQ(4000000, tracer->incremental_marking_bytes_);
+ EXPECT_EQ(4000000u, tracer->incremental_marking_bytes_);
tracer->Stop(MARK_COMPACTOR);
EXPECT_EQ(400, tracer->current_.incremental_marking_duration);
- EXPECT_EQ(4000000, tracer->current_.incremental_marking_bytes);
+ EXPECT_EQ(4000000u, tracer->current_.incremental_marking_bytes);
EXPECT_EQ(0, tracer->incremental_marking_duration_);
- EXPECT_EQ(0, tracer->incremental_marking_bytes_);
+ EXPECT_EQ(0u, tracer->incremental_marking_bytes_);
EXPECT_EQ(1000000 / 100,
tracer->IncrementalMarkingSpeedInBytesPerMillisecond());
diff --git a/deps/v8/test/unittests/heap/marking-unittest.cc b/deps/v8/test/unittests/heap/marking-unittest.cc
index 0015cce8d7..074816bf1f 100644
--- a/deps/v8/test/unittests/heap/marking-unittest.cc
+++ b/deps/v8/test/unittests/heap/marking-unittest.cc
@@ -115,11 +115,11 @@ TEST(Marking, SetAndClearRange) {
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
for (int i = 0; i < 3; i++) {
bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff << i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1 << i) - 1);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu << i);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1u << i) - 1);
bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0u);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0u);
}
free(bitmap);
}
@@ -129,9 +129,9 @@ TEST(Marking, ClearMultipleRanges) {
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffff);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffffu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffffu);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffffu);
CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
bitmap->ClearRange(Bitmap::kBitsPerCell,
@@ -139,17 +139,17 @@ TEST(Marking, ClearMultipleRanges) {
bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
Bitmap::kBitsPerCell * 2 + 16);
bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffu);
CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
Bitmap::kBitsPerCell));
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000u);
CHECK(
bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
2 * Bitmap::kBitsPerCell));
CHECK(bitmap->AllBitsClearInRange(
Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
- CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ff);
+ CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ffu);
CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
2 * Bitmap::kBitsPerCell + 8));
CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
diff --git a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
index 4787bc66d2..9aa05e2ebd 100644
--- a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
+++ b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
@@ -27,7 +27,6 @@ MemoryReducer::State RunState(int started_gcs, double next_gc_start_ms) {
next_gc_start_ms, 1.0);
}
-
MemoryReducer::Event MarkCompactEvent(double time_ms,
bool next_gc_likely_to_collect_more) {
MemoryReducer::Event event;
@@ -37,12 +36,10 @@ MemoryReducer::Event MarkCompactEvent(double time_ms,
return event;
}
-
MemoryReducer::Event MarkCompactEventGarbageLeft(double time_ms) {
return MarkCompactEvent(time_ms, true);
}
-
MemoryReducer::Event MarkCompactEventNoGarbageLeft(double time_ms) {
return MarkCompactEvent(time_ms, false);
}
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
index 65b7925310..ec905ea2a7 100644
--- a/deps/v8/test/unittests/heap/slot-set-unittest.cc
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <limits>
+#include <map>
#include "src/globals.h"
#include "src/heap/slot-set.h"
@@ -101,18 +102,21 @@ void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
set.SetPageStart(0);
uint32_t first = start == 0 ? 0 : start - kPointerSize;
uint32_t last = end == Page::kPageSize ? end - kPointerSize : end;
- for (uint32_t i = first; i <= last; i += kPointerSize) {
- set.Insert(i);
- }
- set.RemoveRange(start, end);
- if (first != start) {
- EXPECT_TRUE(set.Lookup(first));
- }
- if (last == end) {
- EXPECT_TRUE(set.Lookup(last));
- }
- for (uint32_t i = start; i < end; i += kPointerSize) {
- EXPECT_FALSE(set.Lookup(i));
+ for (const auto mode :
+ {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
+ for (uint32_t i = first; i <= last; i += kPointerSize) {
+ set.Insert(i);
+ }
+ set.RemoveRange(start, end, mode);
+ if (first != start) {
+ EXPECT_TRUE(set.Lookup(first));
+ }
+ if (last == end) {
+ EXPECT_TRUE(set.Lookup(last));
+ }
+ for (uint32_t i = start; i < end; i += kPointerSize) {
+ EXPECT_FALSE(set.Lookup(i));
+ }
}
}
@@ -123,7 +127,7 @@ TEST(SlotSet, RemoveRange) {
CheckRemoveRangeOn(start * kPointerSize, (start + 1) * kPointerSize);
CheckRemoveRangeOn(start * kPointerSize, (start + 2) * kPointerSize);
const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
- for (int i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
+ for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
for (int k = -3; k <= 3; k++) {
uint32_t end = (kEnds[i] + k);
if (start < end) {
@@ -134,10 +138,13 @@ TEST(SlotSet, RemoveRange) {
}
SlotSet set;
set.SetPageStart(0);
- set.Insert(Page::kPageSize / 2);
- set.RemoveRange(0, Page::kPageSize);
- for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
- EXPECT_FALSE(set.Lookup(i));
+ for (const auto mode :
+ {SlotSet::FREE_EMPTY_BUCKETS, SlotSet::KEEP_EMPTY_BUCKETS}) {
+ set.Insert(Page::kPageSize / 2);
+ set.RemoveRange(0, Page::kPageSize, mode);
+ for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
+ EXPECT_FALSE(set.Lookup(i));
+ }
}
}
@@ -161,8 +168,8 @@ TEST(TypedSlotSet, Iterate) {
uint32_t j =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
EXPECT_EQ(i % CLEARED_SLOT, static_cast<uint32_t>(type));
- EXPECT_EQ(0, i % kDelta);
- EXPECT_EQ(0, j % kHostDelta);
+ EXPECT_EQ(0u, i % kDelta);
+ EXPECT_EQ(0u, j % kHostDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
},
@@ -172,7 +179,7 @@ TEST(TypedSlotSet, Iterate) {
set.Iterate(
[&iterated](SlotType type, Address host_addr, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
- EXPECT_EQ(0, i % 2);
+ EXPECT_EQ(0u, i % 2);
++iterated;
return KEEP_SLOT;
},
@@ -180,5 +187,35 @@ TEST(TypedSlotSet, Iterate) {
EXPECT_EQ(added / 2, iterated);
}
+TEST(TypedSlotSet, RemoveInvalidSlots) {
+ TypedSlotSet set(0);
+ const int kHostDelta = 100;
+ uint32_t entries = 10;
+ for (uint32_t i = 0; i < entries; i++) {
+ SlotType type = static_cast<SlotType>(i % CLEARED_SLOT);
+ set.Insert(type, i * kHostDelta, i * kHostDelta);
+ }
+
+ std::map<uint32_t, uint32_t> invalid_ranges;
+ for (uint32_t i = 1; i < entries; i += 2) {
+ invalid_ranges.insert(
+ std::pair<uint32_t, uint32_t>(i * kHostDelta, i * kHostDelta + 1));
+ }
+
+ set.RemoveInvaldSlots(invalid_ranges);
+ for (std::map<uint32_t, uint32_t>::iterator it = invalid_ranges.begin();
+ it != invalid_ranges.end(); ++it) {
+ uint32_t start = it->first;
+ uint32_t end = it->second;
+ set.Iterate(
+ [start, end](SlotType slot_type, Address host_addr, Address slot_addr) {
+ CHECK(reinterpret_cast<uintptr_t>(host_addr) < start ||
+ reinterpret_cast<uintptr_t>(host_addr) >= end);
+ return KEEP_SLOT;
+ },
+ TypedSlotSet::KEEP_EMPTY_CHUNKS);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 4507d63eb1..999490518e 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -42,7 +42,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateArguments(CreateArgumentsType::kRestParameter);
// Emit constant loads.
- builder.LoadLiteral(Smi::FromInt(0))
+ builder.LoadLiteral(Smi::kZero)
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(8))
.CompareOperation(Token::Value::NE, reg,
@@ -54,7 +54,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.LoadLiteral(factory->NewStringFromStaticChars("A constant"))
.StoreAccumulatorInRegister(reg)
.LoadUndefined()
- .Debugger() // Prevent peephole optimization LdaNull, Star -> LdrNull.
+ .StoreAccumulatorInRegister(reg)
.LoadNull()
.StoreAccumulatorInRegister(reg)
.LoadTheHole()
@@ -88,6 +88,10 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.LoadContextSlot(reg, 1, 0)
.StoreContextSlot(reg, 1, 0);
+ // Emit context operations which operate on the local context.
+ builder.LoadContextSlot(Register::current_context(), 1, 0)
+ .StoreContextSlot(Register::current_context(), 1, 0);
+
// Emit load / store property operations.
builder.LoadNamedProperty(reg, name, 0)
.LoadKeyedProperty(reg, 0)
@@ -125,8 +129,10 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateObjectLiteral(factory->NewFixedArray(1), 0, 0, reg);
// Call operations.
- builder.Call(reg, reg_list, 1)
- .Call(reg, reg_list, 1, TailCallMode::kAllow)
+ builder.Call(reg, reg_list, 1, Call::GLOBAL_CALL)
+ .Call(reg, reg_list, 1, Call::NAMED_PROPERTY_CALL,
+ TailCallMode::kDisallow)
+ .Call(reg, reg_list, 1, Call::GLOBAL_CALL, TailCallMode::kAllow)
.CallRuntime(Runtime::kIsArray, reg)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg_list, pair)
.CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg_list);
@@ -219,9 +225,9 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.JumpIfTrue(&end[1])
.LoadTrue()
.JumpIfFalse(&end[2])
- .LoadLiteral(Smi::FromInt(0))
+ .LoadLiteral(Smi::kZero)
.JumpIfTrue(&end[3])
- .LoadLiteral(Smi::FromInt(0))
+ .LoadLiteral(Smi::kZero)
.JumpIfFalse(&end[4])
.JumpIfNull(&end[5])
.JumpIfUndefined(&end[6])
@@ -302,19 +308,6 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
.StoreLookupSlot(wide_name, LanguageMode::STRICT);
- // Emit loads which will be transformed to Ldr equivalents by the peephole
- // optimizer.
- builder.LoadNamedProperty(reg, name, 0)
- .StoreAccumulatorInRegister(reg)
- .LoadKeyedProperty(reg, 0)
- .StoreAccumulatorInRegister(reg)
- .LoadContextSlot(reg, 1, 0)
- .StoreAccumulatorInRegister(reg)
- .LoadGlobal(0, TypeofMode::NOT_INSIDE_TYPEOF)
- .StoreAccumulatorInRegister(reg)
- .LoadUndefined()
- .StoreAccumulatorInRegister(reg);
-
// CreateClosureWide
builder.CreateClosure(1000, NOT_TENURED);
@@ -324,7 +317,15 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateArrayLiteral(factory->NewFixedArray(2), 0, 0)
.CreateObjectLiteral(factory->NewFixedArray(2), 0, 0, reg);
- // Emit generator operations
+ // Emit load and store operations for module variables.
+ builder.LoadModuleVariable(-1, 42)
+ .LoadModuleVariable(0, 42)
+ .LoadModuleVariable(1, 42)
+ .StoreModuleVariable(-1, 42)
+ .StoreModuleVariable(0, 42)
+ .StoreModuleVariable(1, 42);
+
+ // Emit generator operations.
builder.SuspendGenerator(reg)
.ResumeGenerator(reg);
@@ -380,11 +381,6 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
if (!FLAG_ignition_peephole) {
// Insert entries for bytecodes only emitted by peephole optimizer.
- scorecard[Bytecodes::ToByte(Bytecode::kLdrNamedProperty)] = 1;
- scorecard[Bytecodes::ToByte(Bytecode::kLdrKeyedProperty)] = 1;
- scorecard[Bytecodes::ToByte(Bytecode::kLdrGlobal)] = 1;
- scorecard[Bytecodes::ToByte(Bytecode::kLdrContextSlot)] = 1;
- scorecard[Bytecodes::ToByte(Bytecode::kLdrUndefined)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kLogicalNot)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJump)] = 1;
scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrue)] = 1;
@@ -421,12 +417,12 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
BytecodeRegisterAllocator* allocator(builder.register_allocator());
for (int i = 0; i < locals + contexts; i++) {
- builder.LoadLiteral(Smi::FromInt(0));
+ builder.LoadLiteral(Smi::kZero);
builder.StoreAccumulatorInRegister(Register(i));
}
for (int i = 0; i < temps; i++) {
Register temp = allocator->NewRegister();
- builder.LoadLiteral(Smi::FromInt(0));
+ builder.LoadLiteral(Smi::kZero);
builder.StoreAccumulatorInRegister(temp);
// Ensure temporaries are used so not optimized away by the
// register optimizer.
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index 07ecefb529..894134a959 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -26,7 +26,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_0 = factory->NewHeapNumber(2.718);
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(2147483647);
- Smi* zero = Smi::FromInt(0);
+ Smi* zero = Smi::kZero;
Smi* smi_0 = Smi::FromInt(64);
Smi* smi_1 = Smi::FromInt(-65536);
Register reg_0(0);
@@ -35,8 +35,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
RegisterList triple(0, 3);
Register param = Register::FromParameterIndex(2, builder.parameter_count());
Handle<String> name = factory->NewStringFromStaticChars("abc");
- int name_index = 2;
- int feedback_slot = 97;
+ uint32_t name_index = 2;
+ uint32_t feedback_slot = 97;
builder.LoadLiteral(heap_num_0)
.StoreAccumulatorInRegister(reg_0)
@@ -229,7 +229,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadLookupSlotForCall);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), param.index());
CHECK_EQ(iterator.GetRegisterOperandRange(1), 1);
- CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
+ CHECK_EQ(iterator.GetRegisterCountOperand(2), 1u);
CHECK_EQ(iterator.GetRegisterOperand(3).index(), reg_0.index());
CHECK_EQ(iterator.GetRegisterOperandRange(3), 2);
CHECK(!iterator.done());
@@ -253,7 +253,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
- CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
+ CHECK_EQ(iterator.GetRegisterCountOperand(2), 1u);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kCallRuntime, OperandScale::kSingle);
iterator.Advance();
@@ -269,7 +269,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
CHECK_EQ(iterator.current_bytecode_size(), 6);
- CHECK_EQ(iterator.GetIndexOperand(0), 0x10000000);
+ CHECK_EQ(iterator.GetIndexOperand(0), 0x10000000u);
offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
kPrefixByteSize;
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 0bb0f9757a..bc865ef7d1 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -57,27 +57,27 @@ class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, &info);
+ BytecodeNode node(bytecode, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, operand0, &info);
+ BytecodeNode node(bytecode, operand0, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, operand0, operand1, &info);
+ BytecodeNode node(bytecode, operand0, operand1, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, operand0, operand1, operand2, &info);
+ BytecodeNode node(bytecode, operand0, operand1, operand2, info);
writer()->Write(&node);
}
@@ -85,38 +85,38 @@ void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
uint32_t operand3,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand3, &info);
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3, info);
writer()->Write(&node);
}
void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
BytecodeLabel* label,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, 0, &info);
+ BytecodeNode node(bytecode, 0, info);
writer()->WriteJump(&node, label);
}
void BytecodeArrayWriterUnittest::WriteJumpLoop(Bytecode bytecode,
BytecodeLabel* label, int depth,
BytecodeSourceInfo info) {
- BytecodeNode node(bytecode, 0, depth, &info);
+ BytecodeNode node(bytecode, 0, depth, info);
writer()->WriteJump(&node, label);
}
TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
- CHECK_EQ(bytecodes()->size(), 0);
+ CHECK_EQ(bytecodes()->size(), 0u);
Write(Bytecode::kStackCheck, {10, false});
- CHECK_EQ(bytecodes()->size(), 1);
+ CHECK_EQ(bytecodes()->size(), 1u);
Write(Bytecode::kLdaSmi, 127, {55, true});
- CHECK_EQ(bytecodes()->size(), 3);
+ CHECK_EQ(bytecodes()->size(), 3u);
Write(Bytecode::kLdar, Register(200).ToOperand());
- CHECK_EQ(bytecodes()->size(), 7);
+ CHECK_EQ(bytecodes()->size(), 7u);
Write(Bytecode::kReturn, {70, true});
- CHECK_EQ(bytecodes()->size(), 8);
+ CHECK_EQ(bytecodes()->size(), 8u);
static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(127), B(Wide),
B(Ldar), R16(200), B(Return)};
@@ -136,7 +136,8 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
- CHECK_EQ(source_iterator.source_position(), expected.source_position);
+ CHECK_EQ(source_iterator.source_position().ScriptOffset(),
+ expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
@@ -222,7 +223,8 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
- CHECK_EQ(source_iterator.source_position(), expected.source_position);
+ CHECK_EQ(source_iterator.source_position().ScriptOffset(),
+ expected.source_position);
CHECK_EQ(source_iterator.is_statement(), expected.is_statement);
source_iterator.Advance();
}
diff --git a/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
index 4cb5e69f4e..5b086520dc 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
@@ -22,12 +22,12 @@ class BytecodeDeadCodeOptimizerTest : public BytecodePipelineStage,
void Write(BytecodeNode* node) override {
write_count_++;
- last_written_.Clone(node);
+ last_written_ = *node;
}
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
write_count_++;
- last_written_.Clone(node);
+ last_written_ = *node;
}
void BindLabel(BytecodeLabel* label) override {}
@@ -57,7 +57,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, LiveCodeKept) {
CHECK_EQ(add, last_written());
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJump, 0, nullptr);
+ BytecodeNode jump(Bytecode::kJump, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 2);
CHECK_EQ(jump, last_written());
@@ -101,7 +101,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReThrowEliminated) {
TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterJumpEliminated) {
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJump, 0, nullptr);
+ BytecodeNode jump(Bytecode::kJump, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 1);
CHECK_EQ(jump, last_written());
@@ -119,7 +119,7 @@ TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeStillDeadAfterConditinalJump) {
CHECK_EQ(ret, last_written());
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJumpIfTrue, 0, nullptr);
+ BytecodeNode jump(Bytecode::kJumpIfTrue, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 1);
CHECK_EQ(ret, last_written());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
index d7beb47a01..d1c570d421 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
@@ -29,12 +29,12 @@ class BytecodePeepholeOptimizerTest : public BytecodePipelineStage,
void Write(BytecodeNode* node) override {
write_count_++;
- last_written_.Clone(node);
+ last_written_ = *node;
}
void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
write_count_++;
- last_written_.Clone(node);
+ last_written_ = *node;
}
void BindLabel(BytecodeLabel* label) override {}
@@ -72,7 +72,7 @@ TEST_F(BytecodePeepholeOptimizerTest, FlushOnJump) {
CHECK_EQ(write_count(), 0);
BytecodeLabel target;
- BytecodeNode jump(Bytecode::kJump, 0, nullptr);
+ BytecodeNode jump(Bytecode::kJump, 0);
optimizer()->WriteJump(&jump, &target);
CHECK_EQ(write_count(), 2);
CHECK_EQ(jump, last_written());
@@ -105,7 +105,7 @@ TEST_F(BytecodePeepholeOptimizerTest, ElideEmptyNop) {
TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
BytecodeSourceInfo source_info(3, false);
- BytecodeNode nop(Bytecode::kNop, &source_info);
+ BytecodeNode nop(Bytecode::kNop, source_info);
optimizer()->Write(&nop);
BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1);
optimizer()->Write(&add);
@@ -115,11 +115,12 @@ TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
}
TEST_F(BytecodePeepholeOptimizerTest, KeepStatementNop) {
- BytecodeSourceInfo source_info(3, true);
- BytecodeNode nop(Bytecode::kNop, &source_info);
+ BytecodeSourceInfo source_info_statement(3, true);
+ BytecodeNode nop(Bytecode::kNop, source_info_statement);
optimizer()->Write(&nop);
- source_info.MakeExpressionPosition(3);
- BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1, &source_info);
+ BytecodeSourceInfo source_info_expression(3, false);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand(), 1,
+ source_info_expression);
optimizer()->Write(&add);
Flush();
CHECK_EQ(write_count(), 2);
@@ -206,7 +207,7 @@ TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRx) {
TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
BytecodeSourceInfo source_info(3, true);
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), &source_info);
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), source_info);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@@ -215,14 +216,14 @@ TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written().bytecode(), Bytecode::kNop);
- CHECK_EQ(last_written().source_info(), second.source_info());
+ CHECK_EQ(last_written().source_info(), source_info);
}
TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatementStarRy) {
BytecodeLabel label;
BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
BytecodeSourceInfo source_info(0, true);
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), &source_info);
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(), source_info);
BytecodeNode third(Bytecode::kStar, Register(3).ToOperand());
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@@ -279,7 +280,7 @@ TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalse) {
TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
BytecodeSourceInfo source_info(3, true);
- BytecodeNode first(Bytecode::kLdaTrue, &source_info);
+ BytecodeNode first(Bytecode::kLdaTrue, source_info);
BytecodeNode second(Bytecode::kLdaFalse);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@@ -294,7 +295,7 @@ TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
BytecodeNode first(Bytecode::kNop);
- BytecodeNode second(Bytecode::kStackCheck, nullptr);
+ BytecodeNode second(Bytecode::kStackCheck);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
@@ -306,7 +307,7 @@ TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
BytecodeSourceInfo source_info(3, true);
- BytecodeNode first(Bytecode::kNop, &source_info);
+ BytecodeNode first(Bytecode::kNop, source_info);
BytecodeNode second(Bytecode::kStackCheck);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
@@ -314,140 +315,12 @@ TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
CHECK_EQ(write_count(), 0);
Flush();
CHECK_EQ(write_count(), 1);
- BytecodeSourceInfo expected_source_info(3, true);
- BytecodeNode expected(Bytecode::kStackCheck, &expected_source_info);
+ BytecodeNode expected(Bytecode::kStackCheck, source_info);
CHECK_EQ(last_written(), expected);
}
// Tests covering BytecodePeepholeOptimizer::UpdateLastAndCurrentBytecodes().
-TEST_F(BytecodePeepholeOptimizerTest, MergeLoadICStar) {
- const uint32_t operands[] = {
- static_cast<uint32_t>(Register(31).ToOperand()), 32, 33,
- static_cast<uint32_t>(Register(256).ToOperand())};
- const int expected_operand_count = static_cast<int>(arraysize(operands));
-
- BytecodeNode first(Bytecode::kLdaNamedProperty, operands[0], operands[1],
- operands[2]);
- BytecodeNode second(Bytecode::kStar, operands[3]);
- BytecodeNode third(Bytecode::kReturn);
- optimizer()->Write(&first);
- optimizer()->Write(&second);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdrNamedProperty);
- CHECK_EQ(last_written().operand_count(), expected_operand_count);
- for (int i = 0; i < expected_operand_count; ++i) {
- CHECK_EQ(last_written().operand(i), operands[i]);
- }
- optimizer()->Write(&third);
- CHECK_EQ(write_count(), 2);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
- CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
- Flush();
- CHECK_EQ(last_written().bytecode(), third.bytecode());
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, MergeLdaKeyedPropertyStar) {
- const uint32_t operands[] = {static_cast<uint32_t>(Register(31).ToOperand()),
- 9999997,
- static_cast<uint32_t>(Register(1).ToOperand())};
- const int expected_operand_count = static_cast<int>(arraysize(operands));
-
- BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1],
- nullptr);
- BytecodeNode second(Bytecode::kStar, operands[2]);
- BytecodeNode third(Bytecode::kReturn);
- optimizer()->Write(&first);
- optimizer()->Write(&second);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdrKeyedProperty);
- CHECK_EQ(last_written().operand_count(), expected_operand_count);
- for (int i = 0; i < expected_operand_count; ++i) {
- CHECK_EQ(last_written().operand(i), operands[i]);
- }
- optimizer()->Write(&third);
- CHECK_EQ(write_count(), 2);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
- CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
- Flush();
- CHECK_EQ(last_written().bytecode(), third.bytecode());
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, MergeLdaGlobalStar) {
- const uint32_t operands[] = {19191,
- static_cast<uint32_t>(Register(1).ToOperand())};
- const int expected_operand_count = static_cast<int>(arraysize(operands));
-
- BytecodeNode first(Bytecode::kLdaGlobal, operands[0]);
- BytecodeNode second(Bytecode::kStar, operands[1]);
- BytecodeNode third(Bytecode::kReturn);
- optimizer()->Write(&first);
- optimizer()->Write(&second);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdrGlobal);
- CHECK_EQ(last_written().operand_count(), expected_operand_count);
- for (int i = 0; i < expected_operand_count; ++i) {
- CHECK_EQ(last_written().operand(i), operands[i]);
- }
- optimizer()->Write(&third);
- CHECK_EQ(write_count(), 2);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
- CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
- Flush();
- CHECK_EQ(last_written().bytecode(), third.bytecode());
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, MergeLdaContextSlotStar) {
- const uint32_t operands[] = {
- static_cast<uint32_t>(Register(200000).ToOperand()), 55005500,
- static_cast<uint32_t>(Register(0).ToOperand()),
- static_cast<uint32_t>(Register(1).ToOperand())};
- const int expected_operand_count = static_cast<int>(arraysize(operands));
-
- BytecodeNode first(Bytecode::kLdaContextSlot, operands[0], operands[1],
- operands[2]);
- BytecodeNode second(Bytecode::kStar, operands[3]);
- BytecodeNode third(Bytecode::kReturn);
- optimizer()->Write(&first);
- optimizer()->Write(&second);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdrContextSlot);
- CHECK_EQ(last_written().operand_count(), expected_operand_count);
- for (int i = 0; i < expected_operand_count; ++i) {
- CHECK_EQ(last_written().operand(i), operands[i]);
- }
- optimizer()->Write(&third);
- CHECK_EQ(write_count(), 2);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
- CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
- Flush();
- CHECK_EQ(last_written().bytecode(), third.bytecode());
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, MergeLdaUndefinedStar) {
- const uint32_t operands[] = {
- static_cast<uint32_t>(Register(100000).ToOperand())};
- const int expected_operand_count = static_cast<int>(arraysize(operands));
-
- BytecodeNode first(Bytecode::kLdaUndefined);
- BytecodeNode second(Bytecode::kStar, operands[0]);
- BytecodeNode third(Bytecode::kReturn);
- optimizer()->Write(&first);
- optimizer()->Write(&second);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdrUndefined);
- CHECK_EQ(last_written().operand_count(), expected_operand_count);
- for (int i = 0; i < expected_operand_count; ++i) {
- CHECK_EQ(last_written().operand(i), operands[i]);
- }
- optimizer()->Write(&third);
- CHECK_EQ(write_count(), 2);
- CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
- CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
- Flush();
- CHECK_EQ(last_written().bytecode(), third.bytecode());
-}
-
TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
Bytecode operator_replacement_pairs[][2] = {
{Bytecode::kAdd, Bytecode::kAddSmi},
@@ -460,7 +333,7 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
BytecodeSourceInfo source_info(3, true);
- BytecodeNode first(Bytecode::kLdaSmi, imm_operand, &source_info);
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand, source_info);
uint32_t reg_operand = Register(0).ToOperand();
uint32_t idx_operand = 1;
BytecodeNode second(operator_replacement[0], reg_operand, idx_operand);
@@ -473,7 +346,7 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaSmiWithBinaryOp) {
CHECK_EQ(last_written().operand(0), imm_operand);
CHECK_EQ(last_written().operand(1), reg_operand);
CHECK_EQ(last_written().operand(2), idx_operand);
- CHECK_EQ(last_written().source_info(), first.source_info());
+ CHECK_EQ(last_written().source_info(), source_info);
Reset();
}
}
@@ -490,10 +363,10 @@ TEST_F(BytecodePeepholeOptimizerTest, NotMergingLdaSmiWithBinaryOp) {
for (auto operator_replacement : operator_replacement_pairs) {
uint32_t imm_operand = 17;
BytecodeSourceInfo source_info(3, true);
- BytecodeNode first(Bytecode::kLdaSmi, imm_operand, &source_info);
+ BytecodeNode first(Bytecode::kLdaSmi, imm_operand, source_info);
uint32_t reg_operand = Register(0).ToOperand();
source_info.MakeStatementPosition(4);
- BytecodeNode second(operator_replacement[0], reg_operand, 1, &source_info);
+ BytecodeNode second(operator_replacement[0], reg_operand, 1, source_info);
optimizer()->Write(&first);
optimizer()->Write(&second);
CHECK_EQ(last_written(), first);
@@ -523,7 +396,7 @@ TEST_F(BytecodePeepholeOptimizerTest, MergeLdaZeroWithBinaryOp) {
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written().bytecode(), operator_replacement[1]);
CHECK_EQ(last_written().operand_count(), 3);
- CHECK_EQ(last_written().operand(0), 0);
+ CHECK_EQ(last_written().operand(0), 0u);
CHECK_EQ(last_written().operand(1), reg_operand);
CHECK_EQ(last_written().operand(2), idx_operand);
Reset();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
index 4399dce6f9..c4388e8fed 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
@@ -115,11 +115,11 @@ TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo first_source_info(3, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], &first_source_info);
+ operands[3], first_source_info);
CHECK_EQ(node, node);
BytecodeSourceInfo second_source_info(3, true);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
- operands[2], operands[3], &second_source_info);
+ operands[2], operands[3], second_source_info);
CHECK_EQ(node, other);
}
@@ -127,49 +127,40 @@ TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, true);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], &source_info);
+ operands[3], source_info);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
operands[2], operands[3]);
CHECK_NE(node, other);
}
-TEST_F(BytecodeNodeTest, Clone) {
- uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
- BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3]);
- BytecodeNode clone(Bytecode::kIllegal);
- clone.Clone(&node);
- CHECK_EQ(clone, node);
-}
-
TEST_F(BytecodeNodeTest, SetBytecode0) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], &source_info);
- CHECK_EQ(node.source_info(), BytecodeSourceInfo(77, false));
+ operands[3], source_info);
+ CHECK_EQ(node.source_info(), source_info);
BytecodeNode clone(Bytecode::kIllegal);
- clone.Clone(&node);
+ clone = node;
clone.set_bytecode(Bytecode::kNop);
CHECK_EQ(clone.bytecode(), Bytecode::kNop);
CHECK_EQ(clone.operand_count(), 0);
- CHECK_EQ(clone.source_info(), BytecodeSourceInfo(77, false));
+ CHECK_EQ(clone.source_info(), source_info);
}
TEST_F(BytecodeNodeTest, SetBytecode1) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeSourceInfo source_info(77, false);
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], &source_info);
+ operands[3], source_info);
BytecodeNode clone(Bytecode::kIllegal);
- clone.Clone(&node);
+ clone = node;
clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
CHECK_EQ(clone.bytecode(), Bytecode::kJump);
CHECK_EQ(clone.operand_count(), 1);
- CHECK_EQ(clone.operand(0), 0x01aabbcc);
- CHECK_EQ(clone.source_info(), BytecodeSourceInfo(77, false));
+ CHECK_EQ(clone.operand(0), 0x01aabbccu);
+ CHECK_EQ(clone.source_info(), source_info);
}
} // namespace interpreter
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index f06e454cc9..81c6da5f8f 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -91,6 +91,23 @@ TEST_F(BytecodeRegisterAllocatorTest, RegisterListAllocations) {
CHECK_EQ(allocator()->next_register_index(), 3);
}
+TEST_F(BytecodeRegisterAllocatorTest, GrowableRegisterListAllocations) {
+ CHECK_EQ(allocator()->maximum_register_count(), 0);
+ Register reg = allocator()->NewRegister();
+ CHECK_EQ(reg.index(), 0);
+ RegisterList reg_list = allocator()->NewGrowableRegisterList();
+ CHECK_EQ(reg_list.register_count(), 0);
+ allocator()->GrowRegisterList(&reg_list);
+ allocator()->GrowRegisterList(&reg_list);
+ allocator()->GrowRegisterList(&reg_list);
+ CHECK_EQ(reg_list.register_count(), 3);
+ CHECK_EQ(reg_list[0].index(), 1);
+ CHECK_EQ(reg_list[1].index(), 2);
+ CHECK_EQ(reg_list[2].index(), 3);
+ CHECK_EQ(allocator()->maximum_register_count(), 4);
+ CHECK_EQ(allocator()->next_register_index(), 4);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index ae7c159563..55003d7511 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -62,58 +62,26 @@ class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
// Sanity tests.
-TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
- Initialize(1, 1);
- BytecodeNode node(Bytecode::kNop);
- optimizer()->Write(&node);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(node, last_written());
-}
-
-TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
- Initialize(1, 1);
- BytecodeSourceInfo source_info(3, false);
- BytecodeNode node(Bytecode::kNop, &source_info);
- optimizer()->Write(&node);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(node, last_written());
-}
-
-TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
- Initialize(1, 1);
- BytecodeSourceInfo source_info(3, true);
- BytecodeNode node(Bytecode::kNop);
- optimizer()->Write(&node);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(node, last_written());
-}
-
-TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForFlush) {
Initialize(1, 1);
Register temp = NewTemporary();
- BytecodeNode node(Bytecode::kStar, temp.ToOperand());
- optimizer()->Write(&node);
- CHECK_EQ(write_count(), 0);
- BytecodeLabel label;
- BytecodeNode jump(Bytecode::kJump, 0, nullptr);
- optimizer()->WriteJump(&jump, &label);
- CHECK_EQ(write_count(), 2);
+ optimizer()->DoStar(temp, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
+ optimizer()->Flush();
+ CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
- CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
- CHECK_EQ(output()->at(1).bytecode(), Bytecode::kJump);
+ CHECK_EQ(output()->at(0).operand(0), static_cast<uint32_t>(temp.ToOperand()));
}
-TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
Initialize(1, 1);
Register temp = NewTemporary();
- BytecodeNode node(Bytecode::kStar, temp.ToOperand());
- optimizer()->Write(&node);
- CHECK_EQ(write_count(), 0);
- BytecodeLabel label;
- optimizer()->BindLabel(&label);
- CHECK_EQ(write_count(), 1);
+ optimizer()->DoStar(temp, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
+ optimizer()->PrepareForBytecode(Bytecode::kJump);
+ CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
- CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+ CHECK_EQ(output()->at(0).operand(0), static_cast<uint32_t>(temp.ToOperand()));
}
// Basic Register Optimizations
@@ -121,117 +89,98 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
- BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
- optimizer()->Write(&node0);
- CHECK_EQ(write_count(), 0);
+ optimizer()->DoLdar(parameter, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
Register temp = NewTemporary();
+ optimizer()->DoStar(temp, BytecodeSourceInfo());
BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
- optimizer()->Write(&node1);
- CHECK_EQ(write_count(), 0);
ReleaseTemporaries(temp);
- CHECK_EQ(write_count(), 0);
- BytecodeNode node2(Bytecode::kReturn);
- optimizer()->Write(&node2);
- CHECK_EQ(write_count(), 2);
+ CHECK_EQ(write_count(), 0u);
+ optimizer()->PrepareForBytecode(Bytecode::kReturn);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
- CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
- CHECK_EQ(output()->at(1).bytecode(), Bytecode::kReturn);
+ CHECK_EQ(output()->at(0).operand(0),
+ static_cast<uint32_t>(parameter.ToOperand()));
}
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterUsed) {
Initialize(3, 1);
- BytecodeNode node0(Bytecode::kLdaSmi, 3);
- optimizer()->Write(&node0);
- CHECK_EQ(write_count(), 1);
+ optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
- BytecodeNode node1(Bytecode::kStar, temp1.ToOperand());
- optimizer()->Write(&node1);
- CHECK_EQ(write_count(), 1);
- BytecodeNode node2(Bytecode::kLdaSmi, 1);
- optimizer()->Write(&node2);
- CHECK_EQ(write_count(), 3);
- BytecodeNode node3(Bytecode::kMov, temp1.ToOperand(), temp0.ToOperand());
- optimizer()->Write(&node3);
- CHECK_EQ(write_count(), 3);
+ optimizer()->DoStar(temp1, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
+ optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
+ CHECK_EQ(write_count(), 1u);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0),
+ static_cast<uint32_t>(temp1.ToOperand()));
+ optimizer()->DoMov(temp1, temp0, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 1u);
ReleaseTemporaries(temp1);
- CHECK_EQ(write_count(), 3);
- BytecodeNode node4(Bytecode::kLdar, temp0.ToOperand());
- optimizer()->Write(&node4);
- CHECK_EQ(write_count(), 3);
- BytecodeNode node5(Bytecode::kReturn);
- optimizer()->Write(&node5);
- CHECK_EQ(write_count(), 5);
- CHECK_EQ(output()->at(3).bytecode(), Bytecode::kLdar);
- CHECK_EQ(output()->at(3).operand(0), temp1.ToOperand());
- CHECK_EQ(output()->at(4).bytecode(), Bytecode::kReturn);
+ CHECK_EQ(write_count(), 1u);
+ optimizer()->DoLdar(temp0, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 1u);
+ optimizer()->PrepareForBytecode(Bytecode::kReturn);
+ CHECK_EQ(write_count(), 2u);
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(1).operand(0),
+ static_cast<uint32_t>(temp1.ToOperand()));
}
TEST_F(BytecodeRegisterOptimizerTest, ReleasedRegisterNotFlushed) {
Initialize(3, 1);
- BytecodeNode node0(Bytecode::kLdaSmi, 3);
- optimizer()->Write(&node0);
- CHECK_EQ(write_count(), 1);
+ optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
- BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
- optimizer()->Write(&node1);
- CHECK_EQ(write_count(), 1);
- BytecodeNode node2(Bytecode::kStar, temp1.ToOperand());
- optimizer()->Write(&node2);
- CHECK_EQ(write_count(), 1);
+ optimizer()->DoStar(temp0, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
+ optimizer()->DoStar(temp1, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
ReleaseTemporaries(temp1);
- BytecodeLabel label;
- BytecodeNode jump(Bytecode::kJump, 0, nullptr);
- optimizer()->WriteJump(&jump, &label);
- BytecodeNode node3(Bytecode::kReturn);
- optimizer()->Write(&node3);
- CHECK_EQ(write_count(), 4);
- CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
- CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
- CHECK_EQ(output()->at(2).bytecode(), Bytecode::kJump);
- CHECK_EQ(output()->at(3).bytecode(), Bytecode::kReturn);
+ optimizer()->Flush();
+ CHECK_EQ(write_count(), 1u);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0),
+ static_cast<uint32_t>(temp0.ToOperand()));
}
TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
- BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
- optimizer()->Write(&node0);
- CHECK_EQ(write_count(), 0);
+ optimizer()->DoLdar(parameter, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
Register local = Register(0);
- BytecodeNode node1(Bytecode::kStar, local.ToOperand());
- optimizer()->Write(&node1);
- CHECK_EQ(write_count(), 1);
+ optimizer()->DoStar(local, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 1u);
CHECK_EQ(output()->at(0).bytecode(), Bytecode::kMov);
- CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
- CHECK_EQ(output()->at(0).operand(1), local.ToOperand());
+ CHECK_EQ(output()->at(0).operand(0),
+ static_cast<uint32_t>(parameter.ToOperand()));
+ CHECK_EQ(output()->at(0).operand(1),
+ static_cast<uint32_t>(local.ToOperand()));
- BytecodeNode node2(Bytecode::kReturn);
- optimizer()->Write(&node2);
- CHECK_EQ(write_count(), 3);
+ optimizer()->PrepareForBytecode(Bytecode::kReturn);
+ CHECK_EQ(write_count(), 2u);
CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
- CHECK_EQ(output()->at(1).operand(0), local.ToOperand());
- CHECK_EQ(output()->at(2).bytecode(), Bytecode::kReturn);
+ CHECK_EQ(output()->at(1).operand(0),
+ static_cast<uint32_t>(local.ToOperand()));
}
-TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotMaterializedForInput) {
+TEST_F(BytecodeRegisterOptimizerTest, SingleTemporaryNotMaterializedForInput) {
Initialize(3, 1);
Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
- BytecodeNode node0(Bytecode::kMov, parameter.ToOperand(), temp0.ToOperand());
- optimizer()->Write(&node0);
- BytecodeNode node1(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
- optimizer()->Write(&node1);
- CHECK_EQ(write_count(), 0);
- BytecodeNode node2(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 1);
- optimizer()->Write(&node2);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(output()->at(0).bytecode(), Bytecode::kCallJSRuntime);
- CHECK_EQ(output()->at(0).operand(0), 0);
- CHECK_EQ(output()->at(0).operand(1), parameter.ToOperand());
- CHECK_EQ(output()->at(0).operand(2), 1);
+ optimizer()->DoMov(parameter, temp0, BytecodeSourceInfo());
+ optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
+
+ Register reg = optimizer()->GetInputRegister(temp0);
+ RegisterList reg_list =
+ optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 1));
+ CHECK_EQ(write_count(), 0u);
+ CHECK_EQ(parameter.index(), reg.index());
+ CHECK_EQ(parameter.index(), reg_list.first_register().index());
+ CHECK_EQ(1, reg_list.register_count());
}
TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
@@ -239,32 +188,25 @@ TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
Register parameter = Register::FromParameterIndex(1, 3);
Register temp0 = NewTemporary();
Register temp1 = NewTemporary();
- BytecodeNode node0(Bytecode::kLdaSmi, 3);
- optimizer()->Write(&node0);
- CHECK_EQ(write_count(), 1);
- BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
- optimizer()->Write(&node1);
- BytecodeNode node2(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
- optimizer()->Write(&node2);
- CHECK_EQ(write_count(), 1);
- BytecodeNode node3(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 2);
- optimizer()->Write(&node3);
- CHECK_EQ(write_count(), 4);
-
- CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdaSmi);
- CHECK_EQ(output()->at(0).operand(0), 3);
-
- CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
- CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
-
- CHECK_EQ(output()->at(2).bytecode(), Bytecode::kMov);
- CHECK_EQ(output()->at(2).operand(0), parameter.ToOperand());
- CHECK_EQ(output()->at(2).operand(1), temp1.ToOperand());
-
- CHECK_EQ(output()->at(3).bytecode(), Bytecode::kCallJSRuntime);
- CHECK_EQ(output()->at(3).operand(0), 0);
- CHECK_EQ(output()->at(3).operand(1), temp0.ToOperand());
- CHECK_EQ(output()->at(3).operand(2), 2);
+ optimizer()->PrepareForBytecode(Bytecode::kLdaSmi);
+ optimizer()->DoStar(temp0, BytecodeSourceInfo());
+ optimizer()->DoMov(parameter, temp1, BytecodeSourceInfo());
+ CHECK_EQ(write_count(), 0u);
+
+ optimizer()->PrepareForBytecode(Bytecode::kCallJSRuntime);
+ RegisterList reg_list =
+ optimizer()->GetInputRegisterList(RegisterList(temp0.index(), 2));
+ CHECK_EQ(temp0.index(), reg_list.first_register().index());
+ CHECK_EQ(2, reg_list.register_count());
+ CHECK_EQ(write_count(), 2u);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0),
+ static_cast<uint32_t>(temp0.ToOperand()));
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(1).operand(0),
+ static_cast<uint32_t>(parameter.ToOperand()));
+ CHECK_EQ(output()->at(1).operand(1),
+ static_cast<uint32_t>(temp1.ToOperand()));
}
} // namespace interpreter
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 47c7abb772..81868590b9 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -95,42 +95,14 @@ TEST(OperandScaling, ScalableAndNonScalable) {
TEST(Bytecodes, RegisterOperands) {
CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg));
+ CHECK(Bytecodes::IsRegisterOperandType(OperandType::kRegPair));
CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg));
+ CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kRegPair));
+ CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kRegList));
CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::kReg));
CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::kRegOut));
CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOut));
-
-#define IS_REGISTER_OPERAND_TYPE(Name, _) \
- CHECK(Bytecodes::IsRegisterOperandType(OperandType::k##Name));
- REGISTER_OPERAND_TYPE_LIST(IS_REGISTER_OPERAND_TYPE)
-#undef IS_REGISTER_OPERAND_TYPE
-
-#define IS_NOT_REGISTER_OPERAND_TYPE(Name, _) \
- CHECK(!Bytecodes::IsRegisterOperandType(OperandType::k##Name));
- NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OPERAND_TYPE)
-#undef IS_NOT_REGISTER_OPERAND_TYPE
-
-#define IS_REGISTER_INPUT_OPERAND_TYPE(Name, _) \
- CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::k##Name));
- REGISTER_INPUT_OPERAND_TYPE_LIST(IS_REGISTER_INPUT_OPERAND_TYPE)
-#undef IS_REGISTER_INPUT_OPERAND_TYPE
-
-#define IS_NOT_REGISTER_INPUT_OPERAND_TYPE(Name, _) \
- CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::k##Name));
- NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_INPUT_OPERAND_TYPE);
- REGISTER_OUTPUT_OPERAND_TYPE_LIST(IS_NOT_REGISTER_INPUT_OPERAND_TYPE)
-#undef IS_NOT_REGISTER_INPUT_OPERAND_TYPE
-
-#define IS_REGISTER_OUTPUT_OPERAND_TYPE(Name, _) \
- CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::k##Name));
- REGISTER_OUTPUT_OPERAND_TYPE_LIST(IS_REGISTER_OUTPUT_OPERAND_TYPE)
-#undef IS_REGISTER_OUTPUT_OPERAND_TYPE
-
-#define IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE(Name, _) \
- CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::k##Name));
- NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE)
- REGISTER_INPUT_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE)
-#undef IS_NOT_REGISTER_INPUT_OPERAND_TYPE
+ CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOutPair));
}
TEST(Bytecodes, DebugBreakExistForEachBytecode) {
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index 38cbb6d534..fc80f7145d 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -36,7 +36,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
}
CHECK_EQ(builder.size(), k16BitCapacity);
for (size_t i = 0; i < k16BitCapacity; i++) {
- CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), static_cast<int>(i));
}
}
@@ -50,7 +50,7 @@ TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
CHECK(builder.At(i)->SameValue(*object));
}
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), kNumberOfElements);
+ CHECK_EQ(constant_array->length(), static_cast<int>(kNumberOfElements));
for (size_t i = 0; i < kNumberOfElements; i++) {
CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
}
@@ -66,7 +66,7 @@ TEST_F(ConstantArrayBuilderTest, ToLargeFixedArray) {
CHECK(builder.At(i)->SameValue(*object));
}
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), kNumberOfElements);
+ CHECK_EQ(constant_array->length(), static_cast<int>(kNumberOfElements));
for (size_t i = 0; i < kNumberOfElements; i++) {
CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
}
@@ -81,7 +81,7 @@ TEST_F(ConstantArrayBuilderTest, ToLargeFixedArrayWithReservations) {
Smi::FromInt(static_cast<int>(i)));
}
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), kNumberOfElements);
+ CHECK_EQ(constant_array->length(), static_cast<int>(kNumberOfElements));
for (size_t i = 0; i < kNumberOfElements; i++) {
CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
}
@@ -133,11 +133,12 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
Smi* value = Smi::FromInt(static_cast<int>(2 * k8BitCapacity + i));
size_t index = builder.CommitReservedEntry(OperandSize::kByte, value);
- CHECK_EQ(static_cast<int>(index), k8BitCapacity - reserved + i);
+ CHECK_EQ(index, k8BitCapacity - reserved + i);
}
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), 2 * k8BitCapacity + reserved);
+ CHECK_EQ(constant_array->length(),
+ static_cast<int>(2 * k8BitCapacity + reserved));
// Check all committed values match expected
for (size_t i = 0; i < k8BitCapacity - reserved; i++) {
@@ -188,7 +189,8 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithWideReservations) {
}
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), k8BitCapacity + reserved);
+ CHECK_EQ(constant_array->length(),
+ static_cast<int>(k8BitCapacity + reserved));
for (size_t i = 0; i < k8BitCapacity + reserved; i++) {
Object* value = constant_array->get(static_cast<int>(i));
CHECK(value->SameValue(*isolate()->factory()->NewNumberFromSize(i)));
@@ -202,7 +204,7 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
- CHECK_EQ(builder.size(), 0);
+ CHECK_EQ(builder.size(), 0u);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
builder.CommitReservedEntry(builder.CreateReservedEntry(),
@@ -215,7 +217,7 @@ TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
CHECK_EQ(builder.size(), 2 * k8BitCapacity);
}
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
- CHECK_EQ(constant_array->length(), 2 * k8BitCapacity);
+ CHECK_EQ(constant_array->length(), static_cast<int>(2 * k8BitCapacity));
for (size_t i = 0; i < k8BitCapacity; i++) {
Object* original = constant_array->get(static_cast<int>(k8BitCapacity + i));
Object* duplicate = constant_array->get(static_cast<int>(i));
@@ -231,7 +233,7 @@ TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
for (size_t i = 0; i < k8BitCapacity; i++) {
OperandSize operand_size = builder.CreateReservedEntry();
CHECK(OperandSize::kByte == operand_size);
- CHECK_EQ(builder.size(), 0);
+ CHECK_EQ(builder.size(), 0u);
}
for (size_t i = 0; i < k8BitCapacity; i++) {
Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
@@ -260,9 +262,10 @@ TEST_F(ConstantArrayBuilderTest, HolesWithUnusedReservations) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kByte);
}
for (int i = 0; i < 128; ++i) {
- CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(i)), i);
+ CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(i)),
+ static_cast<size_t>(i));
}
- CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(256)), 256);
+ CHECK_EQ(builder.Insert(isolate()->factory()->NewNumber(256)), 256u);
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 257);
@@ -288,11 +291,12 @@ TEST_F(ConstantArrayBuilderTest, ReservationsAtAllScales) {
for (int i = 65536; i < 131072; ++i) {
CHECK_EQ(builder.CreateReservedEntry(), OperandSize::kQuad);
}
- CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte, Smi::FromInt(1)), 0);
+ CHECK_EQ(builder.CommitReservedEntry(OperandSize::kByte, Smi::FromInt(1)),
+ 0u);
CHECK_EQ(builder.CommitReservedEntry(OperandSize::kShort, Smi::FromInt(2)),
- 256);
+ 256u);
CHECK_EQ(builder.CommitReservedEntry(OperandSize::kQuad, Smi::FromInt(3)),
- 65536);
+ 65536u);
Handle<FixedArray> constant_array = builder.ToFixedArray(isolate());
CHECK_EQ(constant_array->length(), 65537);
int count = 1;
@@ -326,7 +330,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithFixedReservations) {
Handle<Object> empty = builder.At(i);
CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
} else {
- CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), static_cast<int>(i));
}
}
@@ -338,7 +342,7 @@ TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithFixedReservations) {
// Check values after reserved entries are inserted.
for (size_t i = 0; i < k16BitCapacity; i++) {
- CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), static_cast<int>(i));
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 53afb35a12..b8eb64c884 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -335,7 +335,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
IsChangeUint32ToUint64(after_lookahead_bytecode);
}
target_bytecode_matcher =
- IsPhi(MachineRepresentation::kWord8, target_bytecode_matcher,
+ IsPhi(MachineType::PointerRepresentation(), target_bytecode_matcher,
after_lookahead_bytecode, _);
code_target_matcher =
m.IsLoad(MachineType::Pointer(),
@@ -538,9 +538,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* value = m.Int32Constant(44);
- EXPECT_THAT(m.SmiTag(value),
- IsIntPtrConstant(static_cast<intptr_t>(44)
- << (kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(m.SmiTag(value), IsBitcastWordToTaggedSigned(IsIntPtrConstant(
+ static_cast<intptr_t>(44)
+ << (kSmiShiftSize + kSmiTagSize))));
EXPECT_THAT(m.SmiUntag(value),
IsWordSar(IsBitcastTaggedToWord(value),
IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
@@ -606,39 +606,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
}
}
-TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- Node* context = m.IntPtrConstant(1);
- Node* slot_index = m.IntPtrConstant(22);
- Node* load_context_slot = m.LoadContextSlot(context, slot_index);
-
- Matcher<Node*> offset =
- IsIntPtrAdd(IsWordShl(slot_index, IsIntPtrConstant(kPointerSizeLog2)),
- IsIntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- EXPECT_THAT(load_context_slot,
- m.IsLoad(MachineType::AnyTagged(), context, offset));
- }
-}
-
-TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- Node* context = m.IntPtrConstant(1);
- Node* slot_index = m.IntPtrConstant(22);
- Node* value = m.SmiConstant(Smi::FromInt(100));
- Node* store_context_slot = m.StoreContextSlot(context, slot_index, value);
-
- Matcher<Node*> offset =
- IsIntPtrAdd(IsWordShl(slot_index, IsIntPtrConstant(kPointerSizeLog2)),
- IsIntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- EXPECT_THAT(store_context_slot,
- m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier),
- context, offset, value));
- }
-}
-
TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
diff --git a/deps/v8/test/unittests/register-configuration-unittest.cc b/deps/v8/test/unittests/register-configuration-unittest.cc
index 33453ce9bb..0688a5e54e 100644
--- a/deps/v8/test/unittests/register-configuration-unittest.cc
+++ b/deps/v8/test/unittests/register-configuration-unittest.cc
@@ -16,8 +16,6 @@ class RegisterConfigurationUnitTest : public ::testing::Test {
public:
RegisterConfigurationUnitTest() {}
virtual ~RegisterConfigurationUnitTest() {}
-
- private:
};
TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
@@ -30,9 +28,8 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
RegisterConfiguration test(
kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
- kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
- double_codes, RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr,
- nullptr);
+ kNumAllocatableDoubleRegs, general_codes, double_codes,
+ RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr, nullptr);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
@@ -67,9 +64,8 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
RegisterConfiguration test(
kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
- kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
- double_codes, RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr,
- nullptr);
+ kNumAllocatableDoubleRegs, general_codes, double_codes,
+ RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr, nullptr);
// There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4);
@@ -157,9 +153,10 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2 + 1,
kFloat32, &alias_base_index),
0);
- EXPECT_EQ(test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters,
- kFloat32, &alias_base_index),
- 0);
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters - 1,
+ kFloat32, &alias_base_index),
+ 0);
}
} // namespace internal
diff --git a/deps/v8/test/unittests/source-position-table-unittest.cc b/deps/v8/test/unittests/source-position-table-unittest.cc
index 01d9675061..680e1be4c7 100644
--- a/deps/v8/test/unittests/source-position-table-unittest.cc
+++ b/deps/v8/test/unittests/source-position-table-unittest.cc
@@ -16,6 +16,10 @@ class SourcePositionTableTest : public TestWithIsolateAndZone {
public:
SourcePositionTableTest() {}
~SourcePositionTableTest() override {}
+
+ SourcePosition toPos(int offset) {
+ return SourcePosition(offset, offset % 10 - 1);
+ }
};
// Some random offsets, mostly at 'suspicious' bit boundaries.
@@ -25,8 +29,8 @@ static int offsets[] = {0, 1, 2, 3, 4, 30, 31, 32,
TEST_F(SourcePositionTableTest, EncodeStatement) {
SourcePositionTableBuilder builder(zone());
- for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddPosition(offsets[i], offsets[i], true);
+ for (size_t i = 0; i < arraysize(offsets); i++) {
+ builder.AddPosition(offsets[i], toPos(offsets[i]), true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
@@ -37,9 +41,9 @@ TEST_F(SourcePositionTableTest, EncodeStatement) {
TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
SourcePositionTableBuilder builder(zone());
- for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddPosition(offsets[i], offsets[i], true);
- builder.AddPosition(offsets[i], offsets[i] + 1, true);
+ for (size_t i = 0; i < arraysize(offsets); i++) {
+ builder.AddPosition(offsets[i], toPos(offsets[i]), true);
+ builder.AddPosition(offsets[i], toPos(offsets[i] + 1), true);
}
// To test correctness, we rely on the assertions in ToSourcePositionTable().
@@ -50,8 +54,8 @@ TEST_F(SourcePositionTableTest, EncodeStatementDuplicates) {
TEST_F(SourcePositionTableTest, EncodeExpression) {
SourcePositionTableBuilder builder(zone());
- for (int i = 0; i < arraysize(offsets); i++) {
- builder.AddPosition(offsets[i], offsets[i], false);
+ for (size_t i = 0; i < arraysize(offsets); i++) {
+ builder.AddPosition(offsets[i], toPos(offsets[i]), false);
}
CHECK(!builder.ToSourcePositionTable(isolate(), Handle<AbstractCode>())
.is_null());
@@ -62,24 +66,24 @@ TEST_F(SourcePositionTableTest, EncodeAscending) {
int code_offset = 0;
int source_position = 0;
- for (int i = 0; i < arraysize(offsets); i++) {
+ for (size_t i = 0; i < arraysize(offsets); i++) {
code_offset += offsets[i];
source_position += offsets[i];
if (i % 2) {
- builder.AddPosition(code_offset, source_position, true);
+ builder.AddPosition(code_offset, toPos(source_position), true);
} else {
- builder.AddPosition(code_offset, source_position, false);
+ builder.AddPosition(code_offset, toPos(source_position), false);
}
}
// Also test negative offsets for source positions:
- for (int i = 0; i < arraysize(offsets); i++) {
+ for (size_t i = 0; i < arraysize(offsets); i++) {
code_offset += offsets[i];
source_position -= offsets[i];
if (i % 2) {
- builder.AddPosition(code_offset, source_position, true);
+ builder.AddPosition(code_offset, toPos(source_position), true);
} else {
- builder.AddPosition(code_offset, source_position, false);
+ builder.AddPosition(code_offset, toPos(source_position), false);
}
}
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 6ac71d208e..1a06b36e24 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -84,13 +84,17 @@ TestWithIsolateAndZone::~TestWithIsolateAndZone() {}
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
-
base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
return isolate()->random_number_generator();
}
-
TestWithZone::~TestWithZone() {}
+TestWithNativeContext::~TestWithNativeContext() {}
+
+Handle<Context> TestWithNativeContext::native_context() const {
+ return isolate()->native_context();
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 984d63ce2a..ca7efc2a41 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -95,10 +95,9 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
};
-
class TestWithZone : public virtual ::testing::Test {
public:
- TestWithZone() : zone_(&allocator_) {}
+ TestWithZone() : zone_(&allocator_, ZONE_NAME) {}
virtual ~TestWithZone();
Zone* zone() { return &zone_; }
@@ -110,10 +109,9 @@ class TestWithZone : public virtual ::testing::Test {
DISALLOW_COPY_AND_ASSIGN(TestWithZone);
};
-
class TestWithIsolateAndZone : public virtual TestWithIsolate {
public:
- TestWithIsolateAndZone() : zone_(&allocator_) {}
+ TestWithIsolateAndZone() : zone_(&allocator_, ZONE_NAME) {}
virtual ~TestWithIsolateAndZone();
Zone* zone() { return &zone_; }
@@ -125,6 +123,18 @@ class TestWithIsolateAndZone : public virtual TestWithIsolate {
DISALLOW_COPY_AND_ASSIGN(TestWithIsolateAndZone);
};
+class TestWithNativeContext : public virtual ::v8::TestWithContext,
+ public virtual TestWithIsolate {
+ public:
+ TestWithNativeContext() {}
+ virtual ~TestWithNativeContext();
+
+ Handle<Context> native_context() const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestWithNativeContext);
+};
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 6aa2e9169f..de871b7678 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -42,6 +42,7 @@
'compiler/graph-trimmer-unittest.cc',
'compiler/graph-unittest.cc',
'compiler/graph-unittest.h',
+ 'compiler/instruction-unittest.cc',
'compiler/instruction-selector-unittest.cc',
'compiler/instruction-selector-unittest.h',
'compiler/instruction-sequence-unittest.cc',
@@ -79,8 +80,9 @@
'compiler/typed-optimization-unittest.cc',
'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
- 'compiler/zone-pool-unittest.cc',
+ 'compiler/zone-stats-unittest.cc',
'compiler-dispatcher/compiler-dispatcher-job-unittest.cc',
+ 'compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc',
'counters-unittest.cc',
'eh-frame-iterator-unittest.cc',
'eh-frame-writer-unittest.cc',
@@ -117,6 +119,8 @@
'test-utils.cc',
'unicode-unittest.cc',
'value-serializer-unittest.cc',
+ 'zone/segmentpool-unittest.cc',
+ 'zone/zone-chunk-list-unittest.cc',
'zone/zone-unittest.cc',
'wasm/asm-types-unittest.cc',
'wasm/ast-decoder-unittest.cc',
@@ -165,6 +169,8 @@
'dependencies': [
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
+ '../../src/v8.gyp:v8',
+ '../../src/v8.gyp:v8_libbase',
'../../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
@@ -227,12 +233,11 @@
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
}],
- ['component=="shared_library"', {
- # compiler-unittests can't be built against a shared library, so we
- # need to depend on the underlying static target in that case.
- 'dependencies': ['../../src/v8.gyp:v8_maybe_snapshot'],
- }, {
- 'dependencies': ['../../src/v8.gyp:v8'],
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
}],
['os_posix == 1', {
# TODO(svenpanne): This is a temporary work-around to fix the warnings
diff --git a/deps/v8/test/unittests/unittests.status b/deps/v8/test/unittests/unittests.status
index 71c17f6b6b..51b7de8ac1 100644
--- a/deps/v8/test/unittests/unittests.status
+++ b/deps/v8/test/unittests/unittests.status
@@ -9,6 +9,8 @@
'Ieee754.Tan': [SKIP],
'Ieee754.Acosh': [SKIP],
'Ieee754.Asinh': [SKIP],
+ 'MoveOptimizerTest.RemovesRedundantExplicit': [SKIP],
+ 'RegisterAllocatorTest.CanAllocateFPRegisters': [SKIP],
}], # 'arch == x87'
['variant == asm_wasm', {
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index d88d60a3e6..1dabd2a17a 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -64,6 +64,7 @@ class ValueSerializerTest : public TestWithIsolate {
// Overridden in more specific fixtures.
virtual ValueSerializer::Delegate* GetSerializerDelegate() { return nullptr; }
virtual void BeforeEncode(ValueSerializer*) {}
+ virtual void AfterEncode() {}
virtual ValueDeserializer::Delegate* GetDeserializerDelegate() {
return nullptr;
}
@@ -109,7 +110,11 @@ class ValueSerializerTest : public TestWithIsolate {
if (!serializer.WriteValue(context, value).FromMaybe(false)) {
return Nothing<std::vector<uint8_t>>();
}
- return Just(serializer.ReleaseBuffer());
+ AfterEncode();
+ std::pair<uint8_t*, size_t> buffer = serializer.Release();
+ std::vector<uint8_t> result(buffer.first, buffer.first + buffer.second);
+ free(buffer.first);
+ return Just(std::move(result));
}
template <typename InputFunctor, typename EncodedDataFunctor>
@@ -173,7 +178,7 @@ class ValueSerializerTest : public TestWithIsolate {
deserializer.SetSupportsLegacyWireFormat(true);
BeforeDecode(&deserializer);
ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
- ASSERT_EQ(0, deserializer.GetWireFormatVersion());
+ ASSERT_EQ(0u, deserializer.GetWireFormatVersion());
Local<Value> result;
ASSERT_TRUE(deserializer.ReadValue(context).ToLocal(&result));
ASSERT_FALSE(result.IsEmpty());
@@ -800,7 +805,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
// A simple array of integers.
RoundTripTest("[1, 2, 3, 4, 5]", [this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(5, Array::Cast(*value)->Length());
+ EXPECT_EQ(5u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Array.prototype"));
EXPECT_TRUE(
@@ -811,14 +816,14 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
"(() => { var x = new Array(1000); x[500] = 42; return x; })()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
});
// Duplicate reference.
RoundTripTest(
"(() => { var y = {}; return [y, y]; })()", [this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
});
// Duplicate reference in a sparse array.
@@ -826,7 +831,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
"(() => { var x = new Array(1000); x[1] = x[500] = {}; return x; })()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("typeof result[1] === 'object'"));
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
@@ -836,7 +841,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
"(() => { var y = []; y[0] = y; return y; })()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1, Array::Cast(*value)->Length());
+ ASSERT_EQ(1u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
});
// Self reference in a sparse array.
@@ -844,7 +849,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
"(() => { var y = new Array(1000); y[519] = y; return y; })()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
});
// Array with additional properties.
@@ -852,7 +857,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
"(() => { var y = [1, 2]; y.foo = 'bar'; return y; })()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
});
@@ -861,7 +866,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
"(() => { var y = new Array(1000); y.foo = 'bar'; return y; })()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === ','.repeat(999)"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
@@ -869,7 +874,7 @@ TEST_F(ValueSerializerTest, RoundTripArray) {
// The distinction between holes and undefined elements must be maintained.
RoundTripTest("[,undefined]", [this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
EXPECT_TRUE(
@@ -886,7 +891,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x49, 0x08, 0x3f, 0x01, 0x49, 0x0a, 0x24, 0x00, 0x05, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(5, Array::Cast(*value)->Length());
+ EXPECT_EQ(5u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Array.prototype"));
EXPECT_TRUE(EvaluateScriptForResultBool(
@@ -897,7 +902,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0xe8, 0x07, 0x3f, 0x01, 0x49, 0x54, 0x40, 0x01, 0xe8, 0x07},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(1000, Array::Cast(*value)->Length());
+ EXPECT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[500] === 42"));
});
// Duplicate reference.
@@ -906,7 +911,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x02, 0x5e, 0x01, 0x24, 0x00, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result[1]"));
});
// Duplicate reference in a sparse array.
@@ -916,7 +921,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x07, 0x3f, 0x02, 0x5e, 0x01, 0x40, 0x02, 0xe8, 0x07, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("typeof result[1] === 'object'"));
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === result[500]"));
@@ -926,7 +931,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x00, 0x01, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1, Array::Cast(*value)->Length());
+ ASSERT_EQ(1u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === result"));
});
// Self reference in a sparse array.
@@ -935,7 +940,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x8e, 0x08, 0x3f, 0x01, 0x5e, 0x00, 0x40, 0x01, 0xe8, 0x07},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[519] === result"));
});
// Array with additional properties.
@@ -945,7 +950,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x01, 0x53, 0x03, 0x62, 0x61, 0x72, 0x24, 0x01, 0x02, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result.toString() === '1,2'"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
});
@@ -955,7 +960,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
0x62, 0x61, 0x72, 0x40, 0x01, 0xe8, 0x07, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"result.toString() === ','.repeat(999)"));
EXPECT_TRUE(EvaluateScriptForResultBool("result.foo === 'bar'"));
@@ -967,7 +972,7 @@ TEST_F(ValueSerializerTest, DecodeArray) {
{0xff, 0x09, 0x61, 0x02, 0x49, 0x02, 0x5f, 0x40, 0x01, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("typeof result[0] === 'undefined'"));
EXPECT_TRUE(
@@ -977,6 +982,14 @@ TEST_F(ValueSerializerTest, DecodeArray) {
});
}
+TEST_F(ValueSerializerTest, DecodeInvalidOverLargeArray) {
+ // So large it couldn't exist in the V8 heap, and its size couldn't fit in a
+ // SMI on 32-bit systems (2^30).
+ InvalidDecodeTest({0xff, 0x09, 0x41, 0x80, 0x80, 0x80, 0x80, 0x04});
+ // Not so large, but there isn't enough data left in the buffer.
+ InvalidDecodeTest({0xff, 0x09, 0x41, 0x01});
+}
+
TEST_F(ValueSerializerTest, RoundTripArrayWithNonEnumerableElement) {
// Even though this array looks like [1,5,3], the 5 should be missing from the
// perspective of structured clone, which only clones properties that were
@@ -989,7 +1002,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithNonEnumerableElement) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(3, Array::Cast(*value)->Length());
+ ASSERT_EQ(3u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty('1')"));
});
}
@@ -1003,7 +1016,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
@@ -1017,7 +1030,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("typeof result[1] === 'undefined'"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(1)"));
@@ -1031,7 +1044,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(4, Array::Cast(*value)->Length());
+ ASSERT_EQ(4u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
});
@@ -1044,7 +1057,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(4, Array::Cast(*value)->Length());
+ ASSERT_EQ(4u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[2] === 3"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(3)"));
});
@@ -1057,7 +1070,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[0] === 1"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(2)"));
});
@@ -1069,7 +1082,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[2] === 3"));
EXPECT_TRUE(EvaluateScriptForResultBool("!result.hasOwnProperty(3)"));
});
@@ -1084,7 +1097,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
});
// Same for sparse arrays.
@@ -1098,7 +1111,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 3"));
});
// Getters on the array itself must also run.
@@ -1110,7 +1123,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(3, Array::Cast(*value)->Length());
+ ASSERT_EQ(3u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
});
// Same for sparse arrays.
@@ -1123,7 +1136,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] === 4"));
});
// Even with a getter that deletes things, we don't read from the prototype.
@@ -1135,7 +1148,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(2, Array::Cast(*value)->Length());
+ ASSERT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
});
// Same for sparse arrays.
@@ -1148,7 +1161,7 @@ TEST_F(ValueSerializerTest, RoundTripArrayWithTrickyGetters) {
"})()",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(1000, Array::Cast(*value)->Length());
+ ASSERT_EQ(1000u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
});
}
@@ -1158,7 +1171,7 @@ TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
DecodeTestForVersion0({0x40, 0x00, 0x00, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- ASSERT_EQ(0, Array::Cast(*value)->Length());
+ ASSERT_EQ(0u, Array::Cast(*value)->Length());
});
// Sparse array with a mixture of elements and properties.
DecodeTestForVersion0(
@@ -1167,7 +1180,7 @@ TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
0x03, 'b', 'a', 'z', 0x49, 0x0b, 0x40, 0x04, 0x03, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(3, Array::Cast(*value)->Length());
+ EXPECT_EQ(3u, Array::Cast(*value)->Length());
EXPECT_TRUE(
EvaluateScriptForResultBool("result.toString() === 'a,,5'"));
EXPECT_TRUE(EvaluateScriptForResultBool("!(1 in result)"));
@@ -1179,7 +1192,7 @@ TEST_F(ValueSerializerTest, DecodeSparseArrayVersion0) {
{0x55, 0x01, 0x55, 0x01, 0x54, 0x40, 0x01, 0x02, 0x40, 0x01, 0x02, 0x00},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsArray());
- EXPECT_EQ(2, Array::Cast(*value)->Length());
+ EXPECT_EQ(2u, Array::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result)"));
EXPECT_TRUE(EvaluateScriptForResultBool("result[1] instanceof Array"));
EXPECT_TRUE(EvaluateScriptForResultBool("!(0 in result[1])"));
@@ -1732,7 +1745,6 @@ class ValueSerializerTestWithArrayBufferTransfer : public ValueSerializerTest {
{
Context::Scope scope(serialization_context());
input_buffer_ = ArrayBuffer::New(isolate(), nullptr, 0);
- input_buffer_->Neuter();
}
{
Context::Scope scope(deserialization_context());
@@ -1749,6 +1761,8 @@ class ValueSerializerTestWithArrayBufferTransfer : public ValueSerializerTest {
serializer->TransferArrayBuffer(0, input_buffer_);
}
+ void AfterEncode() override { input_buffer_->Neuter(); }
+
void BeforeDecode(ValueDeserializer* deserializer) override {
deserializer->TransferArrayBuffer(0, output_buffer_);
}
@@ -1797,8 +1811,8 @@ TEST_F(ValueSerializerTest, RoundTripTypedArray) {
#define TYPED_ARRAY_ROUND_TRIP_TEST(Type, type, TYPE, ctype, size) \
RoundTripTest("new " #Type "Array(2)", [this](Local<Value> value) { \
ASSERT_TRUE(value->Is##Type##Array()); \
- EXPECT_EQ(2 * size, TypedArray::Cast(*value)->ByteLength()); \
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length()); \
+ EXPECT_EQ(2u * size, TypedArray::Cast(*value)->ByteLength()); \
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length()); \
EXPECT_TRUE(EvaluateScriptForResultBool( \
"Object.getPrototypeOf(result) === " #Type "Array.prototype")); \
});
@@ -1852,8 +1866,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x42, 0x00, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsUint8Array());
- EXPECT_EQ(2, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Uint8Array.prototype"));
});
@@ -1861,8 +1875,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x62, 0x00, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsInt8Array());
- EXPECT_EQ(2, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Int8Array.prototype"));
});
@@ -1871,8 +1885,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x56, 0x57, 0x00, 0x04},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsUint16Array());
- EXPECT_EQ(4, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Uint16Array.prototype"));
});
@@ -1880,8 +1894,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x56, 0x77, 0x00, 0x04},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsInt16Array());
- EXPECT_EQ(4, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(4u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Int16Array.prototype"));
});
@@ -1889,8 +1903,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x44, 0x00, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsUint32Array());
- EXPECT_EQ(8, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Uint32Array.prototype"));
});
@@ -1898,8 +1912,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x64, 0x00, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsInt32Array());
- EXPECT_EQ(8, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Int32Array.prototype"));
});
@@ -1907,8 +1921,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x66, 0x00, 0x08},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsFloat32Array());
- EXPECT_EQ(8, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(8u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Float32Array.prototype"));
});
@@ -1917,8 +1931,8 @@ TEST_F(ValueSerializerTest, DecodeTypedArray) {
0x00, 0x00, 0x00, 0x00, 0x56, 0x46, 0x00, 0x10},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsFloat64Array());
- EXPECT_EQ(16, TypedArray::Cast(*value)->ByteLength());
- EXPECT_EQ(2, TypedArray::Cast(*value)->Length());
+ EXPECT_EQ(16u, TypedArray::Cast(*value)->ByteLength());
+ EXPECT_EQ(2u, TypedArray::Cast(*value)->Length());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === Float64Array.prototype"));
});
@@ -1984,15 +1998,18 @@ TEST_F(ValueSerializerTest, DecodeInvalidTypedArray) {
// Byte length not divisible by element size.
InvalidDecodeTest(
{0xff, 0x09, 0x42, 0x04, 0x00, 0x00, 0x00, 0x00, 0x56, 0x77, 0x02, 0x01});
+ // Invalid view type (0xff).
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x42, 0x02, 0x00, 0x00, 0x56, 0xff, 0x01, 0x01});
}
TEST_F(ValueSerializerTest, RoundTripDataView) {
RoundTripTest("new DataView(new ArrayBuffer(4), 1, 2)",
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDataView());
- EXPECT_EQ(1, DataView::Cast(*value)->ByteOffset());
- EXPECT_EQ(2, DataView::Cast(*value)->ByteLength());
- EXPECT_EQ(4, DataView::Cast(*value)->Buffer()->ByteLength());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === DataView.prototype"));
});
@@ -2003,9 +2020,9 @@ TEST_F(ValueSerializerTest, DecodeDataView) {
0x00, 0x56, 0x3f, 0x01, 0x02},
[this](Local<Value> value) {
ASSERT_TRUE(value->IsDataView());
- EXPECT_EQ(1, DataView::Cast(*value)->ByteOffset());
- EXPECT_EQ(2, DataView::Cast(*value)->ByteLength());
- EXPECT_EQ(4, DataView::Cast(*value)->Buffer()->ByteLength());
+ EXPECT_EQ(1u, DataView::Cast(*value)->ByteOffset());
+ EXPECT_EQ(2u, DataView::Cast(*value)->ByteLength());
+ EXPECT_EQ(4u, DataView::Cast(*value)->Buffer()->ByteLength());
EXPECT_TRUE(EvaluateScriptForResultBool(
"Object.getPrototypeOf(result) === DataView.prototype"));
});
@@ -2358,5 +2375,162 @@ TEST_F(ValueSerializerTestWithHostObject, RoundTripSameObject) {
});
}
+// It's expected that WebAssembly has more exhaustive tests elsewhere; this
+// mostly checks that the logic to embed it in structured clone serialization
+// works correctly.
+
+class ValueSerializerTestWithWasm : public ValueSerializerTest {
+ protected:
+ static void SetUpTestCase() {
+ g_saved_flag = i::FLAG_expose_wasm;
+ i::FLAG_expose_wasm = true;
+ ValueSerializerTest::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ ValueSerializerTest::TearDownTestCase();
+ i::FLAG_expose_wasm = g_saved_flag;
+ g_saved_flag = false;
+ }
+
+ private:
+ static bool g_saved_flag;
+};
+
+bool ValueSerializerTestWithWasm::g_saved_flag = false;
+
+// A simple module which exports an "increment" function.
+// Copied from test/mjsunit/wasm/incrementer.wasm.
+const unsigned char kIncrementerWasm[] = {
+ 0x00, 0x61, 0x73, 0x6d, 0x0d, 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60,
+ 0x01, 0x7f, 0x01, 0x7f, 0x03, 0x02, 0x01, 0x00, 0x07, 0x0d, 0x01, 0x09,
+ 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x00, 0x0a,
+ 0x08, 0x01, 0x06, 0x00, 0x20, 0x00, 0x41, 0x01, 0x6a};
+
+TEST_F(ValueSerializerTestWithWasm, RoundTripWasmModule) {
+ RoundTripTest(
+ [this]() {
+ return WasmCompiledModule::DeserializeOrCompile(
+ isolate(), {nullptr, 0},
+ {kIncrementerWasm, sizeof(kIncrementerWasm)})
+ .ToLocalChecked();
+ },
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new WebAssembly.Instance(result).exports.increment(8) === 9"));
+ });
+}
+
+// As produced around Chrome 56.
+const unsigned char kSerializedIncrementerWasm[] = {
+ 0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x2d, 0x00, 0x61, 0x73, 0x6d, 0x0d,
+ 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60, 0x01, 0x7f, 0x01, 0x7f, 0x03,
+ 0x02, 0x01, 0x00, 0x07, 0x0d, 0x01, 0x09, 0x69, 0x6e, 0x63, 0x72, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x00, 0x0a, 0x08, 0x01, 0x06, 0x00, 0x20,
+ 0x00, 0x41, 0x01, 0x6a, 0xf8, 0x04, 0xa1, 0x06, 0xde, 0xc0, 0xc6, 0x44,
+ 0x3c, 0x29, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x02, 0x00, 0x00, 0x81, 0x4e,
+ 0xce, 0x7c, 0x05, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x30, 0x02,
+ 0x00, 0x00, 0xb0, 0x25, 0x30, 0xe3, 0xf2, 0xdb, 0x2e, 0x48, 0x00, 0x00,
+ 0x00, 0x80, 0xe8, 0x00, 0x00, 0x80, 0xe0, 0x01, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x07, 0x08, 0x00, 0x00, 0x09, 0x04,
+ 0x10, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3c, 0x8c, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x01, 0x10, 0x8c, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x02, 0x70, 0x94, 0x01, 0x0c, 0x8b,
+ 0xc1, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x25, 0xdc, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x01, 0x10, 0x8c, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x84, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x05, 0x7d, 0x01, 0x1a, 0xe1, 0x02, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x23, 0x88, 0x42, 0x32, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x04, 0x00,
+ 0x00, 0x02, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x49, 0x3b, 0xa5, 0x60, 0x0c, 0x00,
+ 0x00, 0x0f, 0x86, 0x04, 0x00, 0x00, 0x00, 0x83, 0xc0, 0x01, 0xc3, 0x55,
+ 0x48, 0x89, 0xe5, 0x49, 0xba, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x41, 0x52, 0x48, 0x83, 0xec, 0x08, 0x48, 0x89, 0x45, 0xf0, 0x48,
+ 0xbb, 0xb0, 0x67, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xc0, 0x48,
+ 0xbe, 0xe1, 0x57, 0x81, 0x85, 0xf6, 0x14, 0x00, 0x00, 0xe8, 0xfc, 0x3c,
+ 0xea, 0xff, 0x48, 0x8b, 0x45, 0xf0, 0x48, 0x8b, 0xe5, 0x5d, 0xeb, 0xbf,
+ 0x66, 0x90, 0x01, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x44, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x0f, 0x20, 0x84, 0x0f, 0x7d, 0x01, 0x0d, 0x00, 0x0f, 0x04,
+ 0x6d, 0x08, 0x0f, 0xf0, 0x02, 0x80, 0x94, 0x01, 0x0c, 0x8b, 0xc1, 0x00,
+ 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xed, 0xa9, 0x2d, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x9e, 0xe0, 0x38, 0x1a, 0x61, 0x03, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x23, 0x88, 0x42, 0x32, 0x03, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00,
+ 0x02, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x55, 0x48, 0x89, 0xe5, 0x56, 0x57, 0x48,
+ 0x8b, 0x45, 0x10, 0xe8, 0x11, 0xed, 0xed, 0xff, 0xa8, 0x01, 0x0f, 0x85,
+ 0x2d, 0x00, 0x00, 0x00, 0x48, 0xc1, 0xe8, 0x20, 0xc5, 0xf9, 0x57, 0xc0,
+ 0xc5, 0xfb, 0x2a, 0xc0, 0xc4, 0xe1, 0xfb, 0x2c, 0xc0, 0x48, 0x83, 0xf8,
+ 0x01, 0x0f, 0x80, 0x34, 0x00, 0x00, 0x00, 0x8b, 0xc0, 0xe8, 0x27, 0xfe,
+ 0xff, 0xff, 0x48, 0xc1, 0xe0, 0x20, 0x48, 0x8b, 0xe5, 0x5d, 0xc2, 0x10,
+ 0x00, 0x49, 0x39, 0x45, 0xa0, 0x0f, 0x84, 0x07, 0x00, 0x00, 0x00, 0xc5,
+ 0xfb, 0x10, 0x40, 0x07, 0xeb, 0xce, 0x49, 0xba, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0x7f, 0xc4, 0xc1, 0xf9, 0x6e, 0xc2, 0xeb, 0xbd, 0x48,
+ 0x83, 0xec, 0x08, 0xc5, 0xfb, 0x11, 0x04, 0x24, 0xe8, 0xcc, 0xfe, 0xff,
+ 0xff, 0x48, 0x83, 0xc4, 0x08, 0xeb, 0xb8, 0x66, 0x90, 0x02, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
+ 0x0f, 0x39, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x0f, 0xff, 0xff, 0x00,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x20, 0x84,
+ 0x0f, 0xcc, 0x6e, 0x7d, 0x01, 0x72, 0x98, 0x00, 0x0f, 0xdc, 0x6d, 0x0c,
+ 0x0f, 0xb0, 0x84, 0x0d, 0x04, 0x84, 0xe3, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x84, 0xe0, 0x84, 0x84, 0x18, 0x2f, 0x2f, 0x2f,
+ 0x2f, 0x2f};
+
+TEST_F(ValueSerializerTestWithWasm, DecodeWasmModule) {
+ std::vector<uint8_t> raw(
+ kSerializedIncrementerWasm,
+ kSerializedIncrementerWasm + sizeof(kSerializedIncrementerWasm));
+ DecodeTest(raw, [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new WebAssembly.Instance(result).exports.increment(8) === 9"));
+ });
+}
+
+// As above, but with empty compiled data. Should work due to fallback to wire
+// data.
+const unsigned char kSerializedIncrementerWasmWithInvalidCompiledData[] = {
+ 0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x2d, 0x00, 0x61, 0x73, 0x6d,
+ 0x0d, 0x00, 0x00, 0x00, 0x01, 0x06, 0x01, 0x60, 0x01, 0x7f, 0x01,
+ 0x7f, 0x03, 0x02, 0x01, 0x00, 0x07, 0x0d, 0x01, 0x09, 0x69, 0x6e,
+ 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x00, 0x0a, 0x08,
+ 0x01, 0x06, 0x00, 0x20, 0x00, 0x41, 0x01, 0x6a, 0x00};
+
+TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidCompiledData) {
+ std::vector<uint8_t> raw(
+ kSerializedIncrementerWasmWithInvalidCompiledData,
+ kSerializedIncrementerWasmWithInvalidCompiledData +
+ sizeof(kSerializedIncrementerWasmWithInvalidCompiledData));
+ DecodeTest(raw, [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsWebAssemblyCompiledModule());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "new WebAssembly.Instance(result).exports.increment(8) === 9"));
+ });
+}
+
+// As above, but also with empty wire data. Should fail.
+const unsigned char kSerializedIncrementerWasmInvalid[] = {
+ 0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x00, 0x00};
+
+TEST_F(ValueSerializerTestWithWasm,
+ DecodeWasmModuleWithInvalidCompiledAndWireData) {
+ std::vector<uint8_t> raw(kSerializedIncrementerWasmInvalid,
+ kSerializedIncrementerWasmInvalid +
+ sizeof(kSerializedIncrementerWasmInvalid));
+ InvalidDecodeTest(raw);
+}
+
+TEST_F(ValueSerializerTestWithWasm, DecodeWasmModuleWithInvalidDataLength) {
+ InvalidDecodeTest({0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x7f, 0x00});
+ InvalidDecodeTest({0xff, 0x09, 0x3f, 0x00, 0x57, 0x79, 0x00, 0x7f});
+}
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/asm-types-unittest.cc b/deps/v8/test/unittests/wasm/asm-types-unittest.cc
index 36493df86d..ebdd74a9f4 100644
--- a/deps/v8/test/unittests/wasm/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/wasm/asm-types-unittest.cc
@@ -155,7 +155,7 @@ TEST_F(AsmTypeTest, ValidateBits) {
do { \
++total_types; \
if (AsmValueTypeParents::CamelName != 0) { \
- EXPECT_NE(0, ParentsOf(AsmType::CamelName()).size()) << #CamelName; \
+ EXPECT_NE(0u, ParentsOf(AsmType::CamelName()).size()) << #CamelName; \
} \
seen_types.insert(Type::CamelName()); \
seen_numbers.insert(number); \
@@ -163,7 +163,7 @@ TEST_F(AsmTypeTest, ValidateBits) {
EXPECT_NE(0, number) << Type::CamelName()->Name(); \
/* Inheritance cycles - unlikely, but we're paranoid and check for it */ \
/* anyways.*/ \
- EXPECT_EQ(0, (1 << (number)) & AsmValueTypeParents::CamelName); \
+ EXPECT_EQ(0u, (1 << (number)) & AsmValueTypeParents::CamelName); \
} while (0);
FOR_EACH_ASM_VALUE_TYPE_LIST(V)
#undef V
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
index cbaf6201c6..e630ac4721 100644
--- a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -11,6 +11,7 @@
#include "src/objects.h"
#include "src/wasm/ast-decoder.h"
+#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -1149,7 +1150,7 @@ TEST_F(AstDecoderTest, AllSimpleExpressions) {
}
TEST_F(AstDecoderTest, MemorySize) {
- byte code[] = {kExprMemorySize};
+ byte code[] = {kExprMemorySize, 0};
EXPECT_VERIFIES_C(i_i, code);
EXPECT_FAILURE_C(f_ff, code);
}
@@ -1183,7 +1184,7 @@ TEST_F(AstDecoderTest, LoadMemAlignment) {
{kExprF64LoadMem, 3}, // --
};
- for (int i = 0; i < arraysize(values); i++) {
+ for (size_t i = 0; i < arraysize(values); i++) {
for (byte alignment = 0; alignment <= 4; alignment++) {
byte code[] = {WASM_ZERO, static_cast<byte>(values[i].instruction),
alignment, ZERO_OFFSET, WASM_DROP};
@@ -1283,7 +1284,7 @@ class TestModuleEnv : public ModuleEnv {
module = &mod;
}
byte AddGlobal(LocalType type, bool mutability = true) {
- mod.globals.push_back({type, mutability, NO_INIT, 0, false, false});
+ mod.globals.push_back({type, mutability, WasmInitExpr(), 0, false, false});
CHECK(mod.globals.size() <= 127);
return static_cast<byte>(mod.globals.size() - 1);
}
@@ -1311,6 +1312,11 @@ class TestModuleEnv : public ModuleEnv {
return result;
}
+ void InitializeFunctionTable() {
+ mod.function_tables.push_back(
+ {0, 0, true, std::vector<int32_t>(), false, false, SignatureMap()});
+ }
+
private:
WasmModule mod;
};
@@ -1421,6 +1427,7 @@ TEST_F(AstDecoderTest, MultiReturnType) {
TEST_F(AstDecoderTest, SimpleIndirectCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
+ module_env.InitializeFunctionTable();
module = &module_env;
byte f0 = module_env.AddSignature(sigs.i_v());
@@ -1436,6 +1443,7 @@ TEST_F(AstDecoderTest, SimpleIndirectCalls) {
TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
+ module_env.InitializeFunctionTable();
module = &module_env;
EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(0, WASM_ZERO));
@@ -1452,6 +1460,7 @@ TEST_F(AstDecoderTest, IndirectCallsOutOfBounds) {
TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
+ module_env.InitializeFunctionTable();
module = &module_env;
byte f0 = module_env.AddFunction(sigs.i_f());
@@ -1471,6 +1480,21 @@ TEST_F(AstDecoderTest, IndirectCallsWithMismatchedSigs3) {
EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_F32(17.6)));
}
+TEST_F(AstDecoderTest, IndirectCallsWithoutTableCrash) {
+ FunctionSig* sig = sigs.i_i();
+ TestModuleEnv module_env;
+ module = &module_env;
+
+ byte f0 = module_env.AddSignature(sigs.i_v());
+ byte f1 = module_env.AddSignature(sigs.i_i());
+ byte f2 = module_env.AddSignature(sigs.i_ii());
+
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
+ EXPECT_FAILURE_S(sig, WASM_CALL_INDIRECT1(f1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_FAILURE_S(
+ sig, WASM_CALL_INDIRECT2(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
+}
+
TEST_F(AstDecoderTest, SimpleImportCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleEnv module_env;
@@ -1632,7 +1656,7 @@ TEST_F(AstDecoderTest, WasmGrowMemory) {
module = &module_env;
module->origin = kWasmOrigin;
- byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
+ byte code[] = {WASM_GET_LOCAL(0), kExprGrowMemory, 0};
EXPECT_VERIFIES_C(i_i, code);
EXPECT_FAILURE_C(i_d, code);
}
@@ -1642,7 +1666,7 @@ TEST_F(AstDecoderTest, AsmJsGrowMemory) {
module = &module_env;
module->origin = kAsmJsOrigin;
- byte code[] = {WASM_UNOP(kExprGrowMemory, WASM_GET_LOCAL(0))};
+ byte code[] = {WASM_GET_LOCAL(0), kExprGrowMemory, 0};
EXPECT_FAILURE_C(i_i, code);
}
@@ -1673,7 +1697,7 @@ TEST_F(AstDecoderTest, AsmJsBinOpsCheckOrigin) {
TestModuleEnv module_env;
module = &module_env;
module->origin = kAsmJsOrigin;
- for (int i = 0; i < arraysize(AsmJsBinOps); i++) {
+ for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
TestBinop(AsmJsBinOps[i].op, AsmJsBinOps[i].sig);
}
}
@@ -1682,7 +1706,7 @@ TEST_F(AstDecoderTest, AsmJsBinOpsCheckOrigin) {
TestModuleEnv module_env;
module = &module_env;
module->origin = kWasmOrigin;
- for (int i = 0; i < arraysize(AsmJsBinOps); i++) {
+ for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
byte code[] = {
WASM_BINOP(AsmJsBinOps[i].op, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1))};
EXPECT_FAILURE_SC(AsmJsBinOps[i].sig, code);
@@ -1721,7 +1745,7 @@ TEST_F(AstDecoderTest, AsmJsUnOpsCheckOrigin) {
TestModuleEnv module_env;
module = &module_env;
module->origin = kAsmJsOrigin;
- for (int i = 0; i < arraysize(AsmJsUnOps); i++) {
+ for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
TestUnop(AsmJsUnOps[i].op, AsmJsUnOps[i].sig);
}
}
@@ -1730,7 +1754,7 @@ TEST_F(AstDecoderTest, AsmJsUnOpsCheckOrigin) {
TestModuleEnv module_env;
module = &module_env;
module->origin = kWasmOrigin;
- for (int i = 0; i < arraysize(AsmJsUnOps); i++) {
+ for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
byte code[] = {WASM_UNOP(AsmJsUnOps[i].op, WASM_GET_LOCAL(0))};
EXPECT_FAILURE_SC(AsmJsUnOps[i].sig, code);
}
@@ -2222,7 +2246,7 @@ class BranchTableIteratorTest : public TestWithZone {
Decoder decoder(start, end);
BranchTableOperand operand(&decoder, start);
BranchTableIterator iterator(&decoder, operand);
- EXPECT_EQ(end - start - 1, iterator.length());
+ EXPECT_EQ(end - start - 1u, iterator.length());
EXPECT_TRUE(decoder.ok());
}
void CheckBrTableError(const byte* start, const byte* end) {
@@ -2280,16 +2304,18 @@ class WasmOpcodeLengthTest : public TestWithZone {
WasmOpcodeLengthTest() : TestWithZone() {}
};
-#define EXPECT_LENGTH(expected, opcode) \
- { \
- static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
- EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code))); \
+#define EXPECT_LENGTH(expected, opcode) \
+ { \
+ static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
+ EXPECT_EQ(static_cast<unsigned>(expected), \
+ OpcodeLength(code, code + sizeof(code))); \
}
-#define EXPECT_LENGTH_N(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code))); \
+#define EXPECT_LENGTH_N(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(static_cast<unsigned>(expected), \
+ OpcodeLength(code, code + sizeof(code))); \
}
TEST_F(WasmOpcodeLengthTest, Statements) {
@@ -2316,7 +2342,7 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprGetGlobal);
EXPECT_LENGTH(2, kExprSetGlobal);
EXPECT_LENGTH(2, kExprCallFunction);
- EXPECT_LENGTH(2, kExprCallIndirect);
+ EXPECT_LENGTH(3, kExprCallIndirect);
}
TEST_F(WasmOpcodeLengthTest, I32Const) {
@@ -2375,8 +2401,8 @@ TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
}
TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
- EXPECT_LENGTH(1, kExprMemorySize);
- EXPECT_LENGTH(1, kExprGrowMemory);
+ EXPECT_LENGTH(2, kExprMemorySize);
+ EXPECT_LENGTH(2, kExprGrowMemory);
}
TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
@@ -2500,6 +2526,19 @@ TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
EXPECT_LENGTH(1, kExprI64ReinterpretF64);
}
+TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
+#define TEST_SIMD(name, opcode, sig) \
+ EXPECT_LENGTH_N(2, kSimdPrefix, static_cast<byte>(kExpr##name & 0xff));
+ FOREACH_SIMD_0_OPERAND_OPCODE(TEST_SIMD)
+#undef TEST_SIMD
+#define TEST_SIMD(name, opcode, sig) \
+ EXPECT_LENGTH_N(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xff));
+ FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
+#undef TEST_SIMD
+ // test for bad simd opcode
+ EXPECT_LENGTH_N(2, kSimdPrefix, 0xff);
+}
+
typedef ZoneVector<LocalType> LocalTypeMap;
class LocalDeclDecoderTest : public TestWithZone {
@@ -2534,7 +2573,7 @@ TEST_F(LocalDeclDecoderTest, NoLocals) {
AstLocalDecls decls(zone());
bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
EXPECT_TRUE(result);
- EXPECT_EQ(0, decls.total_local_count);
+ EXPECT_EQ(0u, decls.total_local_count);
}
TEST_F(LocalDeclDecoderTest, OneLocal) {
@@ -2545,10 +2584,10 @@ TEST_F(LocalDeclDecoderTest, OneLocal) {
AstLocalDecls decls(zone());
bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
EXPECT_TRUE(result);
- EXPECT_EQ(1, decls.total_local_count);
+ EXPECT_EQ(1u, decls.total_local_count);
LocalTypeMap map = Expand(decls);
- EXPECT_EQ(1, map.size());
+ EXPECT_EQ(1u, map.size());
EXPECT_EQ(type, map[0]);
}
}
@@ -2562,10 +2601,10 @@ TEST_F(LocalDeclDecoderTest, FiveLocals) {
bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
EXPECT_TRUE(result);
EXPECT_EQ(sizeof(data), decls.decls_encoded_size);
- EXPECT_EQ(5, decls.total_local_count);
+ EXPECT_EQ(5u, decls.total_local_count);
LocalTypeMap map = Expand(decls);
- EXPECT_EQ(5, map.size());
+ EXPECT_EQ(5u, map.size());
ExpectRun(map, 0, type, 5);
}
}
@@ -2581,10 +2620,11 @@ TEST_F(LocalDeclDecoderTest, MixedLocals) {
bool result = DecodeLocalDecls(decls, data, data + sizeof(data));
EXPECT_TRUE(result);
EXPECT_EQ(sizeof(data), decls.decls_encoded_size);
- EXPECT_EQ(a + b + c + d, decls.total_local_count);
+ EXPECT_EQ(static_cast<uint32_t>(a + b + c + d),
+ decls.total_local_count);
LocalTypeMap map = Expand(decls);
- EXPECT_EQ(a + b + c + d, map.size());
+ EXPECT_EQ(static_cast<uint32_t>(a + b + c + d), map.size());
size_t pos = 0;
pos = ExpectRun(map, pos, kAstI32, a);
@@ -2610,7 +2650,7 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
AstLocalDecls decls(zone());
bool result = DecodeLocalDecls(decls, data, end);
EXPECT_TRUE(result);
- EXPECT_EQ(5 + 1337 + 212, decls.total_local_count);
+ EXPECT_EQ(5u + 1337u + 212u, decls.total_local_count);
LocalTypeMap map = Expand(decls);
size_t pos = 0;
@@ -2662,8 +2702,8 @@ TEST_F(BytecodeIteratorTest, WithAstDecls) {
AstLocalDecls decls(zone());
BytecodeIterator iter(code, code + sizeof(code), &decls);
- EXPECT_EQ(3, decls.decls_encoded_size);
- EXPECT_EQ(3, iter.pc_offset());
+ EXPECT_EQ(3u, decls.decls_encoded_size);
+ EXPECT_EQ(3u, iter.pc_offset());
EXPECT_TRUE(iter.has_next());
EXPECT_EQ(kExprI8Const, iter.current());
iter.next();
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index e298f0ba9f..9f68dc8c95 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -19,14 +19,18 @@ class DecoderTest : public TestWithZone {
Decoder decoder;
};
-#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
- do { \
- const byte data[] = {__VA_ARGS__}; \
- decoder.Reset(data, data + sizeof(data)); \
- unsigned length; \
- EXPECT_EQ(expected, \
- decoder.checked_read_u32v(decoder.start(), 0, &length)); \
- EXPECT_EQ(expected_length, length); \
+#define CHECK_UINT32V_INLINE(expected, expected_length, ...) \
+ do { \
+ const byte data[] = {__VA_ARGS__}; \
+ decoder.Reset(data, data + sizeof(data)); \
+ unsigned length; \
+ EXPECT_EQ(static_cast<uint32_t>(expected), \
+ decoder.checked_read_u32v(decoder.start(), 0, &length)); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(static_cast<uint32_t>(expected), decoder.consume_u32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
#define CHECK_INT32V_INLINE(expected, expected_length, ...) \
@@ -36,7 +40,11 @@ class DecoderTest : public TestWithZone {
unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i32v(decoder.start(), 0, &length)); \
- EXPECT_EQ(expected_length, length); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
+ EXPECT_EQ(data, decoder.pc()); \
+ EXPECT_TRUE(decoder.ok()); \
+ EXPECT_EQ(expected, decoder.consume_i32v()); \
+ EXPECT_EQ(data + expected_length, decoder.pc()); \
} while (false)
#define CHECK_UINT64V_INLINE(expected, expected_length, ...) \
@@ -44,9 +52,9 @@ class DecoderTest : public TestWithZone {
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
unsigned length; \
- EXPECT_EQ(expected, \
+ EXPECT_EQ(static_cast<uint64_t>(expected), \
decoder.checked_read_u64v(decoder.start(), 0, &length)); \
- EXPECT_EQ(expected_length, length); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
#define CHECK_INT64V_INLINE(expected, expected_length, ...) \
@@ -56,7 +64,7 @@ class DecoderTest : public TestWithZone {
unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i64v(decoder.start(), 0, &length)); \
- EXPECT_EQ(expected_length, length); \
+ EXPECT_EQ(static_cast<unsigned>(expected_length), length); \
} while (false)
TEST_F(DecoderTest, ReadU32v_OneByte) {
@@ -369,7 +377,7 @@ TEST_F(DecoderTest, ReadU32v_off_end1) {
unsigned length = 0;
decoder.Reset(data, data);
decoder.checked_read_u32v(decoder.start(), 0, &length);
- EXPECT_EQ(0, length);
+ EXPECT_EQ(0u, length);
EXPECT_FALSE(decoder.ok());
}
@@ -424,7 +432,7 @@ TEST_F(DecoderTest, ReadU32v_extra_bits) {
unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u32v(decoder.start(), 0, &length);
- EXPECT_EQ(5, length);
+ EXPECT_EQ(5u, length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -435,7 +443,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
- EXPECT_EQ(5, length);
+ EXPECT_EQ(5u, length);
EXPECT_TRUE(decoder.ok());
}
@@ -445,7 +453,7 @@ TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
- EXPECT_EQ(5, length);
+ EXPECT_EQ(5u, length);
EXPECT_FALSE(decoder.ok());
}
@@ -467,16 +475,16 @@ TEST_F(DecoderTest, ReadU32v_Bits) {
uint32_t val = kVals[v];
if (i < 32) val &= ((1 << i) - 1);
- int length = 1 + i / 7;
- for (int j = 0; j < kMaxSize; j++) {
+ unsigned length = 1 + i / 7;
+ for (unsigned j = 0; j < kMaxSize; j++) {
data[j] = static_cast<byte>((val >> (7 * j)) & MASK_7);
}
- for (int j = 0; j < length - 1; j++) {
+ for (unsigned j = 0; j < length - 1; j++) {
data[j] |= 0x80;
}
// foreach buffer size 0...5
- for (int limit = 0; limit <= kMaxSize; limit++) {
+ for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
uint32_t result = decoder.checked_read_u32v(data, 0, &rlen);
@@ -526,13 +534,13 @@ TEST_F(DecoderTest, ReadU64v_PowerOf2) {
const int kMaxSize = 10;
byte data[kMaxSize];
- for (int i = 0; i < 64; i++) {
+ for (unsigned i = 0; i < 64; i++) {
const uint64_t val = 1ull << i;
- int index = i / 7;
+ unsigned index = i / 7;
data[index] = 1 << (i % 7);
memset(data, 0x80, index);
- for (int limit = 0; limit <= kMaxSize; limit++) {
+ for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned length;
uint64_t result = decoder.checked_read_u64v(data, 0, &length);
@@ -564,16 +572,16 @@ TEST_F(DecoderTest, ReadU64v_Bits) {
uint64_t val = kVals[v];
if (i < 64) val &= ((1ull << i) - 1);
- int length = 1 + i / 7;
- for (int j = 0; j < kMaxSize; j++) {
+ unsigned length = 1 + i / 7;
+ for (unsigned j = 0; j < kMaxSize; j++) {
data[j] = static_cast<byte>((val >> (7 * j)) & MASK_7);
}
- for (int j = 0; j < length - 1; j++) {
+ for (unsigned j = 0; j < length - 1; j++) {
data[j] |= 0x80;
}
// foreach buffer size 0...10
- for (int limit = 0; limit <= kMaxSize; limit++) {
+ for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
uint64_t result = decoder.checked_read_u64v(data, 0, &rlen);
@@ -606,16 +614,16 @@ TEST_F(DecoderTest, ReadI64v_Bits) {
for (int i = 1; i <= 64; i++) {
const int64_t val = bit_cast<int64_t>(kVals[v] << (64 - i)) >> (64 - i);
- int length = 1 + i / 7;
- for (int j = 0; j < kMaxSize; j++) {
+ unsigned length = 1 + i / 7;
+ for (unsigned j = 0; j < kMaxSize; j++) {
data[j] = static_cast<byte>((val >> (7 * j)) & MASK_7);
}
- for (int j = 0; j < length - 1; j++) {
+ for (unsigned j = 0; j < length - 1; j++) {
data[j] |= 0x80;
}
// foreach buffer size 0...10
- for (int limit = 0; limit <= kMaxSize; limit++) {
+ for (unsigned limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
unsigned rlen;
int64_t result = decoder.checked_read_i64v(data, 0, &rlen);
@@ -638,7 +646,7 @@ TEST_F(DecoderTest, ReadU64v_extra_bits) {
unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u64v(decoder.start(), 0, &length);
- EXPECT_EQ(10, length);
+ EXPECT_EQ(10u, length);
EXPECT_FALSE(decoder.ok());
}
}
@@ -649,7 +657,7 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
- EXPECT_EQ(10, length);
+ EXPECT_EQ(10u, length);
EXPECT_TRUE(decoder.ok());
}
@@ -659,8 +667,15 @@ TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
- EXPECT_EQ(10, length);
+ EXPECT_EQ(10u, length);
+ EXPECT_FALSE(decoder.ok());
+}
+
+TEST_F(DecoderTest, FailOnNullData) {
+ decoder.Reset(nullptr, 0);
+ decoder.checkAvailable(1);
EXPECT_FALSE(decoder.ok());
+ EXPECT_FALSE(decoder.toResult(nullptr).ok());
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
index b9759332bb..6fd91e464f 100644
--- a/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
+++ b/deps/v8/test/unittests/wasm/leb-helper-unittest.cc
@@ -15,76 +15,76 @@ namespace wasm {
class LEBHelperTest : public TestWithZone {};
TEST_F(LEBHelperTest, sizeof_u32v) {
- EXPECT_EQ(1, LEBHelper::sizeof_u32v(0));
- EXPECT_EQ(1, LEBHelper::sizeof_u32v(1));
- EXPECT_EQ(1, LEBHelper::sizeof_u32v(3));
+ EXPECT_EQ(1u, LEBHelper::sizeof_u32v(0));
+ EXPECT_EQ(1u, LEBHelper::sizeof_u32v(1));
+ EXPECT_EQ(1u, LEBHelper::sizeof_u32v(3));
for (uint32_t i = 4; i < 128; i++) {
- EXPECT_EQ(1, LEBHelper::sizeof_u32v(i));
+ EXPECT_EQ(1u, LEBHelper::sizeof_u32v(i));
}
- for (uint32_t i = (1 << 7); i < (1 << 9); i++) {
- EXPECT_EQ(2, LEBHelper::sizeof_u32v(i));
+ for (uint32_t i = (1u << 7); i < (1u << 9); i++) {
+ EXPECT_EQ(2u, LEBHelper::sizeof_u32v(i));
}
- for (uint32_t i = (1 << 14); i < (1 << 16); i += 33) {
- EXPECT_EQ(3, LEBHelper::sizeof_u32v(i));
+ for (uint32_t i = (1u << 14); i < (1u << 16); i += 33) {
+ EXPECT_EQ(3u, LEBHelper::sizeof_u32v(i));
}
- for (uint32_t i = (1 << 21); i < (1 << 24); i += 33999) {
- EXPECT_EQ(4, LEBHelper::sizeof_u32v(i));
+ for (uint32_t i = (1u << 21); i < (1u << 24); i += 33999) {
+ EXPECT_EQ(4u, LEBHelper::sizeof_u32v(i));
}
- for (uint32_t i = (1 << 28); i < (1 << 31); i += 33997779) {
- EXPECT_EQ(5, LEBHelper::sizeof_u32v(i));
+ for (uint32_t i = (1u << 28); i < (1u << 31); i += 33997779u) {
+ EXPECT_EQ(5u, LEBHelper::sizeof_u32v(i));
}
- EXPECT_EQ(5, LEBHelper::sizeof_u32v(0xFFFFFFFF));
+ EXPECT_EQ(5u, LEBHelper::sizeof_u32v(0xFFFFFFFF));
}
TEST_F(LEBHelperTest, sizeof_i32v) {
- EXPECT_EQ(1, LEBHelper::sizeof_i32v(0));
- EXPECT_EQ(1, LEBHelper::sizeof_i32v(1));
- EXPECT_EQ(1, LEBHelper::sizeof_i32v(3));
+ EXPECT_EQ(1u, LEBHelper::sizeof_i32v(0));
+ EXPECT_EQ(1u, LEBHelper::sizeof_i32v(1));
+ EXPECT_EQ(1u, LEBHelper::sizeof_i32v(3));
for (int32_t i = 0; i < (1 << 6); i++) {
- EXPECT_EQ(1, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(1u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = (1 << 6); i < (1 << 8); i++) {
- EXPECT_EQ(2, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(2u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = (1 << 13); i < (1 << 15); i += 31) {
- EXPECT_EQ(3, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(3u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = (1 << 20); i < (1 << 22); i += 31991) {
- EXPECT_EQ(4, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(4u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = (1 << 27); i < (1 << 29); i += 3199893) {
- EXPECT_EQ(5, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(5u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = -(1 << 6); i <= 0; i++) {
- EXPECT_EQ(1, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(1u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = -(1 << 13); i < -(1 << 6); i++) {
- EXPECT_EQ(2, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(2u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = -(1 << 20); i < -(1 << 18); i += 11) {
- EXPECT_EQ(3, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(3u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = -(1 << 27); i < -(1 << 25); i += 11999) {
- EXPECT_EQ(4, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(4u, LEBHelper::sizeof_i32v(i));
}
for (int32_t i = -(1 << 30); i < -(1 << 28); i += 1199999) {
- EXPECT_EQ(5, LEBHelper::sizeof_i32v(i));
+ EXPECT_EQ(5u, LEBHelper::sizeof_i32v(i));
}
}
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 42798ca81b..baf6909499 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -47,8 +47,7 @@ namespace wasm {
#define EMPTY_SIGNATURES_SECTION SECTION(Type, 1), 0
#define EMPTY_FUNCTION_SIGNATURES_SECTION SECTION(Function, 1), 0
#define EMPTY_FUNCTION_BODIES_SECTION SECTION(Code, 1), 0
-#define SECTION_NAMES(size) \
- kUnknownSectionCode, U32V_1(size + 5), 4, 'n', 'a', 'm', 'e'
+#define SECTION_NAMES(size) SECTION(Unknown, size + 5), 4, 'n', 'a', 'm', 'e'
#define EMPTY_NAMES_SECTION SECTION_NAMES(1), 0
#define X1(...) __VA_ARGS__
@@ -144,14 +143,14 @@ class WasmModuleVerifyTest : public TestWithIsolateAndZone {
auto temp = new byte[total];
memcpy(temp, header, sizeof(header));
memcpy(temp + sizeof(header), module_start, size);
- ModuleResult result = DecodeWasmModule(isolate(), zone(), temp,
- temp + total, false, kWasmOrigin);
+ ModuleResult result =
+ DecodeWasmModule(isolate(), temp, temp + total, false, kWasmOrigin);
delete[] temp;
return result;
}
ModuleResult DecodeModuleNoHeader(const byte* module_start,
const byte* module_end) {
- return DecodeWasmModule(isolate(), zone(), module_start, module_end, false,
+ return DecodeWasmModule(isolate(), module_start, module_end, false,
kWasmOrigin);
}
};
@@ -193,14 +192,14 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
// Should decode to exactly one global.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1, result.val->globals.size());
- EXPECT_EQ(0, result.val->functions.size());
- EXPECT_EQ(0, result.val->data_segments.size());
+ EXPECT_EQ(1u, result.val->globals.size());
+ EXPECT_EQ(0u, result.val->functions.size());
+ EXPECT_EQ(0u, result.val->data_segments.size());
const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(kAstI32, global->type);
- EXPECT_EQ(0, global->offset);
+ EXPECT_EQ(0u, global->offset);
EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kI32Const, global->init.kind);
EXPECT_EQ(13, global->init.val.i32_const);
@@ -249,6 +248,47 @@ TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
if (result.val) delete result.val;
}
+TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
+ {
+ static const byte data[] = {
+ SECTION(Global, 6), // --
+ 1,
+ kLocalI32, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_I32V_1(13), // init
+ SECTION(Export, 8), // --
+ 1, // Export count
+ 4, // name length
+ 'n', // --
+ 'a', // --
+ 'm', // --
+ 'e', // --
+ kExternalGlobal, // global
+ 0, // global index
+ };
+ EXPECT_VERIFIES(data);
+ }
+ {
+ static const byte data[] = {
+ SECTION(Global, 6), // --
+ 1, // --
+ kLocalI32, // local type
+ 1, // mutable
+ WASM_INIT_EXPR_I32V_1(13), // init
+ SECTION(Export, 8), // --
+ 1, // Export count
+ 4, // name length
+ 'n', // --
+ 'a', // --
+ 'm', // --
+ 'e', // --
+ kExternalGlobal, // global
+ 0, // global index
+ };
+ EXPECT_FAILURE(data);
+ }
+}
+
static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
while (true) {
uint32_t next = val >> 7;
@@ -313,21 +353,21 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
// Should decode to exactly two globals.
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(2, result.val->globals.size());
- EXPECT_EQ(0, result.val->functions.size());
- EXPECT_EQ(0, result.val->data_segments.size());
+ EXPECT_EQ(2u, result.val->globals.size());
+ EXPECT_EQ(0u, result.val->functions.size());
+ EXPECT_EQ(0u, result.val->data_segments.size());
const WasmGlobal* g0 = &result.val->globals[0];
EXPECT_EQ(kAstF32, g0->type);
- EXPECT_EQ(0, g0->offset);
+ EXPECT_EQ(0u, g0->offset);
EXPECT_FALSE(g0->mutability);
EXPECT_EQ(WasmInitExpr::kF32Const, g0->init.kind);
const WasmGlobal* g1 = &result.val->globals[1];
EXPECT_EQ(kAstF64, g1->type);
- EXPECT_EQ(8, g1->offset);
+ EXPECT_EQ(8u, g1->offset);
EXPECT_TRUE(g1->mutability);
EXPECT_EQ(WasmInitExpr::kF64Const, g1->init.kind);
@@ -362,21 +402,115 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(3, result.val->signatures.size());
+ EXPECT_EQ(3u, result.val->signatures.size());
if (result.val->signatures.size() == 3) {
- EXPECT_EQ(0, result.val->signatures[0]->return_count());
- EXPECT_EQ(1, result.val->signatures[1]->return_count());
- EXPECT_EQ(1, result.val->signatures[2]->return_count());
+ EXPECT_EQ(0u, result.val->signatures[0]->return_count());
+ EXPECT_EQ(1u, result.val->signatures[1]->return_count());
+ EXPECT_EQ(1u, result.val->signatures[2]->return_count());
- EXPECT_EQ(0, result.val->signatures[0]->parameter_count());
- EXPECT_EQ(1, result.val->signatures[1]->parameter_count());
- EXPECT_EQ(2, result.val->signatures[2]->parameter_count());
+ EXPECT_EQ(0u, result.val->signatures[0]->parameter_count());
+ EXPECT_EQ(1u, result.val->signatures[1]->parameter_count());
+ EXPECT_EQ(2u, result.val->signatures[2]->parameter_count());
}
if (result.val) delete result.val;
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
+TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
+ // Import 2 globals so that we can initialize data with a global index != 0.
+ const byte data[] = {
+ SECTION(Import, 15), // section header
+ 2, // number of imports
+ NAME_LENGTH(1), // --
+ 'm', // module name
+ NAME_LENGTH(1), // --
+ 'f', // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 0, // mutability
+ NAME_LENGTH(1), // --
+ 'n', // module name
+ NAME_LENGTH(1), // --
+ 'g', // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 0, // mutability
+ SECTION(Memory, 4),
+ ENTRY_COUNT(1),
+ kResizableMaximumFlag,
+ 28,
+ 28,
+ SECTION(Data, 9),
+ ENTRY_COUNT(1),
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_GLOBAL(1), // dest addr
+ U32V_1(3), // source size
+ 'a',
+ 'b',
+ 'c' // data bytes
+ };
+ ModuleResult result = DecodeModule(data, data + sizeof(data));
+ EXPECT_OK(result);
+ WasmInitExpr expr = result.val->data_segments.back().dest_addr;
+ EXPECT_EQ(WasmInitExpr::kGlobalIndex, expr.kind);
+ EXPECT_EQ(1u, expr.val.global_index);
+ if (result.val) delete result.val;
+}
+
+TEST_F(WasmModuleVerifyTest, DataSegmentWithMutableImportedGlobal) {
+ // Only an immutable imported global can be used as an init_expr.
+ const byte data[] = {
+ SECTION(Import, 8), // section header
+ 1, // number of imports
+ NAME_LENGTH(1), // --
+ 'm', // module name
+ NAME_LENGTH(1), // --
+ 'f', // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 1, // mutability
+ SECTION(Memory, 4),
+ ENTRY_COUNT(1),
+ kResizableMaximumFlag,
+ 28,
+ 28,
+ SECTION(Data, 9),
+ ENTRY_COUNT(1),
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_GLOBAL(0), // dest addr
+ U32V_1(3), // source size
+ 'a',
+ 'b',
+ 'c' // data bytes
+ };
+ EXPECT_FAILURE(data);
+}
+TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableGlobal) {
+ // Only an immutable imported global can be used as an init_expr.
+ const byte data[] = {
+ SECTION(Memory, 4),
+ ENTRY_COUNT(1),
+ kResizableMaximumFlag,
+ 28,
+ 28,
+ SECTION(Global, 8), // --
+ 1,
+ kLocalI32, // local type
+ 0, // immutable
+ WASM_INIT_EXPR_I32V_3(0x9bbaa), // init
+ SECTION(Data, 9),
+ ENTRY_COUNT(1),
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_GLOBAL(0), // dest addr
+ U32V_1(3), // source size
+ 'a',
+ 'b',
+ 'c' // data bytes
+ };
+ EXPECT_FAILURE(data);
+}
+
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const byte kDataSegmentSourceOffset = 24;
const byte data[] = {
@@ -399,16 +533,16 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
EXPECT_VERIFIES(data);
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(0, result.val->globals.size());
- EXPECT_EQ(0, result.val->functions.size());
- EXPECT_EQ(1, result.val->data_segments.size());
+ EXPECT_EQ(0u, result.val->globals.size());
+ EXPECT_EQ(0u, result.val->functions.size());
+ EXPECT_EQ(1u, result.val->data_segments.size());
const WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(WasmInitExpr::kI32Const, segment->dest_addr.kind);
EXPECT_EQ(0x9bbaa, segment->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
- EXPECT_EQ(3, segment->source_size);
+ EXPECT_EQ(3u, segment->source_size);
if (result.val) delete result.val;
}
@@ -453,9 +587,9 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
{
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(0, result.val->globals.size());
- EXPECT_EQ(0, result.val->functions.size());
- EXPECT_EQ(2, result.val->data_segments.size());
+ EXPECT_EQ(0u, result.val->globals.size());
+ EXPECT_EQ(0u, result.val->functions.size());
+ EXPECT_EQ(2u, result.val->data_segments.size());
const WasmDataSegment* s0 = &result.val->data_segments[0];
const WasmDataSegment* s1 = &result.val->data_segments[1];
@@ -463,12 +597,12 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
EXPECT_EQ(WasmInitExpr::kI32Const, s0->dest_addr.kind);
EXPECT_EQ(0x7ffee, s0->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment0SourceOffset, s0->source_offset);
- EXPECT_EQ(4, s0->source_size);
+ EXPECT_EQ(4u, s0->source_size);
EXPECT_EQ(WasmInitExpr::kI32Const, s1->dest_addr.kind);
EXPECT_EQ(0x6ddcc, s1->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment1SourceOffset, s1->source_offset);
- EXPECT_EQ(10, s1->source_size);
+ EXPECT_EQ(10u, s1->source_size);
if (result.val) delete result.val;
}
@@ -476,6 +610,37 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
}
+TEST_F(WasmModuleVerifyTest, DataWithoutMemory) {
+ const byte data[] = {
+ SECTION(Data, 11),
+ ENTRY_COUNT(1),
+ LINEAR_MEMORY_INDEX_0,
+ WASM_INIT_EXPR_I32V_3(0x9bbaa), // dest addr
+ U32V_1(3), // source size
+ 'a',
+ 'b',
+ 'c' // data bytes
+ };
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, MaxMaximumMemorySize) {
+ {
+ const byte data[] = {
+ SECTION(Memory, 6), ENTRY_COUNT(1), kResizableMaximumFlag, 0,
+ U32V_3(65536),
+ };
+ EXPECT_VERIFIES(data);
+ }
+ {
+ const byte data[] = {
+ SECTION(Memory, 6), ENTRY_COUNT(1), kResizableMaximumFlag, 0,
+ U32V_3(65537),
+ };
+ EXPECT_FAILURE(data);
+ }
+}
+
TEST_F(WasmModuleVerifyTest, DataSegment_wrong_init_type) {
const byte data[] = {
SECTION(Memory, 4),
@@ -508,11 +673,10 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
- EXPECT_EQ(1, result.val->signatures.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->function_tables.size());
- EXPECT_EQ(1, result.val->function_tables[0].values.size());
- EXPECT_EQ(-1, result.val->function_tables[0].values[0]);
+ EXPECT_EQ(1u, result.val->signatures.size());
+ EXPECT_EQ(1u, result.val->functions.size());
+ EXPECT_EQ(1u, result.val->function_tables.size());
+ EXPECT_EQ(1u, result.val->function_tables[0].min_size);
}
if (result.val) delete result.val;
}
@@ -535,11 +699,10 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
- EXPECT_EQ(1, result.val->signatures.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->function_tables.size());
- EXPECT_EQ(1, result.val->function_tables[0].values.size());
- EXPECT_EQ(0, result.val->function_tables[0].values[0]);
+ EXPECT_EQ(1u, result.val->signatures.size());
+ EXPECT_EQ(1u, result.val->functions.size());
+ EXPECT_EQ(1u, result.val->function_tables.size());
+ EXPECT_EQ(1u, result.val->function_tables[0].min_size);
}
if (result.val) delete result.val;
}
@@ -573,13 +736,10 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
if (result.ok()) {
- EXPECT_EQ(2, result.val->signatures.size());
- EXPECT_EQ(4, result.val->functions.size());
- EXPECT_EQ(1, result.val->function_tables.size());
- EXPECT_EQ(8, result.val->function_tables[0].values.size());
- for (int i = 0; i < 8; i++) {
- EXPECT_EQ(i & 3, result.val->function_tables[0].values[i]);
- }
+ EXPECT_EQ(2u, result.val->signatures.size());
+ EXPECT_EQ(4u, result.val->functions.size());
+ EXPECT_EQ(1u, result.val->function_tables.size());
+ EXPECT_EQ(8u, result.val->function_tables[0].min_size);
}
if (result.val) delete result.val;
}
@@ -613,13 +773,13 @@ class WasmSignatureDecodeTest : public TestWithZone {};
TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
static const byte data[] = {SIG_ENTRY_v_v};
v8::internal::AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
FunctionSig* sig =
DecodeWasmSignatureForTesting(&zone, data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
- EXPECT_EQ(0, sig->parameter_count());
- EXPECT_EQ(0, sig->return_count());
+ EXPECT_EQ(0u, sig->parameter_count());
+ EXPECT_EQ(0u, sig->return_count());
}
TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
@@ -630,8 +790,8 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
- EXPECT_EQ(0, sig->parameter_count());
- EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(0u, sig->parameter_count());
+ EXPECT_EQ(1u, sig->return_count());
EXPECT_EQ(ret_type.type, sig->GetReturn());
}
}
@@ -644,8 +804,8 @@ TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
- EXPECT_EQ(1, sig->parameter_count());
- EXPECT_EQ(0, sig->return_count());
+ EXPECT_EQ(1u, sig->parameter_count());
+ EXPECT_EQ(0u, sig->return_count());
EXPECT_EQ(param_type.type, sig->GetParam(0));
}
}
@@ -660,8 +820,8 @@ TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
- EXPECT_EQ(1, sig->parameter_count());
- EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(1u, sig->parameter_count());
+ EXPECT_EQ(1u, sig->return_count());
EXPECT_EQ(param_type.type, sig->GetParam(0));
EXPECT_EQ(ret_type.type, sig->GetReturn());
}
@@ -679,8 +839,8 @@ TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
DecodeWasmSignatureForTesting(zone(), data, data + sizeof(data));
EXPECT_TRUE(sig != nullptr);
- EXPECT_EQ(2, sig->parameter_count());
- EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(2u, sig->parameter_count());
+ EXPECT_EQ(1u, sig->return_count());
EXPECT_EQ(p0_type.type, sig->GetParam(0));
EXPECT_EQ(p1_type.type, sig->GetParam(1));
}
@@ -756,10 +916,11 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
if (result.val && result.ok()) {
WasmFunction* function = result.val;
- EXPECT_EQ(0, function->sig->parameter_count());
- EXPECT_EQ(0, function->sig->return_count());
- EXPECT_EQ(0, function->name_offset);
- EXPECT_EQ(SIZEOF_SIG_ENTRY_v_v, function->code_start_offset);
+ EXPECT_EQ(0u, function->sig->parameter_count());
+ EXPECT_EQ(0u, function->sig->return_count());
+ EXPECT_EQ(0u, function->name_offset);
+ EXPECT_EQ(static_cast<uint32_t>(SIZEOF_SIG_ENTRY_v_v),
+ function->code_start_offset);
EXPECT_EQ(sizeof(data), function->code_end_offset);
// TODO(titzer): verify encoding of local declarations
}
@@ -861,14 +1022,14 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1, result.val->globals.size());
- EXPECT_EQ(0, result.val->functions.size());
- EXPECT_EQ(0, result.val->data_segments.size());
+ EXPECT_EQ(1u, result.val->globals.size());
+ EXPECT_EQ(0u, result.val->functions.size());
+ EXPECT_EQ(0u, result.val->data_segments.size());
const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(kAstI32, global->type);
- EXPECT_EQ(0, global->offset);
+ EXPECT_EQ(0u, global->offset);
if (result.val) delete result.val;
}
@@ -883,6 +1044,37 @@ TEST_F(WasmModuleVerifyTest, ImportTable_nosigs1) {
EXPECT_VERIFIES(data);
}
+TEST_F(WasmModuleVerifyTest, ImportTable_mutable_global) {
+ {
+ static const byte data[] = {
+ SECTION(Import, 8), // section header
+ 1, // number of imports
+ NAME_LENGTH(1), // --
+ 'm', // module name
+ NAME_LENGTH(1), // --
+ 'f', // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 0, // mutability
+ };
+ EXPECT_VERIFIES(data);
+ }
+ {
+ static const byte data[] = {
+ SECTION(Import, 8), // section header
+ 1, // sig table
+ NAME_LENGTH(1), // --
+ 'm', // module name
+ NAME_LENGTH(1), // --
+ 'f', // global name
+ kExternalGlobal, // import kind
+ kLocalI32, // type
+ 1, // mutability
+ };
+ EXPECT_FAILURE(data);
+ }
+}
+
TEST_F(WasmModuleVerifyTest, ImportTable_nosigs2) {
static const byte data[] = {
SECTION(Import, 6), 1, // sig table
@@ -964,8 +1156,8 @@ TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(0, result.val->export_table.size());
+ EXPECT_EQ(1u, result.val->functions.size());
+ EXPECT_EQ(0u, result.val->export_table.size());
if (result.val) delete result.val;
}
@@ -995,8 +1187,8 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->export_table.size());
+ EXPECT_EQ(1u, result.val->functions.size());
+ EXPECT_EQ(1u, result.val->export_table.size());
if (result.val) delete result.val;
}
@@ -1039,8 +1231,8 @@ TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(2, result.val->export_table.size());
+ EXPECT_EQ(1u, result.val->functions.size());
+ EXPECT_EQ(2u, result.val->export_table.size());
if (result.val) delete result.val;
}
@@ -1067,8 +1259,8 @@ TEST_F(WasmModuleVerifyTest, ExportTableThree) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- EXPECT_EQ(3, result.val->functions.size());
- EXPECT_EQ(3, result.val->export_table.size());
+ EXPECT_EQ(3u, result.val->functions.size());
+ EXPECT_EQ(3u, result.val->export_table.size());
if (result.val) delete result.val;
}
@@ -1107,7 +1299,7 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
FUNC_INDEX(0), // --
};
- for (int length = 33; length < sizeof(data); length++) {
+ for (size_t length = 33; length < sizeof(data); length++) {
ModuleResult result = DecodeModule(data, data + length);
EXPECT_FALSE(result.ok());
if (result.val) delete result.val;
@@ -1281,11 +1473,13 @@ TEST_F(WasmModuleVerifyTest, InitExpr_illegal) {
EXPECT_INIT_EXPR_FAIL(WASM_IF_ELSE(WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
-TEST_F(WasmModuleVerifyTest, InitExpr_global) {
- static const byte data[] = {WASM_INIT_EXPR_GLOBAL(37)};
- WasmInitExpr expr = DecodeWasmInitExprForTesting(data, data + sizeof(data));
- EXPECT_EQ(WasmInitExpr::kGlobalIndex, expr.kind);
- EXPECT_EQ(37, expr.val.global_index);
+TEST_F(WasmModuleVerifyTest, Multiple_Named_Sections) {
+ static const byte data[] = {
+ SECTION(Unknown, 4), 1, 'X', 17, 18, // --
+ SECTION(Unknown, 9), 3, 'f', 'o', 'o', 5, 6, 7, 8, 9, // --
+ SECTION(Unknown, 8), 5, 'o', 't', 'h', 'e', 'r', 7, 8, // --
+ };
+ EXPECT_VERIFIES(data);
}
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/switch-logic-unittest.cc b/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
index be587c28bd..cc3fbb05cc 100644
--- a/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
+++ b/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/wasm/switch-logic.h"
+#include "src/asmjs/switch-logic.h"
#include "test/unittests/test-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index 0399835d01..d047e04032 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -12,10 +12,10 @@ namespace wasm {
class WasmMacroGenTest : public TestWithZone {};
-#define EXPECT_SIZE(size, ...) \
- do { \
- byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(size, sizeof(code)); \
+#define EXPECT_SIZE(size, ...) \
+ do { \
+ byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(static_cast<size_t>(size), sizeof(code)); \
} while (false)
TEST_F(WasmMacroGenTest, Constants) {
@@ -131,12 +131,12 @@ TEST_F(WasmMacroGenTest, CallFunction) {
}
TEST_F(WasmMacroGenTest, CallIndirect) {
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
- EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(1, WASM_ZERO));
+ EXPECT_SIZE(5, WASM_CALL_INDIRECT0(11, WASM_ZERO));
- EXPECT_SIZE(6, WASM_CALL_INDIRECT1(0, WASM_ZERO, WASM_ZERO));
- EXPECT_SIZE(8, WASM_CALL_INDIRECT2(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(7, WASM_CALL_INDIRECT1(0, WASM_ZERO, WASM_ZERO));
+ EXPECT_SIZE(9, WASM_CALL_INDIRECT2(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
}
TEST_F(WasmMacroGenTest, Int32Ops) {
diff --git a/deps/v8/test/unittests/zone/segmentpool-unittest.cc b/deps/v8/test/unittests/zone/segmentpool-unittest.cc
new file mode 100644
index 0000000000..dd2c94f729
--- /dev/null
+++ b/deps/v8/test/unittests/zone/segmentpool-unittest.cc
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/zone/accounting-allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(Zone, SegmentPoolConstraints) {
+ size_t sizes[]{
+ 0, // Corner case
+ AccountingAllocator::kMaxPoolSizeLowMemoryDevice,
+ AccountingAllocator::kMaxPoolSizeMediumMemoryDevice,
+ AccountingAllocator::kMaxPoolSizeHighMemoryDevice,
+ AccountingAllocator::kMaxPoolSizeHugeMemoryDevice,
+ GB // Something really large
+ };
+
+ AccountingAllocator allocator;
+ for (size_t size : sizes) {
+ allocator.ConfigureSegmentPool(size);
+ size_t total_size = 0;
+ for (size_t power = 0; power < AccountingAllocator::kNumberBuckets;
+ ++power) {
+ total_size +=
+ allocator.unused_segments_max_sizes_[power] * (size_t(1) << power);
+ }
+ EXPECT_LE(total_size, size);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
new file mode 100644
index 0000000000..2969366bc7
--- /dev/null
+++ b/deps/v8/test/unittests/zone/zone-chunk-list-unittest.cc
@@ -0,0 +1,207 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/zone/zone-chunk-list.h"
+
+#include "src/list-inl.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+const size_t kItemCount = size_t(1) << 10;
+
+TEST(ZoneChunkList, ForwardIterationTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ size_t count = 0;
+
+ for (uintptr_t item : zone_chunk_list) {
+ EXPECT_EQ(static_cast<size_t>(item), count);
+ count++;
+ }
+
+ EXPECT_EQ(count, kItemCount);
+}
+
+TEST(ZoneChunkList, ReverseIterationTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ size_t count = 0;
+
+ for (auto it = zone_chunk_list.rbegin(); it != zone_chunk_list.rend(); ++it) {
+ EXPECT_EQ(static_cast<size_t>(*it), kItemCount - count - 1);
+ count++;
+ }
+
+ EXPECT_EQ(count, kItemCount);
+}
+
+TEST(ZoneChunkList, PushFrontTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_front(static_cast<uintptr_t>(i));
+ }
+
+ size_t count = 0;
+
+ for (uintptr_t item : zone_chunk_list) {
+ EXPECT_EQ(static_cast<size_t>(item), kItemCount - count - 1);
+ count++;
+ }
+
+ EXPECT_EQ(count, kItemCount);
+}
+
+TEST(ZoneChunkList, RewindTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ zone_chunk_list.Rewind(42);
+
+ size_t count = 0;
+
+ for (uintptr_t item : zone_chunk_list) {
+ EXPECT_EQ(static_cast<size_t>(item), count);
+ count++;
+ }
+
+ EXPECT_EQ(count, 42u);
+ EXPECT_EQ(count, zone_chunk_list.size());
+
+ zone_chunk_list.Rewind(0);
+
+ count = 0;
+
+ for (uintptr_t item : zone_chunk_list) {
+ USE(item);
+ count++;
+ }
+
+ EXPECT_EQ(count, 0u);
+ EXPECT_EQ(count, zone_chunk_list.size());
+
+ zone_chunk_list.Rewind(100);
+
+ count = 0;
+
+ for (uintptr_t item : zone_chunk_list) {
+ EXPECT_EQ(static_cast<size_t>(item), count);
+ count++;
+ }
+
+ EXPECT_EQ(count, 0u);
+ EXPECT_EQ(count, zone_chunk_list.size());
+}
+
+TEST(ZoneChunkList, FindTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ const size_t index = kItemCount / 2 + 42;
+
+ EXPECT_EQ(*zone_chunk_list.Find(index), static_cast<uintptr_t>(index));
+
+ *zone_chunk_list.Find(index) = 42;
+
+ EXPECT_EQ(*zone_chunk_list.Find(index), 42u);
+}
+
+TEST(ZoneChunkList, CopyToTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uintptr_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uintptr_t>(i));
+ }
+
+ uintptr_t* array = zone.NewArray<uintptr_t>(kItemCount);
+
+ zone_chunk_list.CopyTo(array);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ EXPECT_EQ(array[i], static_cast<uintptr_t>(i));
+ }
+}
+
+TEST(ZoneChunkList, SmallCopyToTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<uint8_t> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back(static_cast<uint8_t>(i & 0xFF));
+ }
+
+ uint8_t* array = zone.NewArray<uint8_t>(kItemCount);
+
+ zone_chunk_list.CopyTo(array);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ EXPECT_EQ(array[i], static_cast<uint8_t>(i & 0xFF));
+ }
+}
+
+struct Fubar {
+ size_t a_;
+ size_t b_;
+};
+
+TEST(ZoneChunkList, BigCopyToTest) {
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneChunkList<Fubar> zone_chunk_list(&zone);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ zone_chunk_list.push_back({i, i + 5});
+ }
+
+ Fubar* array = zone.NewArray<Fubar>(kItemCount);
+
+ zone_chunk_list.CopyTo(array);
+
+ for (size_t i = 0; i < kItemCount; ++i) {
+ EXPECT_EQ(array[i].a_, i);
+ EXPECT_EQ(array[i].b_, i + 5);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/zone/zone-unittest.cc b/deps/v8/test/unittests/zone/zone-unittest.cc
index 3ea18b530f..ae3cfdaf5e 100644
--- a/deps/v8/test/unittests/zone/zone-unittest.cc
+++ b/deps/v8/test/unittests/zone/zone-unittest.cc
@@ -12,7 +12,7 @@ namespace internal {
TEST(Zone, 8ByteAlignment) {
AccountingAllocator allocator;
- Zone zone(&allocator);
+ Zone zone(&allocator, ZONE_NAME);
for (size_t i = 0; i < 16; ++i) {
ASSERT_EQ(reinterpret_cast<intptr_t>(zone.New(i)) % 8, 0);
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
index 36394155d9..1b8ad93c79 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
@@ -80,13 +80,13 @@ PASS (function (){(function (){var a; function f() {'use strict'; delete a;} })(
PASS (function (){'use strict'; with(1){};}) threw exception SyntaxError: Strict mode code may not include a with statement.
PASS (function (){(function (){'use strict'; with(1){};})}) threw exception SyntaxError: Strict mode code may not include a with statement.
PASS (function (){'use strict'; arguments.callee; })() threw exception TypeError: 'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them.
-PASS (function (){'use strict'; arguments.caller; })() threw exception TypeError: 'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them.
+PASS (function (){'use strict'; arguments.caller; })() is undefined.
PASS (function f(){'use strict'; f.arguments; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
PASS (function f(){'use strict'; f.caller; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
PASS (function f(){'use strict'; f.arguments=5; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
PASS (function f(){'use strict'; f.caller=5; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
PASS (function (arg){'use strict'; arguments.callee; })() threw exception TypeError: 'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them.
-PASS (function (arg){'use strict'; arguments.caller; })() threw exception TypeError: 'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them.
+PASS (function (arg){'use strict'; arguments.caller; })() is undefined.
PASS (function f(arg){'use strict'; f.arguments; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
PASS (function f(arg){'use strict'; f.caller; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
PASS (function f(arg){'use strict'; f.arguments=5; })() threw exception TypeError: 'caller' and 'arguments' are restricted function properties and cannot be accessed in this context..
@@ -200,8 +200,7 @@ PASS (function (){var a = true; eval('"use strict"; var a = false'); return a; }
PASS (function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(f.__proto__, 'arguments').value; })() is undefined.
PASS (function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(f.__proto__, 'caller').value; })() is undefined.
PASS (function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(arguments, 'callee').value; })() is undefined.
-PASS (function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(arguments, 'caller').value; })() is undefined.
-PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(arguments, 'caller'); return descriptor.get === descriptor.set; })() is true
+PASS (function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(arguments, 'caller'); })() is undefined.
PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(arguments, 'callee'); return descriptor.get === descriptor.set; })() is true
PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'caller'); return descriptor.get === descriptor.set; })() is true
PASS (function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'arguments'); return descriptor.get === descriptor.set; })() is true
diff --git a/deps/v8/test/webkit/fast/js/basic-strict-mode.js b/deps/v8/test/webkit/fast/js/basic-strict-mode.js
index 027af0f152..77415c0881 100644
--- a/deps/v8/test/webkit/fast/js/basic-strict-mode.js
+++ b/deps/v8/test/webkit/fast/js/basic-strict-mode.js
@@ -97,13 +97,13 @@ shouldBeSyntaxError("(function (){'use strict'; var a; delete a;})()");
shouldBeSyntaxError("(function (){var a; function f() {'use strict'; delete a;} })()");
shouldBeSyntaxError("(function (){'use strict'; with(1){};})");
shouldThrow("(function (){'use strict'; arguments.callee; })()");
-shouldThrow("(function (){'use strict'; arguments.caller; })()");
+shouldBeUndefined("(function (){'use strict'; arguments.caller; })()");
shouldThrow("(function f(){'use strict'; f.arguments; })()");
shouldThrow("(function f(){'use strict'; f.caller; })()");
shouldThrow("(function f(){'use strict'; f.arguments=5; })()");
shouldThrow("(function f(){'use strict'; f.caller=5; })()");
shouldThrow("(function (arg){'use strict'; arguments.callee; })()");
-shouldThrow("(function (arg){'use strict'; arguments.caller; })()");
+shouldBeUndefined("(function (arg){'use strict'; arguments.caller; })()");
shouldThrow("(function f(arg){'use strict'; f.arguments; })()");
shouldThrow("(function f(arg){'use strict'; f.caller; })()");
shouldThrow("(function f(arg){'use strict'; f.arguments=5; })()");
@@ -199,8 +199,7 @@ shouldBeTrue("(function (){var a = true; eval('\"use strict\"; var a = false');
shouldBeUndefined("(function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(f.__proto__, 'arguments').value; })()");
shouldBeUndefined("(function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(f.__proto__, 'caller').value; })()");
shouldBeUndefined("(function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(arguments, 'callee').value; })()");
-shouldBeUndefined("(function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(arguments, 'caller').value; })()");
-shouldBeTrue("(function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(arguments, 'caller'); return descriptor.get === descriptor.set; })()");
+shouldBeUndefined("(function f(arg){'use strict'; return Object.getOwnPropertyDescriptor(arguments, 'caller'); })()");
shouldBeTrue("(function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(arguments, 'callee'); return descriptor.get === descriptor.set; })()");
shouldBeTrue("(function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'caller'); return descriptor.get === descriptor.set; })()");
shouldBeTrue("(function f(arg){'use strict'; var descriptor = Object.getOwnPropertyDescriptor(f.__proto__, 'arguments'); return descriptor.get === descriptor.set; })()");
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
index 132fb4bca0..a441daa3bf 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check-expected.txt
@@ -308,8 +308,8 @@ PASS Invalid: "while if (a) ;"
PASS Invalid: "function f() { while if (a) ; }"
PASS Valid: "if (a) function f() {} else function g() {}"
PASS Valid: "function f() { if (a) function f() {} else function g() {} }"
-PASS Valid: "if (a()) while(0) function f() {} else function g() {}"
-PASS Valid: "function f() { if (a()) while(0) function f() {} else function g() {} }"
+PASS Invalid: "if (a()) while(0) function f() {} else function g() {}"
+PASS Invalid: "function f() { if (a()) while(0) function f() {} else function g() {} }"
PASS Invalid: "if (a()) function f() { else function g() }"
PASS Invalid: "function f() { if (a()) function f() { else function g() } }"
PASS Invalid: "if (a) if (b) ; else function f {}"
diff --git a/deps/v8/test/webkit/fast/js/parser-syntax-check.js b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
index c00374506d..d7d8ba5811 100644
--- a/deps/v8/test/webkit/fast/js/parser-syntax-check.js
+++ b/deps/v8/test/webkit/fast/js/parser-syntax-check.js
@@ -21,8 +21,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --no-harmony-restrictive-declarations
-
description(
"This test checks that the following expressions or statements are valid ECMASCRIPT code or should throw parse error"
);
@@ -231,7 +229,7 @@ valid ("do if (a) with (b) continue; else debugger; while (false)");
invalid("do if (a) while (false) else debugger");
invalid("while if (a) ;");
valid ("if (a) function f() {} else function g() {}");
-valid ("if (a()) while(0) function f() {} else function g() {}");
+invalid("if (a()) while(0) function f() {} else function g() {}");
invalid("if (a()) function f() { else function g() }");
invalid("if (a) if (b) ; else function f {}");
invalid("if (a) if (b) ; else function (){}");
diff --git a/deps/v8/test/webkit/function-declaration-statement-expected.txt b/deps/v8/test/webkit/function-declaration-statement-expected.txt
deleted file mode 100644
index 1bbc7f56e4..0000000000
--- a/deps/v8/test/webkit/function-declaration-statement-expected.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-This test checks that function declarations are treated as statements.
-
-On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
-
-
-PASS ifTest() is true
-PASS ifElseTest() is true
-PASS doWhileTest() is true
-PASS whileTest() is true
-PASS forTest() is true
-PASS forVarTest() is true
-PASS forInTest() is true
-PASS forInVarTest() is true
-PASS forInVarInitTest() is true
-PASS withTest() is true
-PASS labelTest() is true
-PASS successfullyParsed is true
-
-TEST COMPLETE
-
diff --git a/deps/v8/test/webkit/function-declaration-statement.js b/deps/v8/test/webkit/function-declaration-statement.js
deleted file mode 100644
index 2c866d5944..0000000000
--- a/deps/v8/test/webkit/function-declaration-statement.js
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-// ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --no-harmony-restrictive-declarations
-
-description(
-"This test checks that function declarations are treated as statements."
-);
-
-function f()
-{
- return false;
-}
-
-function ifTest()
-{
- if (true)
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("ifTest()");
-
-function ifElseTest()
-{
- if (false)
- return false;
- else
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("ifElseTest()");
-
-function doWhileTest()
-{
- var i = 0;
- do
- function f()
- {
- return true;
- }
- while (i++ < 10)
-
- return f();
-}
-
-shouldBeTrue("doWhileTest()");
-
-function whileTest()
-{
- var i = 0;
- while (i++ < 10)
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("whileTest()");
-
-function forTest()
-{
- var i;
- for (i = 0; i < 10; ++i)
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("forTest()");
-
-function forVarTest()
-{
- for (var i = 0; i < 10; ++i)
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("forVarTest()");
-
-function forInTest()
-{
- var a;
- for (a in { field: false })
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("forInTest()");
-
-function forInVarTest()
-{
- var a;
- for (var a in { field: false })
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("forInVarTest()");
-
-function forInVarInitTest()
-{
- var a;
- for (var a in { field: false })
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("forInVarInitTest()");
-
-function withTest()
-{
- with ({ })
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("withTest()");
-
-function labelTest()
-{
- label:
- function f()
- {
- return true;
- }
-
- return f();
-}
-
-shouldBeTrue("labelTest()");
diff --git a/deps/v8/third_party/binutils/.gitignore b/deps/v8/third_party/binutils/.gitignore
new file mode 100644
index 0000000000..5605b2f711
--- /dev/null
+++ b/deps/v8/third_party/binutils/.gitignore
@@ -0,0 +1,8 @@
+binutils-*
+*-chroot-*
+output-*
+Linux_ia32/*stamp*
+Linux_ia32/*tar.bz2
+Linux_x64/*stamp*
+Linux_x64/*tar.bz2
+*/Release
diff --git a/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1 b/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
new file mode 100644
index 0000000000..9d046d1f66
--- /dev/null
+++ b/deps/v8/third_party/binutils/Linux_ia32/binutils.tar.bz2.sha1
@@ -0,0 +1 @@
+24f937cfdad77bdcd6ad8cacc542d806f3eb4b0f
diff --git a/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1 b/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
new file mode 100644
index 0000000000..09c5366c5c
--- /dev/null
+++ b/deps/v8/third_party/binutils/Linux_x64/binutils.tar.bz2.sha1
@@ -0,0 +1 @@
+d9064388bed0e7225b1366d80b59289b1509d7c2
diff --git a/deps/v8/third_party/binutils/download.py b/deps/v8/third_party/binutils/download.py
new file mode 100755
index 0000000000..a8ad814dbe
--- /dev/null
+++ b/deps/v8/third_party/binutils/download.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# Copyright 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# vim: set ts=2 sw=2 et sts=2 ai:
+
+"""Minimal tool to download binutils from Google storage.
+
+TODO(mithro): Replace with generic download_and_extract tool.
+"""
+
+import os
+import platform
+import re
+import shutil
+import subprocess
+import sys
+
+
+BINUTILS_DIR = os.path.abspath(os.path.dirname(__file__))
+BINUTILS_FILE = 'binutils.tar.bz2'
+BINUTILS_TOOLS = ['bin/ld.gold', 'bin/objcopy', 'bin/objdump']
+BINUTILS_OUT = 'Release'
+
+DETECT_HOST_ARCH = os.path.abspath(os.path.join(
+ BINUTILS_DIR, '../../gypfiles/detect_v8_host_arch.py'))
+
+
+def ReadFile(filename):
+ with file(filename, 'r') as f:
+ return f.read().strip()
+
+
+def WriteFile(filename, content):
+ assert not os.path.exists(filename)
+ with file(filename, 'w') as f:
+ f.write(content)
+ f.write('\n')
+
+
+def GetArch():
+ gyp_host_arch = re.search(
+ 'host_arch=(\S*)', os.environ.get('GYP_DEFINES', ''))
+ if gyp_host_arch:
+ arch = gyp_host_arch.group(1)
+ # This matches detect_host_arch.py.
+ if arch == 'x86_64':
+ return 'x64'
+ return arch
+
+ return subprocess.check_output(['python', DETECT_HOST_ARCH]).strip()
+
+
+def FetchAndExtract(arch):
+ archdir = os.path.join(BINUTILS_DIR, 'Linux_' + arch)
+ tarball = os.path.join(archdir, BINUTILS_FILE)
+ outdir = os.path.join(archdir, BINUTILS_OUT)
+
+ sha1file = tarball + '.sha1'
+ if not os.path.exists(sha1file):
+ print "WARNING: No binutils found for your architecture (%s)!" % arch
+ return 0
+
+ checksum = ReadFile(sha1file)
+
+ stampfile = tarball + '.stamp'
+ if os.path.exists(stampfile):
+ if (os.path.exists(tarball) and
+ os.path.exists(outdir) and
+ checksum == ReadFile(stampfile)):
+ return 0
+ else:
+ os.unlink(stampfile)
+
+ print "Downloading", tarball
+ subprocess.check_call([
+ 'download_from_google_storage',
+ '--no_resume',
+ '--no_auth',
+ '--bucket', 'chromium-binutils',
+ '-s', sha1file])
+ assert os.path.exists(tarball)
+
+ if os.path.exists(outdir):
+ shutil.rmtree(outdir)
+ assert not os.path.exists(outdir)
+ os.makedirs(outdir)
+ assert os.path.exists(outdir)
+
+ print "Extracting", tarball
+ subprocess.check_call(['tar', 'axf', tarball], cwd=outdir)
+
+ for tool in BINUTILS_TOOLS:
+ assert os.path.exists(os.path.join(outdir, tool))
+
+ WriteFile(stampfile, checksum)
+ return 0
+
+
+def main(args):
+ if not sys.platform.startswith('linux'):
+ return 0
+
+ arch = GetArch()
+ if arch == 'x64':
+ return FetchAndExtract(arch)
+ if arch == 'ia32':
+ ret = FetchAndExtract(arch)
+ if ret != 0:
+ return ret
+ # Fetch the x64 toolchain as well for official bots with 64-bit kernels.
+ return FetchAndExtract('x64')
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py b/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
new file mode 100755
index 0000000000..dd9acad898
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/CheckProtocolCompatibility.py
@@ -0,0 +1,479 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Inspector protocol validator.
+#
+# Tests that subsequent protocol changes are not breaking backwards compatibility.
+# Following violations are reported:
+#
+# - Domain has been removed
+# - Command has been removed
+# - Required command parameter was added or changed from optional
+# - Required response parameter was removed or changed to optional
+# - Event has been removed
+# - Required event parameter was removed or changed to optional
+# - Parameter type has changed.
+#
+# For the parameters with composite types the above checks are also applied
+# recursively to every property of the type.
+#
+# Adding --show_changes to the command line prints out a list of valid public API changes.
+
+import copy
+import os.path
+import optparse
+import sys
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+def list_to_map(items, key):
+ result = {}
+ for item in items:
+ if "experimental" not in item and "hidden" not in item:
+ result[item[key]] = item
+ return result
+
+
+def named_list_to_map(container, name, key):
+ if name in container:
+ return list_to_map(container[name], key)
+ return {}
+
+
+def removed(reverse):
+ if reverse:
+ return "added"
+ return "removed"
+
+
+def required(reverse):
+ if reverse:
+ return "optional"
+ return "required"
+
+
+def compare_schemas(d_1, d_2, reverse):
+ errors = []
+ domains_1 = copy.deepcopy(d_1)
+ domains_2 = copy.deepcopy(d_2)
+ types_1 = normalize_types_in_schema(domains_1)
+ types_2 = normalize_types_in_schema(domains_2)
+
+ domains_by_name_1 = list_to_map(domains_1, "domain")
+ domains_by_name_2 = list_to_map(domains_2, "domain")
+
+ for name in domains_by_name_1:
+ domain_1 = domains_by_name_1[name]
+ if name not in domains_by_name_2:
+ errors.append("%s: domain has been %s" % (name, removed(reverse)))
+ continue
+ compare_domains(domain_1, domains_by_name_2[name], types_1, types_2, errors, reverse)
+ return errors
+
+
+def compare_domains(domain_1, domain_2, types_map_1, types_map_2, errors, reverse):
+ domain_name = domain_1["domain"]
+ commands_1 = named_list_to_map(domain_1, "commands", "name")
+ commands_2 = named_list_to_map(domain_2, "commands", "name")
+ for name in commands_1:
+ command_1 = commands_1[name]
+ if name not in commands_2:
+ errors.append("%s.%s: command has been %s" % (domain_1["domain"], name, removed(reverse)))
+ continue
+ compare_commands(domain_name, command_1, commands_2[name], types_map_1, types_map_2, errors, reverse)
+
+ events_1 = named_list_to_map(domain_1, "events", "name")
+ events_2 = named_list_to_map(domain_2, "events", "name")
+ for name in events_1:
+ event_1 = events_1[name]
+ if name not in events_2:
+ errors.append("%s.%s: event has been %s" % (domain_1["domain"], name, removed(reverse)))
+ continue
+ compare_events(domain_name, event_1, events_2[name], types_map_1, types_map_2, errors, reverse)
+
+
+def compare_commands(domain_name, command_1, command_2, types_map_1, types_map_2, errors, reverse):
+ context = domain_name + "." + command_1["name"]
+
+ params_1 = named_list_to_map(command_1, "parameters", "name")
+ params_2 = named_list_to_map(command_2, "parameters", "name")
+ # Note the reversed order: we allow removing but forbid adding parameters.
+ compare_params_list(context, "parameter", params_2, params_1, types_map_2, types_map_1, 0, errors, not reverse)
+
+ returns_1 = named_list_to_map(command_1, "returns", "name")
+ returns_2 = named_list_to_map(command_2, "returns", "name")
+ compare_params_list(context, "response parameter", returns_1, returns_2, types_map_1, types_map_2, 0, errors, reverse)
+
+
+def compare_events(domain_name, event_1, event_2, types_map_1, types_map_2, errors, reverse):
+ context = domain_name + "." + event_1["name"]
+ params_1 = named_list_to_map(event_1, "parameters", "name")
+ params_2 = named_list_to_map(event_2, "parameters", "name")
+ compare_params_list(context, "parameter", params_1, params_2, types_map_1, types_map_2, 0, errors, reverse)
+
+
+def compare_params_list(context, kind, params_1, params_2, types_map_1, types_map_2, depth, errors, reverse):
+ for name in params_1:
+ param_1 = params_1[name]
+ if name not in params_2:
+ if "optional" not in param_1:
+ errors.append("%s.%s: required %s has been %s" % (context, name, kind, removed(reverse)))
+ continue
+
+ param_2 = params_2[name]
+ if param_2 and "optional" in param_2 and "optional" not in param_1:
+ errors.append("%s.%s: %s %s is now %s" % (context, name, required(reverse), kind, required(not reverse)))
+ continue
+ type_1 = extract_type(param_1, types_map_1, errors)
+ type_2 = extract_type(param_2, types_map_2, errors)
+ compare_types(context + "." + name, kind, type_1, type_2, types_map_1, types_map_2, depth, errors, reverse)
+
+
+def compare_types(context, kind, type_1, type_2, types_map_1, types_map_2, depth, errors, reverse):
+ if depth > 10:
+ return
+
+ base_type_1 = type_1["type"]
+ base_type_2 = type_2["type"]
+
+ if base_type_1 != base_type_2:
+ errors.append("%s: %s base type mismatch, '%s' vs '%s'" % (context, kind, base_type_1, base_type_2))
+ elif base_type_1 == "object":
+ params_1 = named_list_to_map(type_1, "properties", "name")
+ params_2 = named_list_to_map(type_2, "properties", "name")
+ # If both parameters have the same named type use it in the context.
+ if "id" in type_1 and "id" in type_2 and type_1["id"] == type_2["id"]:
+ type_name = type_1["id"]
+ else:
+ type_name = "<object>"
+ context += " %s->%s" % (kind, type_name)
+ compare_params_list(context, "property", params_1, params_2, types_map_1, types_map_2, depth + 1, errors, reverse)
+ elif base_type_1 == "array":
+ item_type_1 = extract_type(type_1["items"], types_map_1, errors)
+ item_type_2 = extract_type(type_2["items"], types_map_2, errors)
+ compare_types(context, kind, item_type_1, item_type_2, types_map_1, types_map_2, depth + 1, errors, reverse)
+
+
+def extract_type(typed_object, types_map, errors):
+ if "type" in typed_object:
+ result = {"id": "<transient>", "type": typed_object["type"]}
+ if typed_object["type"] == "object":
+ result["properties"] = []
+ elif typed_object["type"] == "array":
+ result["items"] = typed_object["items"]
+ return result
+ elif "$ref" in typed_object:
+ ref = typed_object["$ref"]
+ if ref not in types_map:
+ errors.append("Can not resolve type: %s" % ref)
+ types_map[ref] = {"id": "<transient>", "type": "object"}
+ return types_map[ref]
+
+
+def normalize_types_in_schema(domains):
+ types = {}
+ for domain in domains:
+ domain_name = domain["domain"]
+ normalize_types(domain, domain_name, types)
+ return types
+
+
+def normalize_types(obj, domain_name, types):
+ if isinstance(obj, list):
+ for item in obj:
+ normalize_types(item, domain_name, types)
+ elif isinstance(obj, dict):
+ for key, value in obj.items():
+ if key == "$ref" and value.find(".") == -1:
+ obj[key] = "%s.%s" % (domain_name, value)
+ elif key == "id":
+ obj[key] = "%s.%s" % (domain_name, value)
+ types[obj[key]] = obj
+ else:
+ normalize_types(value, domain_name, types)
+
+
+def load_schema(file_name, domains):
+ # pylint: disable=W0613
+ if not os.path.isfile(file_name):
+ return
+ input_file = open(file_name, "r")
+ json_string = input_file.read()
+ parsed_json = json.loads(json_string)
+ domains += parsed_json["domains"]
+ return parsed_json["version"]
+
+
+def self_test():
+ def create_test_schema_1():
+ return [
+ {
+ "domain": "Network",
+ "types": [
+ {
+ "id": "LoaderId",
+ "type": "string"
+ },
+ {
+ "id": "Headers",
+ "type": "object"
+ },
+ {
+ "id": "Request",
+ "type": "object",
+ "properties": [
+ {"name": "url", "type": "string"},
+ {"name": "method", "type": "string"},
+ {"name": "headers", "$ref": "Headers"},
+ {"name": "becameOptionalField", "type": "string"},
+ {"name": "removedField", "type": "string"},
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "removedCommand",
+ },
+ {
+ "name": "setExtraHTTPHeaders",
+ "parameters": [
+ {"name": "headers", "$ref": "Headers"},
+ {"name": "mismatched", "type": "string"},
+ {"name": "becameOptional", "$ref": "Headers"},
+ {"name": "removedRequired", "$ref": "Headers"},
+ {"name": "becameRequired", "$ref": "Headers", "optional": True},
+ {"name": "removedOptional", "$ref": "Headers", "optional": True},
+ ],
+ "returns": [
+ {"name": "mimeType", "type": "string"},
+ {"name": "becameOptional", "type": "string"},
+ {"name": "removedRequired", "type": "string"},
+ {"name": "becameRequired", "type": "string", "optional": True},
+ {"name": "removedOptional", "type": "string", "optional": True},
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "requestWillBeSent",
+ "parameters": [
+ {"name": "frameId", "type": "string", "experimental": True},
+ {"name": "request", "$ref": "Request"},
+ {"name": "becameOptional", "type": "string"},
+ {"name": "removedRequired", "type": "string"},
+ {"name": "becameRequired", "type": "string", "optional": True},
+ {"name": "removedOptional", "type": "string", "optional": True},
+ ]
+ },
+ {
+ "name": "removedEvent",
+ "parameters": [
+ {"name": "errorText", "type": "string"},
+ {"name": "canceled", "type": "boolean", "optional": True}
+ ]
+ }
+ ]
+ },
+ {
+ "domain": "removedDomain"
+ }
+ ]
+
+ def create_test_schema_2():
+ return [
+ {
+ "domain": "Network",
+ "types": [
+ {
+ "id": "LoaderId",
+ "type": "string"
+ },
+ {
+ "id": "Request",
+ "type": "object",
+ "properties": [
+ {"name": "url", "type": "string"},
+ {"name": "method", "type": "string"},
+ {"name": "headers", "type": "object"},
+ {"name": "becameOptionalField", "type": "string", "optional": True},
+ ]
+ }
+ ],
+ "commands": [
+ {
+ "name": "addedCommand",
+ },
+ {
+ "name": "setExtraHTTPHeaders",
+ "parameters": [
+ {"name": "headers", "type": "object"},
+ {"name": "mismatched", "type": "object"},
+ {"name": "becameOptional", "type": "object", "optional": True},
+ {"name": "addedRequired", "type": "object"},
+ {"name": "becameRequired", "type": "object"},
+ {"name": "addedOptional", "type": "object", "optional": True},
+ ],
+ "returns": [
+ {"name": "mimeType", "type": "string"},
+ {"name": "becameOptional", "type": "string", "optional": True},
+ {"name": "addedRequired", "type": "string"},
+ {"name": "becameRequired", "type": "string"},
+ {"name": "addedOptional", "type": "string", "optional": True},
+ ]
+ }
+ ],
+ "events": [
+ {
+ "name": "requestWillBeSent",
+ "parameters": [
+ {"name": "request", "$ref": "Request"},
+ {"name": "becameOptional", "type": "string", "optional": True},
+ {"name": "addedRequired", "type": "string"},
+ {"name": "becameRequired", "type": "string"},
+ {"name": "addedOptional", "type": "string", "optional": True},
+ ]
+ },
+ {
+ "name": "addedEvent"
+ }
+ ]
+ },
+ {
+ "domain": "addedDomain"
+ }
+ ]
+
+ expected_errors = [
+ "removedDomain: domain has been removed",
+ "Network.removedCommand: command has been removed",
+ "Network.removedEvent: event has been removed",
+ "Network.setExtraHTTPHeaders.mismatched: parameter base type mismatch, 'object' vs 'string'",
+ "Network.setExtraHTTPHeaders.addedRequired: required parameter has been added",
+ "Network.setExtraHTTPHeaders.becameRequired: optional parameter is now required",
+ "Network.setExtraHTTPHeaders.removedRequired: required response parameter has been removed",
+ "Network.setExtraHTTPHeaders.becameOptional: required response parameter is now optional",
+ "Network.requestWillBeSent.removedRequired: required parameter has been removed",
+ "Network.requestWillBeSent.becameOptional: required parameter is now optional",
+ "Network.requestWillBeSent.request parameter->Network.Request.removedField: required property has been removed",
+ "Network.requestWillBeSent.request parameter->Network.Request.becameOptionalField: required property is now optional",
+ ]
+
+ expected_errors_reverse = [
+ "addedDomain: domain has been added",
+ "Network.addedEvent: event has been added",
+ "Network.addedCommand: command has been added",
+ "Network.setExtraHTTPHeaders.mismatched: parameter base type mismatch, 'string' vs 'object'",
+ "Network.setExtraHTTPHeaders.removedRequired: required parameter has been removed",
+ "Network.setExtraHTTPHeaders.becameOptional: required parameter is now optional",
+ "Network.setExtraHTTPHeaders.addedRequired: required response parameter has been added",
+ "Network.setExtraHTTPHeaders.becameRequired: optional response parameter is now required",
+ "Network.requestWillBeSent.becameRequired: optional parameter is now required",
+ "Network.requestWillBeSent.addedRequired: required parameter has been added",
+ ]
+
+ def is_subset(subset, superset, message):
+ for i in range(len(subset)):
+ if subset[i] not in superset:
+ sys.stderr.write("%s error: %s\n" % (message, subset[i]))
+ return False
+ return True
+
+ def errors_match(expected, actual):
+ return (is_subset(actual, expected, "Unexpected") and
+ is_subset(expected, actual, "Missing"))
+
+ return (errors_match(expected_errors,
+ compare_schemas(create_test_schema_1(), create_test_schema_2(), False)) and
+ errors_match(expected_errors_reverse,
+ compare_schemas(create_test_schema_2(), create_test_schema_1(), True)))
+
+
+def load_domains_and_baselines(file_name, domains, baseline_domains):
+ version = load_schema(os.path.normpath(file_name), domains)
+ suffix = "-%s.%s.json" % (version["major"], version["minor"])
+ baseline_file = file_name.replace(".json", suffix)
+ load_schema(os.path.normpath(baseline_file), baseline_domains)
+ return version
+
+
+def main():
+ if not self_test():
+ sys.stderr.write("Self-test failed")
+ return 1
+
+ cmdline_parser = optparse.OptionParser()
+ cmdline_parser.add_option("--show_changes")
+ cmdline_parser.add_option("--expected_errors")
+ cmdline_parser.add_option("--stamp")
+ arg_options, arg_values = cmdline_parser.parse_args()
+
+ if len(arg_values) < 1:
+ sys.stderr.write("Usage: %s [--show_changes] <protocol-1> [, <protocol-2>...]\n" % sys.argv[0])
+ return 1
+
+ domains = []
+ baseline_domains = []
+ version = load_domains_and_baselines(arg_values[0], domains, baseline_domains)
+ for dependency in arg_values[1:]:
+ load_domains_and_baselines(dependency, domains, baseline_domains)
+
+ expected_errors = []
+ if arg_options.expected_errors:
+ expected_errors_file = open(arg_options.expected_errors, "r")
+ expected_errors = json.loads(expected_errors_file.read())["errors"]
+ expected_errors_file.close()
+
+ errors = compare_schemas(baseline_domains, domains, False)
+ unexpected_errors = []
+ for i in range(len(errors)):
+ if errors[i] not in expected_errors:
+ unexpected_errors.append(errors[i])
+ if len(unexpected_errors) > 0:
+ sys.stderr.write(" Compatibility checks FAILED\n")
+ for error in unexpected_errors:
+ sys.stderr.write(" %s\n" % error)
+ return 1
+
+ if arg_options.show_changes:
+ changes = compare_schemas(domains, baseline_domains, True)
+ if len(changes) > 0:
+ print " Public changes since %s:" % version
+ for change in changes:
+ print " %s" % change
+
+ if arg_options.stamp:
+ with open(arg_options.stamp, 'a') as _:
+ pass
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/deps/v8/third_party/inspector_protocol/CodeGenerator.py b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
new file mode 100644
index 0000000000..de1029e801
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/CodeGenerator.py
@@ -0,0 +1,498 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os.path
+import sys
+import optparse
+import collections
+import functools
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+# Path handling for libraries and templates
+# Paths have to be normalized because Jinja uses the exact template path to
+# determine the hash used in the cache filename, and we need a pre-caching step
+# to be concurrency-safe. Use absolute path because __file__ is absolute if
+# module is imported, and relative if executed directly.
+# If paths differ between pre-caching and individual file compilation, the cache
+# is regenerated, which causes a race condition and breaks concurrent build,
+# since some compile processes will try to read the partially written cache.
+module_path, module_filename = os.path.split(os.path.realpath(__file__))
+
+def read_config():
+ # pylint: disable=W0703
+ def json_to_object(data, output_base, config_base):
+ def json_object_hook(object_dict):
+ items = [(k, os.path.join(config_base, v) if k == "path" else v) for (k, v) in object_dict.items()]
+ items = [(k, os.path.join(output_base, v) if k == "output" else v) for (k, v) in items]
+ keys, values = zip(*items)
+ return collections.namedtuple('X', keys)(*values)
+ return json.loads(data, object_hook=json_object_hook)
+
+ def init_defaults(config_tuple, path, defaults):
+ keys = list(config_tuple._fields) # pylint: disable=E1101
+ values = [getattr(config_tuple, k) for k in keys]
+ for i in xrange(len(keys)):
+ if hasattr(values[i], "_fields"):
+ values[i] = init_defaults(values[i], path + "." + keys[i], defaults)
+ for optional in defaults:
+ if optional.find(path + ".") != 0:
+ continue
+ optional_key = optional[len(path) + 1:]
+ if optional_key.find(".") == -1 and optional_key not in keys:
+ keys.append(optional_key)
+ values.append(defaults[optional])
+ return collections.namedtuple('X', keys)(*values)
+
+ try:
+ cmdline_parser = optparse.OptionParser()
+ cmdline_parser.add_option("--output_base")
+ cmdline_parser.add_option("--jinja_dir")
+ cmdline_parser.add_option("--config")
+ arg_options, _ = cmdline_parser.parse_args()
+ jinja_dir = arg_options.jinja_dir
+ if not jinja_dir:
+ raise Exception("jinja directory must be specified")
+ output_base = arg_options.output_base
+ if not output_base:
+ raise Exception("Base output directory must be specified")
+ config_file = arg_options.config
+ if not config_file:
+ raise Exception("Config file name must be specified")
+ config_base = os.path.dirname(config_file)
+ except Exception:
+ # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
+ exc = sys.exc_info()[1]
+ sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
+ exit(1)
+
+ try:
+ config_json_file = open(config_file, "r")
+ config_json_string = config_json_file.read()
+ config_partial = json_to_object(config_json_string, output_base, config_base)
+ config_json_file.close()
+ defaults = {
+ ".imported": False,
+ ".imported.export_macro": "",
+ ".imported.export_header": False,
+ ".imported.header": False,
+ ".imported.package": False,
+ ".protocol.export_macro": "",
+ ".protocol.export_header": False,
+ ".exported": False,
+ ".exported.export_macro": "",
+ ".exported.export_header": False,
+ ".lib": False,
+ ".lib.export_macro": "",
+ ".lib.export_header": False,
+ }
+ return (jinja_dir, config_file, init_defaults(config_partial, "", defaults))
+ except Exception:
+ # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
+ exc = sys.exc_info()[1]
+ sys.stderr.write("Failed to parse config file: %s\n\n" % exc)
+ exit(1)
+
+
+def to_title_case(name):
+ return name[:1].upper() + name[1:]
+
+
+def dash_to_camelcase(word):
+ prefix = ""
+ if word[0] == "-":
+ prefix = "Negative"
+ word = word[1:]
+ return prefix + "".join(to_title_case(x) or "-" for x in word.split("-"))
+
+
+def initialize_jinja_env(jinja_dir, cache_dir):
+ # pylint: disable=F0401
+ sys.path.insert(1, os.path.abspath(jinja_dir))
+ import jinja2
+
+ jinja_env = jinja2.Environment(
+ loader=jinja2.FileSystemLoader(module_path),
+ # Bytecode cache is not concurrency-safe unless pre-cached:
+ # if pre-cached this is read-only, but writing creates a race condition.
+ bytecode_cache=jinja2.FileSystemBytecodeCache(cache_dir),
+ keep_trailing_newline=True, # newline-terminate generated files
+ lstrip_blocks=True, # so can indent control flow tags
+ trim_blocks=True)
+ jinja_env.filters.update({"to_title_case": to_title_case, "dash_to_camelcase": dash_to_camelcase})
+ jinja_env.add_extension("jinja2.ext.loopcontrols")
+ return jinja_env
+
+
+def patch_full_qualified_refs(protocol):
+ def patch_full_qualified_refs_in_domain(json, domain_name):
+ if isinstance(json, list):
+ for item in json:
+ patch_full_qualified_refs_in_domain(item, domain_name)
+
+ if not isinstance(json, dict):
+ return
+ for key in json:
+ if key == "type" and json[key] == "string":
+ json[key] = domain_name + ".string"
+ if key != "$ref":
+ patch_full_qualified_refs_in_domain(json[key], domain_name)
+ continue
+ if json["$ref"].find(".") == -1:
+ json["$ref"] = domain_name + "." + json["$ref"]
+ return
+
+ for domain in protocol.json_api["domains"]:
+ patch_full_qualified_refs_in_domain(domain, domain["domain"])
+
+
+def calculate_exports(protocol):
+ def calculate_exports_in_json(json_value):
+ has_exports = False
+ if isinstance(json_value, list):
+ for item in json_value:
+ has_exports = calculate_exports_in_json(item) or has_exports
+ if isinstance(json_value, dict):
+ has_exports = ("exported" in json_value and json_value["exported"]) or has_exports
+ for key in json_value:
+ has_exports = calculate_exports_in_json(json_value[key]) or has_exports
+ return has_exports
+
+ protocol.json_api["has_exports"] = False
+ for domain_json in protocol.json_api["domains"]:
+ domain_json["has_exports"] = calculate_exports_in_json(domain_json)
+ if domain_json["has_exports"] and domain_json["domain"] in protocol.generate_domains:
+ protocol.json_api["has_exports"] = True
+
+
+def create_imported_type_definition(domain_name, type, imported_namespace):
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<%s::%s::API::%s>" % (imported_namespace, domain_name, type["id"]),
+ "pass_type": "std::unique_ptr<%s::%s::API::%s>" % (imported_namespace, domain_name, type["id"]),
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<%s::%s::API::%s>" % (imported_namespace, domain_name, type["id"]),
+ "raw_type": "%s::%s::API::%s" % (imported_namespace, domain_name, type["id"]),
+ "raw_pass_type": "%s::%s::API::%s*" % (imported_namespace, domain_name, type["id"]),
+ "raw_return_type": "%s::%s::API::%s*" % (imported_namespace, domain_name, type["id"]),
+ }
+
+
+def create_user_type_definition(domain_name, type):
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
+ "pass_type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::%s::%s>" % (domain_name, type["id"]),
+ "raw_type": "protocol::%s::%s" % (domain_name, type["id"]),
+ "raw_pass_type": "protocol::%s::%s*" % (domain_name, type["id"]),
+ "raw_return_type": "protocol::%s::%s*" % (domain_name, type["id"]),
+ }
+
+
+def create_object_type_definition():
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::DictionaryValue>",
+ "pass_type": "std::unique_ptr<protocol::DictionaryValue>",
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::DictionaryValue>",
+ "raw_type": "protocol::DictionaryValue",
+ "raw_pass_type": "protocol::DictionaryValue*",
+ "raw_return_type": "protocol::DictionaryValue*",
+ }
+
+
+def create_any_type_definition():
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::Value>",
+ "pass_type": "std::unique_ptr<protocol::Value>",
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::Value>",
+ "raw_type": "protocol::Value",
+ "raw_pass_type": "protocol::Value*",
+ "raw_return_type": "protocol::Value*",
+ }
+
+
+def create_string_type_definition():
+ # pylint: disable=W0622
+ return {
+ "return_type": "String",
+ "pass_type": "const String&",
+ "to_pass_type": "%s",
+ "to_raw_type": "%s",
+ "to_rvalue": "%s",
+ "type": "String",
+ "raw_type": "String",
+ "raw_pass_type": "const String&",
+ "raw_return_type": "String",
+ }
+
+
+def create_primitive_type_definition(type):
+ # pylint: disable=W0622
+ typedefs = {
+ "number": "double",
+ "integer": "int",
+ "boolean": "bool"
+ }
+ defaults = {
+ "number": "0",
+ "integer": "0",
+ "boolean": "false"
+ }
+ jsontypes = {
+ "number": "TypeDouble",
+ "integer": "TypeInteger",
+ "boolean": "TypeBoolean",
+ }
+ return {
+ "return_type": typedefs[type],
+ "pass_type": typedefs[type],
+ "to_pass_type": "%s",
+ "to_raw_type": "%s",
+ "to_rvalue": "%s",
+ "type": typedefs[type],
+ "raw_type": typedefs[type],
+ "raw_pass_type": typedefs[type],
+ "raw_return_type": typedefs[type],
+ "default_value": defaults[type]
+ }
+
+
+def wrap_array_definition(type):
+ # pylint: disable=W0622
+ return {
+ "return_type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
+ "pass_type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
+ "to_raw_type": "%s.get()",
+ "to_pass_type": "std::move(%s)",
+ "to_rvalue": "std::move(%s)",
+ "type": "std::unique_ptr<protocol::Array<%s>>" % type["raw_type"],
+ "raw_type": "protocol::Array<%s>" % type["raw_type"],
+ "raw_pass_type": "protocol::Array<%s>*" % type["raw_type"],
+ "raw_return_type": "protocol::Array<%s>*" % type["raw_type"],
+ "create_type": "wrapUnique(new protocol::Array<%s>())" % type["raw_type"],
+ "out_type": "protocol::Array<%s>&" % type["raw_type"],
+ }
+
+
+def create_type_definitions(protocol, imported_namespace):
+ protocol.type_definitions = {}
+ protocol.type_definitions["number"] = create_primitive_type_definition("number")
+ protocol.type_definitions["integer"] = create_primitive_type_definition("integer")
+ protocol.type_definitions["boolean"] = create_primitive_type_definition("boolean")
+ protocol.type_definitions["object"] = create_object_type_definition()
+ protocol.type_definitions["any"] = create_any_type_definition()
+ for domain in protocol.json_api["domains"]:
+ protocol.type_definitions[domain["domain"] + ".string"] = create_string_type_definition()
+ if not ("types" in domain):
+ continue
+ for type in domain["types"]:
+ type_name = domain["domain"] + "." + type["id"]
+ if type["type"] == "object" and domain["domain"] in protocol.imported_domains:
+ protocol.type_definitions[type_name] = create_imported_type_definition(domain["domain"], type, imported_namespace)
+ elif type["type"] == "object":
+ protocol.type_definitions[type_name] = create_user_type_definition(domain["domain"], type)
+ elif type["type"] == "array":
+ items_type = type["items"]["type"]
+ protocol.type_definitions[type_name] = wrap_array_definition(protocol.type_definitions[items_type])
+ elif type["type"] == domain["domain"] + ".string":
+ protocol.type_definitions[type_name] = create_string_type_definition()
+ else:
+ protocol.type_definitions[type_name] = create_primitive_type_definition(type["type"])
+
+
+def type_definition(protocol, name):
+ return protocol.type_definitions[name]
+
+
+def resolve_type(protocol, prop):
+ if "$ref" in prop:
+ return protocol.type_definitions[prop["$ref"]]
+ if prop["type"] == "array":
+ return wrap_array_definition(resolve_type(protocol, prop["items"]))
+ return protocol.type_definitions[prop["type"]]
+
+
+def join_arrays(dict, keys):
+ result = []
+ for key in keys:
+ if key in dict:
+ result += dict[key]
+ return result
+
+
+def has_disable(commands):
+ for command in commands:
+ if command["name"] == "disable" and (not ("handlers" in command) or "renderer" in command["handlers"]):
+ return True
+ return False
+
+
+def format_include(header):
+ return "\"" + header + "\"" if header[0] not in "<\"" else header
+
+
+def read_protocol_file(file_name, json_api):
+ input_file = open(file_name, "r")
+ json_string = input_file.read()
+ input_file.close()
+ parsed_json = json.loads(json_string)
+ version = parsed_json["version"]["major"] + "." + parsed_json["version"]["minor"]
+ domains = []
+ for domain in parsed_json["domains"]:
+ domains.append(domain["domain"])
+ domain["version"] = version
+ json_api["domains"] += parsed_json["domains"]
+ return domains
+
+
+class Protocol(object):
+ def __init__(self):
+ self.json_api = {}
+ self.generate_domains = []
+ self.imported_domains = []
+
+
+def main():
+ jinja_dir, config_file, config = read_config()
+
+ protocol = Protocol()
+ protocol.json_api = {"domains": []}
+ protocol.generate_domains = read_protocol_file(config.protocol.path, protocol.json_api)
+ protocol.imported_domains = read_protocol_file(config.imported.path, protocol.json_api) if config.imported else []
+ patch_full_qualified_refs(protocol)
+ calculate_exports(protocol)
+ create_type_definitions(protocol, "::".join(config.imported.namespace) if config.imported else "")
+
+ if not config.exported:
+ for domain_json in protocol.json_api["domains"]:
+ if domain_json["has_exports"] and domain_json["domain"] in protocol.generate_domains:
+ sys.stderr.write("Domain %s is exported, but config is missing export entry\n\n" % domain_json["domain"])
+ exit(1)
+
+ if not os.path.exists(config.protocol.output):
+ os.mkdir(config.protocol.output)
+ if protocol.json_api["has_exports"] and not os.path.exists(config.exported.output):
+ os.mkdir(config.exported.output)
+ jinja_env = initialize_jinja_env(jinja_dir, config.protocol.output)
+
+ inputs = []
+ inputs.append(__file__)
+ inputs.append(config_file)
+ inputs.append(config.protocol.path)
+ if config.imported:
+ inputs.append(config.imported.path)
+ templates_dir = os.path.join(module_path, "templates")
+ inputs.append(os.path.join(templates_dir, "TypeBuilder_h.template"))
+ inputs.append(os.path.join(templates_dir, "TypeBuilder_cpp.template"))
+ inputs.append(os.path.join(templates_dir, "Exported_h.template"))
+ inputs.append(os.path.join(templates_dir, "Imported_h.template"))
+
+ h_template = jinja_env.get_template("templates/TypeBuilder_h.template")
+ cpp_template = jinja_env.get_template("templates/TypeBuilder_cpp.template")
+ exported_template = jinja_env.get_template("templates/Exported_h.template")
+ imported_template = jinja_env.get_template("templates/Imported_h.template")
+
+ outputs = dict()
+
+ for domain in protocol.json_api["domains"]:
+ class_name = domain["domain"]
+ template_context = {
+ "config": config,
+ "domain": domain,
+ "join_arrays": join_arrays,
+ "resolve_type": functools.partial(resolve_type, protocol),
+ "type_definition": functools.partial(type_definition, protocol),
+ "has_disable": has_disable,
+ "format_include": format_include
+ }
+
+ if domain["domain"] in protocol.generate_domains:
+ outputs[os.path.join(config.protocol.output, class_name + ".h")] = h_template.render(template_context)
+ outputs[os.path.join(config.protocol.output, class_name + ".cpp")] = cpp_template.render(template_context)
+ if domain["has_exports"]:
+ outputs[os.path.join(config.exported.output, class_name + ".h")] = exported_template.render(template_context)
+ if domain["domain"] in protocol.imported_domains and domain["has_exports"]:
+ outputs[os.path.join(config.protocol.output, class_name + ".h")] = imported_template.render(template_context)
+
+ if config.lib:
+ template_context = {
+ "config": config,
+ "format_include": format_include,
+ }
+
+ lib_templates_dir = os.path.join(module_path, "lib")
+ # Note these should be sorted in the right order.
+ # TODO(dgozman): sort them programmatically based on commented includes.
+ lib_h_templates = [
+ "Collections_h.template",
+ "ErrorSupport_h.template",
+ "Values_h.template",
+ "Object_h.template",
+ "ValueConversions_h.template",
+ "Maybe_h.template",
+ "Array_h.template",
+ "DispatcherBase_h.template",
+ "Parser_h.template",
+ ]
+
+ lib_cpp_templates = [
+ "Protocol_cpp.template",
+ "ErrorSupport_cpp.template",
+ "Values_cpp.template",
+ "Object_cpp.template",
+ "DispatcherBase_cpp.template",
+ "Parser_cpp.template",
+ ]
+
+ forward_h_templates = [
+ "Forward_h.template",
+ "Allocator_h.template",
+ "FrontendChannel_h.template",
+ ]
+
+ def generate_lib_file(file_name, template_files):
+ parts = []
+ for template_file in template_files:
+ inputs.append(os.path.join(lib_templates_dir, template_file))
+ template = jinja_env.get_template("lib/" + template_file)
+ parts.append(template.render(template_context))
+ outputs[file_name] = "\n\n".join(parts)
+
+ generate_lib_file(os.path.join(config.lib.output, "Forward.h"), forward_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, "Protocol.h"), lib_h_templates)
+ generate_lib_file(os.path.join(config.lib.output, "Protocol.cpp"), lib_cpp_templates)
+
+    # Make gyp / make generators happy, otherwise make rebuilds world.
+ inputs_ts = max(map(os.path.getmtime, inputs))
+ up_to_date = True
+ for output_file in outputs.iterkeys():
+ if not os.path.exists(output_file) or os.path.getmtime(output_file) < inputs_ts:
+ up_to_date = False
+ break
+ if up_to_date:
+ sys.exit()
+
+ for file_name, content in outputs.iteritems():
+ out_file = open(file_name, "w")
+ out_file.write(content)
+ out_file.close()
+
+
+main()
diff --git a/deps/v8/third_party/inspector_protocol/ConcatenateProtocols.py b/deps/v8/third_party/inspector_protocol/ConcatenateProtocols.py
new file mode 100755
index 0000000000..a7cbc992c7
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/ConcatenateProtocols.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os.path
+import sys
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+def main(argv):
+ if len(argv) < 1:
+ sys.stderr.write("Usage: %s <protocol-1> [<protocol-2> [, <protocol-3>...]] <output-file>\n" % sys.argv[0])
+ return 1
+
+ domains = []
+ version = None
+ for protocol in argv[:-1]:
+ file_name = os.path.normpath(protocol)
+ if not os.path.isfile(file_name):
+ sys.stderr.write("Cannot find %s\n" % file_name)
+ return 1
+ input_file = open(file_name, "r")
+ json_string = input_file.read()
+ parsed_json = json.loads(json_string)
+ domains += parsed_json["domains"]
+ version = parsed_json["version"]
+
+ output_file = open(argv[-1], "w")
+ json.dump({"version": version, "domains": domains}, output_file, indent=4, sort_keys=False, separators=(',', ': '))
+ output_file.close()
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/deps/v8/test/mjsunit/regress/regress-220.js b/deps/v8/third_party/inspector_protocol/LICENSE
index cd38a478cc..800468e576 100644
--- a/deps/v8/test/mjsunit/regress/regress-220.js
+++ b/deps/v8/third_party/inspector_protocol/LICENSE
@@ -1,17 +1,18 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2016 The Chromium Authors. All rights reserved.
+//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -24,10 +25,3 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --no-harmony-restrictive-declarations
-
-function foo(f) { eval(f); }
-
-// Ensure that compiling a declaration of a function does not crash.
-foo("(function (x) { with ({x: []}) function x(){} })");
diff --git a/deps/v8/third_party/inspector_protocol/OWNERS b/deps/v8/third_party/inspector_protocol/OWNERS
new file mode 100644
index 0000000000..ff8888ace3
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/OWNERS
@@ -0,0 +1,9 @@
+set noparent
+
+alph@chromium.org
+caseq@chromium.org
+dgozman@chromium.org
+jochen@chromium.org
+kozyatinskiy@chromium.org
+pfeldman@chromium.org
+yangguo@chromium.org \ No newline at end of file
diff --git a/deps/v8/third_party/inspector_protocol/README.v8 b/deps/v8/third_party/inspector_protocol/README.v8
new file mode 100644
index 0000000000..d95b61eaaf
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/README.v8
@@ -0,0 +1,16 @@
+Name: inspector protocol
+Short Name: inspector_protocol
+URL: https://chromium.googlesource.com/deps/inspector_protocol/
+Version: 0
+Revision: ebda02bf94a742a2e26e4f818df1fc77517ac44c
+License: BSD
+License File: LICENSE
+Security Critical: no
+
+Description:
+src/inspector uses these scripts to generate handlers from protocol
+description.
+
+Local modifications:
+- This only includes the lib/ and templates/ directories, scripts, build
+ and the LICENSE files.
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gni b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
new file mode 100644
index 0000000000..34bc0de208
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gni
@@ -0,0 +1,80 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This template will generate inspector protocol source code. The code will
+# not be compiled, use get_target_outputs(<name>) to compile them.
+#
+# Inputs
+#
+# config_file (required)
+# Path to json file specifying inspector protocol configuration.
+#
+# out_dir (required)
+# Path to put the generated files in. It must be inside output or
+# generated file directory.
+#
+# outputs (required)
+# Files generated. Relative to out_dir.
+#
+# inputs (optional)
+# Extra inputs specified by the config file.
+template("inspector_protocol_generate") {
+ assert(defined(invoker.config_file))
+ assert(defined(invoker.out_dir))
+ assert(defined(invoker.outputs))
+ assert(defined(invoker.inspector_protocol_dir))
+ inspector_protocol_dir = invoker.inspector_protocol_dir
+
+ action(target_name) {
+ script = "$inspector_protocol_dir/CodeGenerator.py"
+
+ inputs = [
+ invoker.config_file,
+ "$inspector_protocol_dir/lib/Allocator_h.template",
+ "$inspector_protocol_dir/lib/Array_h.template",
+ "$inspector_protocol_dir/lib/Collections_h.template",
+ "$inspector_protocol_dir/lib/DispatcherBase_cpp.template",
+ "$inspector_protocol_dir/lib/DispatcherBase_h.template",
+ "$inspector_protocol_dir/lib/ErrorSupport_cpp.template",
+ "$inspector_protocol_dir/lib/ErrorSupport_h.template",
+ "$inspector_protocol_dir/lib/Forward_h.template",
+ "$inspector_protocol_dir/lib/FrontendChannel_h.template",
+ "$inspector_protocol_dir/lib/Maybe_h.template",
+ "$inspector_protocol_dir/lib/Object_cpp.template",
+ "$inspector_protocol_dir/lib/Object_h.template",
+ "$inspector_protocol_dir/lib/Parser_cpp.template",
+ "$inspector_protocol_dir/lib/Parser_h.template",
+ "$inspector_protocol_dir/lib/Protocol_cpp.template",
+ "$inspector_protocol_dir/lib/ValueConversions_h.template",
+ "$inspector_protocol_dir/lib/Values_cpp.template",
+ "$inspector_protocol_dir/lib/Values_h.template",
+ "$inspector_protocol_dir/templates/Exported_h.template",
+ "$inspector_protocol_dir/templates/Imported_h.template",
+ "$inspector_protocol_dir/templates/TypeBuilder_cpp.template",
+ "$inspector_protocol_dir/templates/TypeBuilder_h.template",
+ ]
+ if (defined(invoker.inputs)) {
+ inputs += invoker.inputs
+ }
+
+ args = [
+ "--jinja_dir",
+ rebase_path("//third_party/", root_build_dir), # jinja is in chromium's third_party
+ "--output_base",
+ rebase_path(invoker.out_dir, root_build_dir),
+ "--config",
+ rebase_path(invoker.config_file, root_build_dir),
+ ]
+
+ outputs = get_path_info(rebase_path(invoker.outputs, ".", invoker.out_dir),
+ "abspath")
+
+ forward_variables_from(invoker,
+ [
+ "visibility",
+ "deps",
+ "public_deps",
+ ])
+ }
+}
diff --git a/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi b/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
new file mode 100644
index 0000000000..1fb7119b5f
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/inspector_protocol.gypi
@@ -0,0 +1,33 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'inspector_protocol_files': [
+ 'lib/Allocator_h.template',
+ 'lib/Array_h.template',
+ 'lib/Collections_h.template',
+ 'lib/DispatcherBase_cpp.template',
+ 'lib/DispatcherBase_h.template',
+ 'lib/ErrorSupport_cpp.template',
+ 'lib/ErrorSupport_h.template',
+ 'lib/Forward_h.template',
+ 'lib/FrontendChannel_h.template',
+ 'lib/Maybe_h.template',
+ 'lib/Object_cpp.template',
+ 'lib/Object_h.template',
+ 'lib/Parser_cpp.template',
+ 'lib/Parser_h.template',
+ 'lib/Protocol_cpp.template',
+ 'lib/ValueConversions_h.template',
+ 'lib/Values_cpp.template',
+ 'lib/Values_h.template',
+ 'templates/Exported_h.template',
+ 'templates/Imported_h.template',
+ 'templates/TypeBuilder_cpp.template',
+ 'templates/TypeBuilder_h.template',
+ 'CodeGenerator.py',
+ ]
+ }
+}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
new file mode 100644
index 0000000000..8f8109d695
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Allocator_h.template
@@ -0,0 +1,30 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Allocator_h
+#define {{"_".join(config.protocol.namespace)}}_Allocator_h
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+enum NotNullTagEnum { NotNullLiteral };
+
+#define PROTOCOL_DISALLOW_NEW() \
+ private: \
+ void* operator new(size_t) = delete; \
+ void* operator new(size_t, NotNullTagEnum, void*) = delete; \
+ void* operator new(size_t, void*) = delete; \
+ public:
+
+#define PROTOCOL_DISALLOW_COPY(ClassName) \
+ private: \
+ ClassName(const ClassName&) = delete; \
+ ClassName& operator=(const ClassName&) = delete
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Allocator_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Array_h.template b/deps/v8/third_party/inspector_protocol/lib/Array_h.template
new file mode 100644
index 0000000000..9555e302a9
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Array_h.template
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Array_h
+#define {{"_".join(config.protocol.namespace)}}_Array_h
+
+//#include "ErrorSupport.h"
+//#include "Forward.h"
+//#include "ValueConversions.h"
+//#include "Values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+template<typename T>
+class Array {
+public:
+ static std::unique_ptr<Array<T>> create()
+ {
+ return wrapUnique(new Array<T>());
+ }
+
+ static std::unique_ptr<Array<T>> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ protocol::ListValue* array = ListValue::cast(value);
+ if (!array) {
+ errors->addError("array expected");
+ return nullptr;
+ }
+ std::unique_ptr<Array<T>> result(new Array<T>());
+ errors->push();
+ for (size_t i = 0; i < array->size(); ++i) {
+ errors->setName(StringUtil::fromInteger(i));
+ std::unique_ptr<T> item = ValueConversions<T>::parse(array->at(i), errors);
+ result->m_vector.push_back(std::move(item));
+ }
+ errors->pop();
+ if (errors->hasErrors())
+ return nullptr;
+ return result;
+ }
+
+ void addItem(std::unique_ptr<T> value)
+ {
+ m_vector.push_back(std::move(value));
+ }
+
+ size_t length()
+ {
+ return m_vector.size();
+ }
+
+ T* get(size_t index)
+ {
+ return m_vector[index].get();
+ }
+
+ std::unique_ptr<protocol::ListValue> serialize()
+ {
+ std::unique_ptr<protocol::ListValue> result = ListValue::create();
+ for (auto& item : m_vector)
+ result->pushValue(ValueConversions<T>::serialize(item));
+ return result;
+ }
+
+private:
+ std::vector<std::unique_ptr<T>> m_vector;
+};
+
+template<typename T>
+class ArrayBase {
+public:
+ static std::unique_ptr<Array<T>> create()
+ {
+ return wrapUnique(new Array<T>());
+ }
+
+ static std::unique_ptr<Array<T>> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ protocol::ListValue* array = ListValue::cast(value);
+ if (!array) {
+ errors->addError("array expected");
+ return nullptr;
+ }
+ errors->push();
+ std::unique_ptr<Array<T>> result(new Array<T>());
+ for (size_t i = 0; i < array->size(); ++i) {
+ errors->setName(StringUtil::fromInteger(i));
+ T item = ValueConversions<T>::parse(array->at(i), errors);
+ result->m_vector.push_back(item);
+ }
+ errors->pop();
+ if (errors->hasErrors())
+ return nullptr;
+ return result;
+ }
+
+ void addItem(const T& value)
+ {
+ m_vector.push_back(value);
+ }
+
+ size_t length()
+ {
+ return m_vector.size();
+ }
+
+ T get(size_t index)
+ {
+ return m_vector[index];
+ }
+
+ std::unique_ptr<protocol::ListValue> serialize()
+ {
+ std::unique_ptr<protocol::ListValue> result = ListValue::create();
+ for (auto& item : m_vector)
+ result->pushValue(ValueConversions<T>::serialize(item));
+ return result;
+ }
+
+private:
+ std::vector<T> m_vector;
+};
+
+template<> class Array<String> : public ArrayBase<String> {};
+template<> class Array<int> : public ArrayBase<int> {};
+template<> class Array<double> : public ArrayBase<double> {};
+template<> class Array<bool> : public ArrayBase<bool> {};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Array_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Collections_h.template b/deps/v8/third_party/inspector_protocol/lib/Collections_h.template
new file mode 100644
index 0000000000..3f760287b5
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Collections_h.template
@@ -0,0 +1,43 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Collections_h
+#define {{"_".join(config.protocol.namespace)}}_Collections_h
+
+#include "{{config.protocol.package}}/Forward.h"
+#include <cstddef>
+
+#if defined(__APPLE__) && !defined(_LIBCPP_VERSION)
+#include <map>
+#include <set>
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+template <class Key, class T> using HashMap = std::map<Key, T>;
+template <class Key> using HashSet = std::set<Key>;
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#else
+#include <unordered_map>
+#include <unordered_set>
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+template <class Key, class T> using HashMap = std::unordered_map<Key, T>;
+template <class Key> using HashSet = std::unordered_set<Key>;
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // defined(__APPLE__) && !defined(_LIBCPP_VERSION)
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Collections_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
new file mode 100644
index 0000000000..c4f36a5fd3
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_cpp.template
@@ -0,0 +1,225 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#include "DispatcherBase.h"
+//#include "Parser.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+// static
+DispatchResponse DispatchResponse::OK()
+{
+ DispatchResponse result;
+ result.m_status = kSuccess;
+ result.m_errorCode = kParseError;
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::Error(const String& error)
+{
+ DispatchResponse result;
+ result.m_status = kError;
+ result.m_errorCode = kServerError;
+ result.m_errorMessage = error;
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::InternalError()
+{
+ DispatchResponse result;
+ result.m_status = kError;
+ result.m_errorCode = kInternalError;
+ result.m_errorMessage = "Internal error";
+ return result;
+}
+
+// static
+DispatchResponse DispatchResponse::FallThrough()
+{
+ DispatchResponse result;
+ result.m_status = kFallThrough;
+ result.m_errorCode = kParseError;
+ return result;
+}
+
+// static
+const char DispatcherBase::kInvalidParamsString[] = "Invalid parameters";
+
+DispatcherBase::WeakPtr::WeakPtr(DispatcherBase* dispatcher) : m_dispatcher(dispatcher) { }
+
+DispatcherBase::WeakPtr::~WeakPtr()
+{
+ if (m_dispatcher)
+ m_dispatcher->m_weakPtrs.erase(this);
+}
+
+DispatcherBase::Callback::Callback(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId)
+ : m_backendImpl(std::move(backendImpl))
+ , m_callId(callId) { }
+
+DispatcherBase::Callback::~Callback() = default;
+
+void DispatcherBase::Callback::dispose()
+{
+ m_backendImpl = nullptr;
+}
+
+void DispatcherBase::Callback::sendIfActive(std::unique_ptr<protocol::DictionaryValue> partialMessage, const DispatchResponse& response)
+{
+ if (!m_backendImpl || !m_backendImpl->get())
+ return;
+ m_backendImpl->get()->sendResponse(m_callId, response, std::move(partialMessage));
+ m_backendImpl = nullptr;
+}
+
+DispatcherBase::DispatcherBase(FrontendChannel* frontendChannel)
+ : m_frontendChannel(frontendChannel) { }
+
+DispatcherBase::~DispatcherBase()
+{
+ clearFrontend();
+}
+
+// static
+bool DispatcherBase::getCommandName(const String& message, String* result)
+{
+ std::unique_ptr<protocol::Value> value = parseJSON(message);
+ if (!value)
+ return false;
+
+ protocol::DictionaryValue* object = DictionaryValue::cast(value.get());
+ if (!object)
+ return false;
+
+ if (!object->getString("method", result))
+ return false;
+
+ return true;
+}
+
+void DispatcherBase::sendResponse(int callId, const DispatchResponse& response, std::unique_ptr<protocol::DictionaryValue> result)
+{
+ if (response.status() == DispatchResponse::kError) {
+ reportProtocolError(callId, response.errorCode(), response.errorMessage(), nullptr);
+ return;
+ }
+
+ std::unique_ptr<protocol::DictionaryValue> responseMessage = DictionaryValue::create();
+ responseMessage->setInteger("id", callId);
+ responseMessage->setObject("result", std::move(result));
+ if (m_frontendChannel)
+ m_frontendChannel->sendProtocolResponse(callId, responseMessage->toJSONString());
+}
+
+void DispatcherBase::sendResponse(int callId, const DispatchResponse& response)
+{
+ sendResponse(callId, response, DictionaryValue::create());
+}
+
+static void reportProtocolErrorTo(FrontendChannel* frontendChannel, int callId, DispatchResponse::ErrorCode code, const String& errorMessage, ErrorSupport* errors)
+{
+ if (!frontendChannel)
+ return;
+ std::unique_ptr<protocol::DictionaryValue> error = DictionaryValue::create();
+ error->setInteger("code", code);
+ error->setString("message", errorMessage);
+ if (errors && errors->hasErrors())
+ error->setString("data", errors->errors());
+ std::unique_ptr<protocol::DictionaryValue> message = DictionaryValue::create();
+ message->setObject("error", std::move(error));
+ message->setInteger("id", callId);
+ frontendChannel->sendProtocolResponse(callId, message->toJSONString());
+}
+
+static void reportProtocolErrorTo(FrontendChannel* frontendChannel, DispatchResponse::ErrorCode code, const String& errorMessage)
+{
+ if (!frontendChannel)
+ return;
+ std::unique_ptr<protocol::DictionaryValue> error = DictionaryValue::create();
+ error->setInteger("code", code);
+ error->setString("message", errorMessage);
+ std::unique_ptr<protocol::DictionaryValue> message = DictionaryValue::create();
+ message->setObject("error", std::move(error));
+ frontendChannel->sendProtocolNotification(message->toJSONString());
+}
+
+void DispatcherBase::reportProtocolError(int callId, DispatchResponse::ErrorCode code, const String& errorMessage, ErrorSupport* errors)
+{
+ reportProtocolErrorTo(m_frontendChannel, callId, code, errorMessage, errors);
+}
+
+void DispatcherBase::clearFrontend()
+{
+ m_frontendChannel = nullptr;
+ for (auto& weak : m_weakPtrs)
+ weak->dispose();
+ m_weakPtrs.clear();
+}
+
+std::unique_ptr<DispatcherBase::WeakPtr> DispatcherBase::weakPtr()
+{
+ std::unique_ptr<DispatcherBase::WeakPtr> weak(new DispatcherBase::WeakPtr(this));
+ m_weakPtrs.insert(weak.get());
+ return weak;
+}
+
+UberDispatcher::UberDispatcher(FrontendChannel* frontendChannel)
+ : m_frontendChannel(frontendChannel) { }
+
+void UberDispatcher::registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase> dispatcher)
+{
+ m_dispatchers[name] = std::move(dispatcher);
+}
+
+DispatchResponse::Status UberDispatcher::dispatch(std::unique_ptr<Value> parsedMessage)
+{
+ if (!parsedMessage) {
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kParseError, "Message must be a valid JSON");
+ return DispatchResponse::kError;
+ }
+ std::unique_ptr<protocol::DictionaryValue> messageObject = DictionaryValue::cast(std::move(parsedMessage));
+ if (!messageObject) {
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must be an object");
+ return DispatchResponse::kError;
+ }
+
+ int callId = 0;
+ protocol::Value* callIdValue = messageObject->get("id");
+ bool success = callIdValue && callIdValue->asInteger(&callId);
+ if (!success) {
+ reportProtocolErrorTo(m_frontendChannel, DispatchResponse::kInvalidRequest, "Message must have integer 'id' porperty");
+ return DispatchResponse::kError;
+ }
+
+ protocol::Value* methodValue = messageObject->get("method");
+ String method;
+ success = methodValue && methodValue->asString(&method);
+ if (!success) {
+ reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kInvalidRequest, "Message must have string 'method' porperty", nullptr);
+ return DispatchResponse::kError;
+ }
+
+ size_t dotIndex = method.find(".");
+ if (dotIndex == StringUtil::kNotFound) {
+ reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
+ return DispatchResponse::kError;
+ }
+ String domain = StringUtil::substring(method, 0, dotIndex);
+ auto it = m_dispatchers.find(domain);
+ if (it == m_dispatchers.end()) {
+ reportProtocolErrorTo(m_frontendChannel, callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
+ return DispatchResponse::kError;
+ }
+ return it->second->dispatch(callId, method, std::move(messageObject));
+}
+
+UberDispatcher::~UberDispatcher() = default;
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
new file mode 100644
index 0000000000..4fb89efafe
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/DispatcherBase_h.template
@@ -0,0 +1,120 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_DispatcherBase_h
+#define {{"_".join(config.protocol.namespace)}}_DispatcherBase_h
+
+//#include "Collections.h"
+//#include "ErrorSupport.h"
+//#include "Forward.h"
+//#include "Values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+class WeakPtr;
+
+class {{config.lib.export_macro}} DispatchResponse {
+public:
+ enum Status {
+ kSuccess = 0,
+ kError = 1,
+ kFallThrough = 2,
+ kAsync = 3
+ };
+
+ enum ErrorCode {
+ kParseError = -32700,
+ kInvalidRequest = -32600,
+ kMethodNotFound = -32601,
+ kInvalidParams = -32602,
+ kInternalError = -32603,
+ kServerError = -32000,
+ };
+
+ Status status() const { return m_status; }
+ const String& errorMessage() const { return m_errorMessage; }
+ ErrorCode errorCode() const { return m_errorCode; }
+ bool isSuccess() const { return m_status == kSuccess; }
+
+ static DispatchResponse OK();
+ static DispatchResponse Error(const String&);
+ static DispatchResponse InternalError();
+ static DispatchResponse FallThrough();
+
+private:
+ Status m_status;
+ String m_errorMessage;
+ ErrorCode m_errorCode;
+};
+
+class {{config.lib.export_macro}} DispatcherBase {
+ PROTOCOL_DISALLOW_COPY(DispatcherBase);
+public:
+ static const char kInvalidParamsString[];
+ class {{config.lib.export_macro}} WeakPtr {
+ public:
+ explicit WeakPtr(DispatcherBase*);
+ ~WeakPtr();
+ DispatcherBase* get() { return m_dispatcher; }
+ void dispose() { m_dispatcher = nullptr; }
+
+ private:
+ DispatcherBase* m_dispatcher;
+ };
+
+ class {{config.lib.export_macro}} Callback {
+ public:
+ Callback(std::unique_ptr<WeakPtr> backendImpl, int callId);
+ virtual ~Callback();
+ void dispose();
+
+ protected:
+ void sendIfActive(std::unique_ptr<protocol::DictionaryValue> partialMessage, const DispatchResponse& response);
+
+ private:
+ std::unique_ptr<WeakPtr> m_backendImpl;
+ int m_callId;
+ };
+
+ explicit DispatcherBase(FrontendChannel*);
+ virtual ~DispatcherBase();
+
+ static bool getCommandName(const String& message, String* result);
+
+ virtual DispatchResponse::Status dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject) = 0;
+
+ void sendResponse(int callId, const DispatchResponse&, std::unique_ptr<protocol::DictionaryValue> result);
+ void sendResponse(int callId, const DispatchResponse&);
+
+ void reportProtocolError(int callId, DispatchResponse::ErrorCode, const String& errorMessage, ErrorSupport* errors);
+ void clearFrontend();
+
+ std::unique_ptr<WeakPtr> weakPtr();
+
+private:
+ FrontendChannel* m_frontendChannel;
+ protocol::HashSet<WeakPtr*> m_weakPtrs;
+};
+
+class {{config.lib.export_macro}} UberDispatcher {
+ PROTOCOL_DISALLOW_COPY(UberDispatcher);
+public:
+ explicit UberDispatcher(FrontendChannel*);
+ void registerBackend(const String& name, std::unique_ptr<protocol::DispatcherBase>);
+ DispatchResponse::Status dispatch(std::unique_ptr<Value> message);
+ FrontendChannel* channel() { return m_frontendChannel; }
+ virtual ~UberDispatcher();
+
+private:
+ FrontendChannel* m_frontendChannel;
+ protocol::HashMap<String, std::unique_ptr<protocol::DispatcherBase>> m_dispatchers;
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_DispatcherBase_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
new file mode 100644
index 0000000000..1d107f697a
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_cpp.template
@@ -0,0 +1,61 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#include "ErrorSupport.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+ErrorSupport::ErrorSupport() { }
+ErrorSupport::~ErrorSupport() { }
+
+void ErrorSupport::setName(const String& name)
+{
+ DCHECK(m_path.size());
+ m_path[m_path.size() - 1] = name;
+}
+
+void ErrorSupport::push()
+{
+ m_path.push_back(String());
+}
+
+void ErrorSupport::pop()
+{
+ m_path.pop_back();
+}
+
+void ErrorSupport::addError(const String& error)
+{
+ StringBuilder builder;
+ for (size_t i = 0; i < m_path.size(); ++i) {
+ if (i)
+ builder.append('.');
+ builder.append(m_path[i]);
+ }
+ builder.append(": ");
+ builder.append(error);
+ m_errors.push_back(builder.toString());
+}
+
+bool ErrorSupport::hasErrors()
+{
+ return m_errors.size();
+}
+
+String ErrorSupport::errors()
+{
+ StringBuilder builder;
+ for (size_t i = 0; i < m_errors.size(); ++i) {
+ if (i)
+ builder.append("; ");
+ builder.append(m_errors[i]);
+ }
+ return builder.toString();
+}
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
new file mode 100644
index 0000000000..0c98e3e0eb
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/ErrorSupport_h.template
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_ErrorSupport_h
+#define {{"_".join(config.protocol.namespace)}}_ErrorSupport_h
+
+//#include "Forward.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+class {{config.lib.export_macro}} ErrorSupport {
+public:
+ ErrorSupport();
+ ~ErrorSupport();
+
+ void push();
+ void setName(const String&);
+ void pop();
+ void addError(const String&);
+ bool hasErrors();
+ String errors();
+
+private:
+ std::vector<String> m_path;
+ std::vector<String> m_errors;
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_ErrorSupport_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Forward_h.template b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
new file mode 100644
index 0000000000..04868b707f
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Forward_h.template
@@ -0,0 +1,38 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Forward_h
+#define {{"_".join(config.protocol.namespace)}}_Forward_h
+
+{% if config.lib.export_header %}
+#include {{format_include(config.lib.export_header)}}
+{% endif %}
+#include {{format_include(config.lib.platform_header)}}
+#include {{format_include(config.lib.string_header)}}
+
+#include <vector>
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+template<typename T> class Array;
+class DictionaryValue;
+class DispatchResponse;
+class ErrorSupport;
+class FundamentalValue;
+class ListValue;
+template<typename T> class Maybe;
+class Object;
+using Response = DispatchResponse;
+class SerializedValue;
+class StringValue;
+class UberDispatcher;
+class Value;
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Forward_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
new file mode 100644
index 0000000000..8b653b5821
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/FrontendChannel_h.template
@@ -0,0 +1,24 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_FrontendChannel_h
+#define {{"_".join(config.protocol.namespace)}}_FrontendChannel_h
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+class {{config.lib.export_macro}} FrontendChannel {
+public:
+ virtual ~FrontendChannel() { }
+ virtual void sendProtocolResponse(int callId, const String& message) = 0;
+ virtual void sendProtocolNotification(const String& message) = 0;
+ virtual void flushProtocolNotifications() = 0;
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_FrontendChannel_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
new file mode 100644
index 0000000000..cd0dfbdf2e
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Maybe_h.template
@@ -0,0 +1,86 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Maybe_h
+#define {{"_".join(config.protocol.namespace)}}_Maybe_h
+
+//#include "Forward.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+template<typename T>
+class Maybe {
+public:
+ Maybe() : m_value() { }
+ Maybe(std::unique_ptr<T> value) : m_value(std::move(value)) { }
+ Maybe(Maybe&& other) : m_value(std::move(other.m_value)) { }
+ void operator=(std::unique_ptr<T> value) { m_value = std::move(value); }
+ T* fromJust() const { DCHECK(m_value); return m_value.get(); }
+ T* fromMaybe(T* defaultValue) const { return m_value ? m_value.get() : defaultValue; }
+ bool isJust() const { return !!m_value; }
+ std::unique_ptr<T> takeJust() { DCHECK(m_value); return m_value.release(); }
+private:
+ std::unique_ptr<T> m_value;
+};
+
+template<typename T>
+class MaybeBase {
+public:
+ MaybeBase() : m_isJust(false) { }
+ MaybeBase(T value) : m_isJust(true), m_value(value) { }
+ MaybeBase(MaybeBase&& other) : m_isJust(other.m_isJust), m_value(std::move(other.m_value)) { }
+ void operator=(T value) { m_value = value; m_isJust = true; }
+ T fromJust() const { DCHECK(m_isJust); return m_value; }
+ T fromMaybe(const T& defaultValue) const { return m_isJust ? m_value : defaultValue; }
+ bool isJust() const { return m_isJust; }
+ T takeJust() { DCHECK(m_isJust); return m_value; }
+
+protected:
+ bool m_isJust;
+ T m_value;
+};
+
+template<>
+class Maybe<bool> : public MaybeBase<bool> {
+public:
+ Maybe() { }
+ Maybe(bool value) : MaybeBase(value) { }
+ Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ using MaybeBase::operator=;
+};
+
+template<>
+class Maybe<int> : public MaybeBase<int> {
+public:
+ Maybe() { }
+ Maybe(int value) : MaybeBase(value) { }
+ Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ using MaybeBase::operator=;
+};
+
+template<>
+class Maybe<double> : public MaybeBase<double> {
+public:
+ Maybe() { }
+ Maybe(double value) : MaybeBase(value) { }
+ Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ using MaybeBase::operator=;
+};
+
+template<>
+class Maybe<String> : public MaybeBase<String> {
+public:
+ Maybe() { }
+ Maybe(const String& value) : MaybeBase(value) { }
+ Maybe(Maybe&& other) : MaybeBase(std::move(other)) { }
+ using MaybeBase::operator=;
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Maybe_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
new file mode 100644
index 0000000000..e3f18c3500
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Object_cpp.template
@@ -0,0 +1,37 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#include "Object.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+std::unique_ptr<Object> Object::parse(protocol::Value* value, ErrorSupport* errors)
+{
+ protocol::DictionaryValue* object = DictionaryValue::cast(value);
+ if (!object) {
+ errors->addError("object expected");
+ return nullptr;
+ }
+ return wrapUnique(new Object(wrapUnique(static_cast<DictionaryValue*>(object->clone().release()))));
+}
+
+std::unique_ptr<protocol::DictionaryValue> Object::serialize() const
+{
+ return DictionaryValue::cast(m_object->clone());
+}
+
+std::unique_ptr<Object> Object::clone() const
+{
+ return wrapUnique(new Object(DictionaryValue::cast(m_object->clone())));
+}
+
+Object::Object(std::unique_ptr<protocol::DictionaryValue> object) : m_object(std::move(object)) { }
+
+Object::~Object() { }
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Object_h.template b/deps/v8/third_party/inspector_protocol/lib/Object_h.template
new file mode 100644
index 0000000000..4ccd88bdab
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Object_h.template
@@ -0,0 +1,32 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Object_h
+#define {{"_".join(config.protocol.namespace)}}_Object_h
+
+//#include "ErrorSupport.h"
+//#include "Forward.h"
+//#include "Values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+class {{config.lib.export_macro}} Object {
+public:
+ static std::unique_ptr<Object> parse(protocol::Value*, ErrorSupport*);
+ ~Object();
+
+ std::unique_ptr<protocol::DictionaryValue> serialize() const;
+ std::unique_ptr<Object> clone() const;
+private:
+ explicit Object(std::unique_ptr<protocol::DictionaryValue>);
+ std::unique_ptr<protocol::DictionaryValue> m_object;
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Object_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
new file mode 100644
index 0000000000..a103b8228e
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
@@ -0,0 +1,553 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+namespace {
+
+const int stackLimit = 1000;
+
+enum Token {
+ ObjectBegin,
+ ObjectEnd,
+ ArrayBegin,
+ ArrayEnd,
+ StringLiteral,
+ Number,
+ BoolTrue,
+ BoolFalse,
+ NullToken,
+ ListSeparator,
+ ObjectPairSeparator,
+ InvalidToken,
+};
+
+const char* const nullString = "null";
+const char* const trueString = "true";
+const char* const falseString = "false";
+
+bool isASCII(uint16_t c)
+{
+ return !(c & ~0x7F);
+}
+
+bool isSpaceOrNewLine(uint16_t c)
+{
+ return isASCII(c) && c <= ' ' && (c == ' ' || (c <= 0xD && c >= 0x9));
+}
+
+double charactersToDouble(const uint16_t* characters, size_t length, bool* ok)
+{
+ std::vector<char> buffer;
+ buffer.reserve(length + 1);
+ for (size_t i = 0; i < length; ++i) {
+ if (!isASCII(characters[i])) {
+ *ok = false;
+ return 0;
+ }
+ buffer.push_back(static_cast<char>(characters[i]));
+ }
+ buffer.push_back('\0');
+ char* endptr;
+ double result = std::strtod(buffer.data(), &endptr);
+ *ok = !(*endptr);
+ return result;
+}
+
+double charactersToDouble(const uint8_t* characters, size_t length, bool* ok)
+{
+ std::string buffer(reinterpret_cast<const char*>(characters), length);
+ char* endptr;
+ double result = std::strtod(buffer.data(), &endptr);
+ *ok = !(*endptr);
+ return result;
+}
+
+template<typename Char>
+bool parseConstToken(const Char* start, const Char* end, const Char** tokenEnd, const char* token)
+{
+ while (start < end && *token != '\0' && *start++ == *token++) { }
+ if (*token != '\0')
+ return false;
+ *tokenEnd = start;
+ return true;
+}
+
+template<typename Char>
+bool readInt(const Char* start, const Char* end, const Char** tokenEnd, bool canHaveLeadingZeros)
+{
+ if (start == end)
+ return false;
+ bool haveLeadingZero = '0' == *start;
+ int length = 0;
+ while (start < end && '0' <= *start && *start <= '9') {
+ ++start;
+ ++length;
+ }
+ if (!length)
+ return false;
+ if (!canHaveLeadingZeros && length > 1 && haveLeadingZero)
+ return false;
+ *tokenEnd = start;
+ return true;
+}
+
+template<typename Char>
+bool parseNumberToken(const Char* start, const Char* end, const Char** tokenEnd)
+{
+ // We just grab the number here. We validate the size in DecodeNumber.
+ // According to RFC4627, a valid number is: [minus] int [frac] [exp]
+ if (start == end)
+ return false;
+ Char c = *start;
+ if ('-' == c)
+ ++start;
+
+ if (!readInt(start, end, &start, false))
+ return false;
+ if (start == end) {
+ *tokenEnd = start;
+ return true;
+ }
+
+ // Optional fraction part
+ c = *start;
+ if ('.' == c) {
+ ++start;
+ if (!readInt(start, end, &start, true))
+ return false;
+ if (start == end) {
+ *tokenEnd = start;
+ return true;
+ }
+ c = *start;
+ }
+
+ // Optional exponent part
+ if ('e' == c || 'E' == c) {
+ ++start;
+ if (start == end)
+ return false;
+ c = *start;
+ if ('-' == c || '+' == c) {
+ ++start;
+ if (start == end)
+ return false;
+ }
+ if (!readInt(start, end, &start, true))
+ return false;
+ }
+
+ *tokenEnd = start;
+ return true;
+}
+
+template<typename Char>
+bool readHexDigits(const Char* start, const Char* end, const Char** tokenEnd, int digits)
+{
+ if (end - start < digits)
+ return false;
+ for (int i = 0; i < digits; ++i) {
+ Char c = *start++;
+ if (!(('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')))
+ return false;
+ }
+ *tokenEnd = start;
+ return true;
+}
+
+template<typename Char>
+bool parseStringToken(const Char* start, const Char* end, const Char** tokenEnd)
+{
+ while (start < end) {
+ Char c = *start++;
+ if ('\\' == c) {
+ if (start == end)
+ return false;
+ c = *start++;
+ // Make sure the escaped char is valid.
+ switch (c) {
+ case 'x':
+ if (!readHexDigits(start, end, &start, 2))
+ return false;
+ break;
+ case 'u':
+ if (!readHexDigits(start, end, &start, 4))
+ return false;
+ break;
+ case '\\':
+ case '/':
+ case 'b':
+ case 'f':
+ case 'n':
+ case 'r':
+ case 't':
+ case 'v':
+ case '"':
+ break;
+ default:
+ return false;
+ }
+ } else if ('"' == c) {
+ *tokenEnd = start;
+ return true;
+ }
+ }
+ return false;
+}
+
+template<typename Char>
+bool skipComment(const Char* start, const Char* end, const Char** commentEnd)
+{
+ if (start == end)
+ return false;
+
+ if (*start != '/' || start + 1 >= end)
+ return false;
+ ++start;
+
+ if (*start == '/') {
+ // Single line comment, read to newline.
+ for (++start; start < end; ++start) {
+ if (*start == '\n' || *start == '\r') {
+ *commentEnd = start + 1;
+ return true;
+ }
+ }
+ *commentEnd = end;
+ // Comment reaches end-of-input, which is fine.
+ return true;
+ }
+
+ if (*start == '*') {
+ Char previous = '\0';
+ // Block comment, read until end marker.
+ for (++start; start < end; previous = *start++) {
+ if (previous == '*' && *start == '/') {
+ *commentEnd = start + 1;
+ return true;
+ }
+ }
+ // Block comment must close before end-of-input.
+ return false;
+ }
+
+ return false;
+}
+
+template<typename Char>
+void skipWhitespaceAndComments(const Char* start, const Char* end, const Char** whitespaceEnd)
+{
+ while (start < end) {
+ if (isSpaceOrNewLine(*start)) {
+ ++start;
+ } else if (*start == '/') {
+ const Char* commentEnd;
+ if (!skipComment(start, end, &commentEnd))
+ break;
+ start = commentEnd;
+ } else {
+ break;
+ }
+ }
+ *whitespaceEnd = start;
+}
+
+template<typename Char>
+Token parseToken(const Char* start, const Char* end, const Char** tokenStart, const Char** tokenEnd)
+{
+ skipWhitespaceAndComments(start, end, tokenStart);
+ start = *tokenStart;
+
+ if (start == end)
+ return InvalidToken;
+
+ switch (*start) {
+ case 'n':
+ if (parseConstToken(start, end, tokenEnd, nullString))
+ return NullToken;
+ break;
+ case 't':
+ if (parseConstToken(start, end, tokenEnd, trueString))
+ return BoolTrue;
+ break;
+ case 'f':
+ if (parseConstToken(start, end, tokenEnd, falseString))
+ return BoolFalse;
+ break;
+ case '[':
+ *tokenEnd = start + 1;
+ return ArrayBegin;
+ case ']':
+ *tokenEnd = start + 1;
+ return ArrayEnd;
+ case ',':
+ *tokenEnd = start + 1;
+ return ListSeparator;
+ case '{':
+ *tokenEnd = start + 1;
+ return ObjectBegin;
+ case '}':
+ *tokenEnd = start + 1;
+ return ObjectEnd;
+ case ':':
+ *tokenEnd = start + 1;
+ return ObjectPairSeparator;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '-':
+ if (parseNumberToken(start, end, tokenEnd))
+ return Number;
+ break;
+ case '"':
+ if (parseStringToken(start + 1, end, tokenEnd))
+ return StringLiteral;
+ break;
+ }
+ return InvalidToken;
+}
+
+template<typename Char>
+int hexToInt(Char c)
+{
+ if ('0' <= c && c <= '9')
+ return c - '0';
+ if ('A' <= c && c <= 'F')
+ return c - 'A' + 10;
+ if ('a' <= c && c <= 'f')
+ return c - 'a' + 10;
+ DCHECK(false);
+ return 0;
+}
+
+template<typename Char>
+bool decodeString(const Char* start, const Char* end, StringBuilder* output)
+{
+ while (start < end) {
+ uint16_t c = *start++;
+ if ('\\' != c) {
+ output->append(c);
+ continue;
+ }
+ if (start == end)
+ return false;
+ c = *start++;
+
+ if (c == 'x') {
+ // \x is not supported.
+ return false;
+ }
+
+ switch (c) {
+ case '"':
+ case '/':
+ case '\\':
+ break;
+ case 'b':
+ c = '\b';
+ break;
+ case 'f':
+ c = '\f';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case 't':
+ c = '\t';
+ break;
+ case 'v':
+ c = '\v';
+ break;
+ case 'u':
+ c = (hexToInt(*start) << 12) +
+ (hexToInt(*(start + 1)) << 8) +
+ (hexToInt(*(start + 2)) << 4) +
+ hexToInt(*(start + 3));
+ start += 4;
+ break;
+ default:
+ return false;
+ }
+ output->append(c);
+ }
+ return true;
+}
+
+template<typename Char>
+bool decodeString(const Char* start, const Char* end, String* output)
+{
+ if (start == end) {
+ *output = "";
+ return true;
+ }
+ if (start > end)
+ return false;
+ StringBuilder buffer;
+ StringUtil::builderReserve(buffer, end - start);
+ if (!decodeString(start, end, &buffer))
+ return false;
+ *output = buffer.toString();
+ return true;
+}
+
+template<typename Char>
+std::unique_ptr<Value> buildValue(const Char* start, const Char* end, const Char** valueTokenEnd, int depth)
+{
+ if (depth > stackLimit)
+ return nullptr;
+
+ std::unique_ptr<Value> result;
+ const Char* tokenStart;
+ const Char* tokenEnd;
+ Token token = parseToken(start, end, &tokenStart, &tokenEnd);
+ switch (token) {
+ case InvalidToken:
+ return nullptr;
+ case NullToken:
+ result = Value::null();
+ break;
+ case BoolTrue:
+ result = FundamentalValue::create(true);
+ break;
+ case BoolFalse:
+ result = FundamentalValue::create(false);
+ break;
+ case Number: {
+ bool ok;
+ double value = charactersToDouble(tokenStart, tokenEnd - tokenStart, &ok);
+ if (!ok)
+ return nullptr;
+ int number = static_cast<int>(value);
+ if (number == value)
+ result = FundamentalValue::create(number);
+ else
+ result = FundamentalValue::create(value);
+ break;
+ }
+ case StringLiteral: {
+ String value;
+ bool ok = decodeString(tokenStart + 1, tokenEnd - 1, &value);
+ if (!ok)
+ return nullptr;
+ result = StringValue::create(value);
+ break;
+ }
+ case ArrayBegin: {
+ std::unique_ptr<ListValue> array = ListValue::create();
+ start = tokenEnd;
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ while (token != ArrayEnd) {
+ std::unique_ptr<Value> arrayNode = buildValue(start, end, &tokenEnd, depth + 1);
+ if (!arrayNode)
+ return nullptr;
+ array->pushValue(std::move(arrayNode));
+
+ // After a list value, we expect a comma or the end of the list.
+ start = tokenEnd;
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ if (token == ListSeparator) {
+ start = tokenEnd;
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ if (token == ArrayEnd)
+ return nullptr;
+ } else if (token != ArrayEnd) {
+ // Unexpected value after list value. Bail out.
+ return nullptr;
+ }
+ }
+ if (token != ArrayEnd)
+ return nullptr;
+ result = std::move(array);
+ break;
+ }
+ case ObjectBegin: {
+ std::unique_ptr<DictionaryValue> object = DictionaryValue::create();
+ start = tokenEnd;
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ while (token != ObjectEnd) {
+ if (token != StringLiteral)
+ return nullptr;
+ String key;
+ if (!decodeString(tokenStart + 1, tokenEnd - 1, &key))
+ return nullptr;
+ start = tokenEnd;
+
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ if (token != ObjectPairSeparator)
+ return nullptr;
+ start = tokenEnd;
+
+ std::unique_ptr<Value> value = buildValue(start, end, &tokenEnd, depth + 1);
+ if (!value)
+ return nullptr;
+ object->setValue(key, std::move(value));
+ start = tokenEnd;
+
+ // After a key/value pair, we expect a comma or the end of the
+ // object.
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ if (token == ListSeparator) {
+ start = tokenEnd;
+ token = parseToken(start, end, &tokenStart, &tokenEnd);
+ if (token == ObjectEnd)
+ return nullptr;
+ } else if (token != ObjectEnd) {
+ // Unexpected value after last object value. Bail out.
+ return nullptr;
+ }
+ }
+ if (token != ObjectEnd)
+ return nullptr;
+ result = std::move(object);
+ break;
+ }
+
+ default:
+ // We got a token that's not a value.
+ return nullptr;
+ }
+
+ skipWhitespaceAndComments(tokenEnd, end, valueTokenEnd);
+ return result;
+}
+
+template<typename Char>
+std::unique_ptr<Value> parseJSONInternal(const Char* start, unsigned length)
+{
+ const Char* end = start + length;
+ const Char *tokenEnd;
+ std::unique_ptr<Value> value = buildValue(start, end, &tokenEnd, 0);
+ if (!value || tokenEnd != end)
+ return nullptr;
+ return value;
+}
+
+} // anonymous namespace
+
+std::unique_ptr<Value> parseJSON(const uint16_t* characters, unsigned length)
+{
+ return parseJSONInternal<uint16_t>(characters, length);
+}
+
+std::unique_ptr<Value> parseJSON(const uint8_t* characters, unsigned length)
+{
+ return parseJSONInternal<uint8_t>(characters, length);
+}
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_h.template b/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
new file mode 100644
index 0000000000..7b2a29b6c9
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Parser_h.template
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Parser_h
+#define {{"_".join(config.protocol.namespace)}}_Parser_h
+
+//#include "Forward.h"
+//#include "Values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+{{config.lib.export_macro}} std::unique_ptr<Value> parseJSON(const uint8_t*, unsigned);
+{{config.lib.export_macro}} std::unique_ptr<Value> parseJSON(const uint16_t*, unsigned);
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_Parser_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template
new file mode 100644
index 0000000000..8e35fa74fc
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Protocol_cpp.template
@@ -0,0 +1,12 @@
+// This file is generated.
+
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "{{config.protocol.package}}/Protocol.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include <cstring>
diff --git a/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
new file mode 100644
index 0000000000..5384c7bb1e
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/ValueConversions_h.template
@@ -0,0 +1,171 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_ValueConversions_h
+#define {{"_".join(config.protocol.namespace)}}_ValueConversions_h
+
+//#include "ErrorSupport.h"
+//#include "Forward.h"
+//#include "Values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+template<typename T>
+struct ValueConversions {
+ static std::unique_ptr<T> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ return T::parse(value, errors);
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(T* value)
+ {
+ return value->serialize();
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const std::unique_ptr<T>& value)
+ {
+ return value->serialize();
+ }
+};
+
+template<>
+struct ValueConversions<bool> {
+ static bool parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ bool result = false;
+ bool success = value ? value->asBoolean(&result) : false;
+ if (!success)
+ errors->addError("boolean value expected");
+ return result;
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(bool value)
+ {
+ return FundamentalValue::create(value);
+ }
+};
+
+template<>
+struct ValueConversions<int> {
+ static int parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ int result = 0;
+ bool success = value ? value->asInteger(&result) : false;
+ if (!success)
+ errors->addError("integer value expected");
+ return result;
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(int value)
+ {
+ return FundamentalValue::create(value);
+ }
+};
+
+template<>
+struct ValueConversions<double> {
+ static double parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ double result = 0;
+ bool success = value ? value->asDouble(&result) : false;
+ if (!success)
+ errors->addError("double value expected");
+ return result;
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(double value)
+ {
+ return FundamentalValue::create(value);
+ }
+};
+
+template<>
+struct ValueConversions<String> {
+ static String parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ String result;
+ bool success = value ? value->asString(&result) : false;
+ if (!success)
+ errors->addError("string value expected");
+ return result;
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const String& value)
+ {
+ return StringValue::create(value);
+ }
+};
+
+template<>
+struct ValueConversions<Value> {
+ static std::unique_ptr<Value> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ bool success = !!value;
+ if (!success) {
+ errors->addError("value expected");
+ return nullptr;
+ }
+ return value->clone();
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(Value* value)
+ {
+ return value->clone();
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const std::unique_ptr<Value>& value)
+ {
+ return value->clone();
+ }
+};
+
+template<>
+struct ValueConversions<DictionaryValue> {
+ static std::unique_ptr<DictionaryValue> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ bool success = value && value->type() == protocol::Value::TypeObject;
+ if (!success)
+ errors->addError("object expected");
+ return DictionaryValue::cast(value->clone());
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(DictionaryValue* value)
+ {
+ return value->clone();
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const std::unique_ptr<DictionaryValue>& value)
+ {
+ return value->clone();
+ }
+};
+
+template<>
+struct ValueConversions<ListValue> {
+ static std::unique_ptr<ListValue> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ bool success = value && value->type() == protocol::Value::TypeArray;
+ if (!success)
+ errors->addError("list expected");
+ return ListValue::cast(value->clone());
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(ListValue* value)
+ {
+ return value->clone();
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const std::unique_ptr<ListValue>& value)
+ {
+ return value->clone();
+ }
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_ValueConversions_h)
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
new file mode 100644
index 0000000000..1b5cdfee22
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_cpp.template
@@ -0,0 +1,407 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#include "Values.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+namespace {
+
+const char* const nullValueString = "null";
+const char* const trueValueString = "true";
+const char* const falseValueString = "false";
+
+inline bool escapeChar(uint16_t c, StringBuilder* dst)
+{
+ switch (c) {
+ case '\b': dst->append("\\b"); break;
+ case '\f': dst->append("\\f"); break;
+ case '\n': dst->append("\\n"); break;
+ case '\r': dst->append("\\r"); break;
+ case '\t': dst->append("\\t"); break;
+ case '\\': dst->append("\\\\"); break;
+ case '"': dst->append("\\\""); break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+const char hexDigits[17] = "0123456789ABCDEF";
+
+void appendUnsignedAsHex(uint16_t number, StringBuilder* dst)
+{
+ dst->append("\\u");
+ for (size_t i = 0; i < 4; ++i) {
+ uint16_t c = hexDigits[(number & 0xF000) >> 12];
+ dst->append(c);
+ number <<= 4;
+ }
+}
+
+void escapeStringForJSON(const String& str, StringBuilder* dst)
+{
+ for (unsigned i = 0; i < str.length(); ++i) {
+ uint16_t c = str[i];
+ if (!escapeChar(c, dst)) {
+ if (c < 32 || c > 126 || c == '<' || c == '>') {
+ // 1. Escaping <, > to prevent script execution.
+ // 2. Technically, we could also pass through c > 126 as UTF8, but this
+ // is also optional. It would also be a pain to implement here.
+ appendUnsignedAsHex(c, dst);
+ } else {
+ dst->append(c);
+ }
+ }
+ }
+}
+
+void doubleQuoteStringForJSON(const String& str, StringBuilder* dst)
+{
+ dst->append('"');
+ escapeStringForJSON(str, dst);
+ dst->append('"');
+}
+
+} // anonymous namespace
+
+bool Value::asBoolean(bool*) const
+{
+ return false;
+}
+
+bool Value::asDouble(double*) const
+{
+ return false;
+}
+
+bool Value::asInteger(int*) const
+{
+ return false;
+}
+
+bool Value::asString(String*) const
+{
+ return false;
+}
+
+bool Value::asSerialized(String*) const
+{
+ return false;
+}
+
+String Value::toJSONString() const
+{
+ StringBuilder result;
+ StringUtil::builderReserve(result, 512);
+ writeJSON(&result);
+ return result.toString();
+}
+
+void Value::writeJSON(StringBuilder* output) const
+{
+ DCHECK(m_type == TypeNull);
+ output->append(nullValueString, 4);
+}
+
+std::unique_ptr<Value> Value::clone() const
+{
+ return Value::null();
+}
+
+bool FundamentalValue::asBoolean(bool* output) const
+{
+ if (type() != TypeBoolean)
+ return false;
+ *output = m_boolValue;
+ return true;
+}
+
+bool FundamentalValue::asDouble(double* output) const
+{
+ if (type() == TypeDouble) {
+ *output = m_doubleValue;
+ return true;
+ }
+ if (type() == TypeInteger) {
+ *output = m_integerValue;
+ return true;
+ }
+ return false;
+}
+
+bool FundamentalValue::asInteger(int* output) const
+{
+ if (type() != TypeInteger)
+ return false;
+ *output = m_integerValue;
+ return true;
+}
+
+void FundamentalValue::writeJSON(StringBuilder* output) const
+{
+ DCHECK(type() == TypeBoolean || type() == TypeInteger || type() == TypeDouble);
+ if (type() == TypeBoolean) {
+ if (m_boolValue)
+ output->append(trueValueString, 4);
+ else
+ output->append(falseValueString, 5);
+ } else if (type() == TypeDouble) {
+ if (!std::isfinite(m_doubleValue)) {
+ output->append(nullValueString, 4);
+ return;
+ }
+ output->append(StringUtil::fromDouble(m_doubleValue));
+ } else if (type() == TypeInteger) {
+ output->append(StringUtil::fromInteger(m_integerValue));
+ }
+}
+
+std::unique_ptr<Value> FundamentalValue::clone() const
+{
+ switch (type()) {
+ case TypeDouble: return FundamentalValue::create(m_doubleValue);
+ case TypeInteger: return FundamentalValue::create(m_integerValue);
+ case TypeBoolean: return FundamentalValue::create(m_boolValue);
+ default:
+ DCHECK(false);
+ }
+ return nullptr;
+}
+
+bool StringValue::asString(String* output) const
+{
+ *output = m_stringValue;
+ return true;
+}
+
+void StringValue::writeJSON(StringBuilder* output) const
+{
+ DCHECK(type() == TypeString);
+ doubleQuoteStringForJSON(m_stringValue, output);
+}
+
+std::unique_ptr<Value> StringValue::clone() const
+{
+ return StringValue::create(m_stringValue);
+}
+
+bool SerializedValue::asSerialized(String* output) const
+{
+ *output = m_serializedValue;
+ return true;
+}
+
+void SerializedValue::writeJSON(StringBuilder* output) const
+{
+ DCHECK(type() == TypeSerialized);
+ output->append(m_serializedValue);
+}
+
+std::unique_ptr<Value> SerializedValue::clone() const
+{
+ return SerializedValue::create(m_serializedValue);
+}
+
+DictionaryValue::~DictionaryValue()
+{
+}
+
+void DictionaryValue::setBoolean(const String& name, bool value)
+{
+ setValue(name, FundamentalValue::create(value));
+}
+
+void DictionaryValue::setInteger(const String& name, int value)
+{
+ setValue(name, FundamentalValue::create(value));
+}
+
+void DictionaryValue::setDouble(const String& name, double value)
+{
+ setValue(name, FundamentalValue::create(value));
+}
+
+void DictionaryValue::setString(const String& name, const String& value)
+{
+ setValue(name, StringValue::create(value));
+}
+
+void DictionaryValue::setValue(const String& name, std::unique_ptr<Value> value)
+{
+ set(name, value);
+}
+
+void DictionaryValue::setObject(const String& name, std::unique_ptr<DictionaryValue> value)
+{
+ set(name, value);
+}
+
+void DictionaryValue::setArray(const String& name, std::unique_ptr<ListValue> value)
+{
+ set(name, value);
+}
+
+bool DictionaryValue::getBoolean(const String& name, bool* output) const
+{
+ protocol::Value* value = get(name);
+ if (!value)
+ return false;
+ return value->asBoolean(output);
+}
+
+bool DictionaryValue::getInteger(const String& name, int* output) const
+{
+ Value* value = get(name);
+ if (!value)
+ return false;
+ return value->asInteger(output);
+}
+
+bool DictionaryValue::getDouble(const String& name, double* output) const
+{
+ Value* value = get(name);
+ if (!value)
+ return false;
+ return value->asDouble(output);
+}
+
+bool DictionaryValue::getString(const String& name, String* output) const
+{
+ protocol::Value* value = get(name);
+ if (!value)
+ return false;
+ return value->asString(output);
+}
+
+DictionaryValue* DictionaryValue::getObject(const String& name) const
+{
+ return DictionaryValue::cast(get(name));
+}
+
+protocol::ListValue* DictionaryValue::getArray(const String& name) const
+{
+ return ListValue::cast(get(name));
+}
+
+protocol::Value* DictionaryValue::get(const String& name) const
+{
+ Dictionary::const_iterator it = m_data.find(name);
+ if (it == m_data.end())
+ return nullptr;
+ return it->second.get();
+}
+
+DictionaryValue::Entry DictionaryValue::at(size_t index) const
+{
+ const String key = m_order[index];
+ return std::make_pair(key, m_data.find(key)->second.get());
+}
+
+bool DictionaryValue::booleanProperty(const String& name, bool defaultValue) const
+{
+ bool result = defaultValue;
+ getBoolean(name, &result);
+ return result;
+}
+
+int DictionaryValue::integerProperty(const String& name, int defaultValue) const
+{
+ int result = defaultValue;
+ getInteger(name, &result);
+ return result;
+}
+
+double DictionaryValue::doubleProperty(const String& name, double defaultValue) const
+{
+ double result = defaultValue;
+ getDouble(name, &result);
+ return result;
+}
+
+void DictionaryValue::remove(const String& name)
+{
+ m_data.erase(name);
+ m_order.erase(std::remove(m_order.begin(), m_order.end(), name), m_order.end());
+}
+
+void DictionaryValue::writeJSON(StringBuilder* output) const
+{
+ output->append('{');
+ for (size_t i = 0; i < m_order.size(); ++i) {
+ Dictionary::const_iterator it = m_data.find(m_order[i]);
+ CHECK(it != m_data.end());
+ if (i)
+ output->append(',');
+ doubleQuoteStringForJSON(it->first, output);
+ output->append(':');
+ it->second->writeJSON(output);
+ }
+ output->append('}');
+}
+
+std::unique_ptr<Value> DictionaryValue::clone() const
+{
+ std::unique_ptr<DictionaryValue> result = DictionaryValue::create();
+ for (size_t i = 0; i < m_order.size(); ++i) {
+ String key = m_order[i];
+ Dictionary::const_iterator value = m_data.find(key);
+ DCHECK(value != m_data.cend() && value->second);
+ result->setValue(key, value->second->clone());
+ }
+ return std::move(result);
+}
+
+DictionaryValue::DictionaryValue()
+ : Value(TypeObject)
+{
+}
+
+ListValue::~ListValue()
+{
+}
+
+void ListValue::writeJSON(StringBuilder* output) const
+{
+ output->append('[');
+ bool first = true;
+ for (const std::unique_ptr<protocol::Value>& value : m_data) {
+ if (!first)
+ output->append(',');
+ value->writeJSON(output);
+ first = false;
+ }
+ output->append(']');
+}
+
+std::unique_ptr<Value> ListValue::clone() const
+{
+ std::unique_ptr<ListValue> result = ListValue::create();
+ for (const std::unique_ptr<protocol::Value>& value : m_data)
+ result->pushValue(value->clone());
+ return std::move(result);
+}
+
+ListValue::ListValue()
+ : Value(TypeArray)
+{
+}
+
+void ListValue::pushValue(std::unique_ptr<protocol::Value> value)
+{
+ DCHECK(value);
+ m_data.push_back(std::move(value));
+}
+
+protocol::Value* ListValue::at(size_t index)
+{
+ DCHECK_LT(index, m_data.size());
+ return m_data[index].get();
+}
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/lib/Values_h.template b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
new file mode 100644
index 0000000000..8f75ef2220
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/lib/Values_h.template
@@ -0,0 +1,246 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_Values_h
+#define {{"_".join(config.protocol.namespace)}}_Values_h
+
+//#include "Allocator.h"
+//#include "Collections.h"
+//#include "Forward.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+
+class ListValue;
+class DictionaryValue;
+class Value;
+
+class {{config.lib.export_macro}} Value {
+ PROTOCOL_DISALLOW_COPY(Value);
+public:
+ virtual ~Value() { }
+
+ static std::unique_ptr<Value> null()
+ {
+ return wrapUnique(new Value());
+ }
+
+ enum ValueType {
+ TypeNull = 0,
+ TypeBoolean,
+ TypeInteger,
+ TypeDouble,
+ TypeString,
+ TypeObject,
+ TypeArray,
+ TypeSerialized
+ };
+
+ ValueType type() const { return m_type; }
+
+ bool isNull() const { return m_type == TypeNull; }
+
+ virtual bool asBoolean(bool* output) const;
+ virtual bool asDouble(double* output) const;
+ virtual bool asInteger(int* output) const;
+ virtual bool asString(String* output) const;
+ virtual bool asSerialized(String* output) const;
+
+ String toJSONString() const;
+ virtual void writeJSON(StringBuilder* output) const;
+ virtual std::unique_ptr<Value> clone() const;
+
+protected:
+ Value() : m_type(TypeNull) { }
+ explicit Value(ValueType type) : m_type(type) { }
+
+private:
+ friend class DictionaryValue;
+ friend class ListValue;
+
+ ValueType m_type;
+};
+
+class {{config.lib.export_macro}} FundamentalValue : public Value {
+public:
+ static std::unique_ptr<FundamentalValue> create(bool value)
+ {
+ return wrapUnique(new FundamentalValue(value));
+ }
+
+ static std::unique_ptr<FundamentalValue> create(int value)
+ {
+ return wrapUnique(new FundamentalValue(value));
+ }
+
+ static std::unique_ptr<FundamentalValue> create(double value)
+ {
+ return wrapUnique(new FundamentalValue(value));
+ }
+
+ bool asBoolean(bool* output) const override;
+ bool asDouble(double* output) const override;
+ bool asInteger(int* output) const override;
+ void writeJSON(StringBuilder* output) const override;
+ std::unique_ptr<Value> clone() const override;
+
+private:
+ explicit FundamentalValue(bool value) : Value(TypeBoolean), m_boolValue(value) { }
+ explicit FundamentalValue(int value) : Value(TypeInteger), m_integerValue(value) { }
+ explicit FundamentalValue(double value) : Value(TypeDouble), m_doubleValue(value) { }
+
+ union {
+ bool m_boolValue;
+ double m_doubleValue;
+ int m_integerValue;
+ };
+};
+
+class {{config.lib.export_macro}} StringValue : public Value {
+public:
+ static std::unique_ptr<StringValue> create(const String& value)
+ {
+ return wrapUnique(new StringValue(value));
+ }
+
+ static std::unique_ptr<StringValue> create(const char* value)
+ {
+ return wrapUnique(new StringValue(value));
+ }
+
+ bool asString(String* output) const override;
+ void writeJSON(StringBuilder* output) const override;
+ std::unique_ptr<Value> clone() const override;
+
+private:
+ explicit StringValue(const String& value) : Value(TypeString), m_stringValue(value) { }
+ explicit StringValue(const char* value) : Value(TypeString), m_stringValue(value) { }
+
+ String m_stringValue;
+};
+
+class {{config.lib.export_macro}} SerializedValue : public Value {
+public:
+ static std::unique_ptr<SerializedValue> create(const String& value)
+ {
+ return wrapUnique(new SerializedValue(value));
+ }
+
+ bool asSerialized(String* output) const override;
+ void writeJSON(StringBuilder* output) const override;
+ std::unique_ptr<Value> clone() const override;
+
+private:
+ explicit SerializedValue(const String& value) : Value(TypeSerialized), m_serializedValue(value) { }
+
+ String m_serializedValue;
+};
+
+class {{config.lib.export_macro}} DictionaryValue : public Value {
+public:
+ using Entry = std::pair<String, Value*>;
+ static std::unique_ptr<DictionaryValue> create()
+ {
+ return wrapUnique(new DictionaryValue());
+ }
+
+ static DictionaryValue* cast(Value* value)
+ {
+ if (!value || value->type() != TypeObject)
+ return nullptr;
+ return static_cast<DictionaryValue*>(value);
+ }
+
+ static std::unique_ptr<DictionaryValue> cast(std::unique_ptr<Value> value)
+ {
+ return wrapUnique(DictionaryValue::cast(value.release()));
+ }
+
+ void writeJSON(StringBuilder* output) const override;
+ std::unique_ptr<Value> clone() const override;
+
+ size_t size() const { return m_data.size(); }
+
+ void setBoolean(const String& name, bool);
+ void setInteger(const String& name, int);
+ void setDouble(const String& name, double);
+ void setString(const String& name, const String&);
+ void setValue(const String& name, std::unique_ptr<Value>);
+ void setObject(const String& name, std::unique_ptr<DictionaryValue>);
+ void setArray(const String& name, std::unique_ptr<ListValue>);
+
+ bool getBoolean(const String& name, bool* output) const;
+ bool getInteger(const String& name, int* output) const;
+ bool getDouble(const String& name, double* output) const;
+ bool getString(const String& name, String* output) const;
+
+ DictionaryValue* getObject(const String& name) const;
+ ListValue* getArray(const String& name) const;
+ Value* get(const String& name) const;
+ Entry at(size_t index) const;
+
+ bool booleanProperty(const String& name, bool defaultValue) const;
+ int integerProperty(const String& name, int defaultValue) const;
+ double doubleProperty(const String& name, double defaultValue) const;
+ void remove(const String& name);
+
+ ~DictionaryValue() override;
+
+private:
+ DictionaryValue();
+ template<typename T>
+ void set(const String& key, std::unique_ptr<T>& value)
+ {
+ DCHECK(value);
+ bool isNew = m_data.find(key) == m_data.end();
+ m_data[key] = std::move(value);
+ if (isNew)
+ m_order.push_back(key);
+ }
+
+ using Dictionary = protocol::HashMap<String, std::unique_ptr<Value>>;
+ Dictionary m_data;
+ std::vector<String> m_order;
+};
+
+class {{config.lib.export_macro}} ListValue : public Value {
+public:
+ static std::unique_ptr<ListValue> create()
+ {
+ return wrapUnique(new ListValue());
+ }
+
+ static ListValue* cast(Value* value)
+ {
+ if (!value || value->type() != TypeArray)
+ return nullptr;
+ return static_cast<ListValue*>(value);
+ }
+
+ static std::unique_ptr<ListValue> cast(std::unique_ptr<Value> value)
+ {
+ return wrapUnique(ListValue::cast(value.release()));
+ }
+
+ ~ListValue() override;
+
+ void writeJSON(StringBuilder* output) const override;
+ std::unique_ptr<Value> clone() const override;
+
+ void pushValue(std::unique_ptr<Value>);
+
+ Value* at(size_t index);
+ size_t size() const { return m_data.size(); }
+
+private:
+ ListValue();
+ std::vector<std::unique_ptr<Value>> m_data;
+};
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // {{"_".join(config.protocol.namespace)}}_Values_h
diff --git a/deps/v8/third_party/inspector_protocol/templates/Exported_h.template b/deps/v8/third_party/inspector_protocol/templates/Exported_h.template
new file mode 100644
index 0000000000..3357f95b5e
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/templates/Exported_h.template
@@ -0,0 +1,65 @@
+// This file is generated
+
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_{{domain.domain}}_api_h
+#define {{"_".join(config.protocol.namespace)}}_{{domain.domain}}_api_h
+
+{% if config.exported.export_header %}
+#include {{format_include(config.exported.export_header)}}
+{% endif %}
+#include {{format_include(config.exported.string_header)}}
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+namespace {{domain.domain}} {
+namespace API {
+
+// ------------- Enums.
+ {% for type in domain.types %}
+ {% if ("enum" in type) and type.exported %}
+
+namespace {{type.id}}Enum {
+ {% for literal in type.enum %}
+{{config.exported.export_macro}} extern const char* {{ literal | dash_to_camelcase}};
+ {% endfor %}
+} // {{type.id}}Enum
+ {% endif %}
+ {% endfor %}
+ {% for command in join_arrays(domain, ["commands", "events"]) %}
+ {% for param in join_arrays(command, ["parameters", "returns"]) %}
+ {% if ("enum" in param) and (param.exported) %}
+
+namespace {{command.name | to_title_case}} {
+namespace {{param.name | to_title_case}}Enum {
+ {% for literal in param.enum %}
+{{config.exported.export_macro}} extern const char* {{ literal | dash_to_camelcase}};
+ {% endfor %}
+} // {{param.name | to_title_case}}Enum
+} // {{command.name | to_title_case }}
+ {% endif %}
+ {% endfor %}
+ {% endfor %}
+
+// ------------- Types.
+ {% for type in domain.types %}
+ {% if not (type.type == "object") or not ("properties" in type) or not (type.exported) %}{% continue %}{% endif %}
+
+class {{config.exported.export_macro}} {{type.id}} {
+public:
+ virtual {{config.exported.string_out}} toJSONString() const = 0;
+ virtual ~{{type.id}}() { }
+ static std::unique_ptr<protocol::{{domain.domain}}::API::{{type.id}}> fromJSONString(const {{config.exported.string_in}}& json);
+};
+ {% endfor %}
+
+} // namespace API
+} // namespace {{domain.domain}}
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_{{domain.domain}}_api_h)
diff --git a/deps/v8/third_party/inspector_protocol/templates/Imported_h.template b/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
new file mode 100644
index 0000000000..c23b8fe87c
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/templates/Imported_h.template
@@ -0,0 +1,51 @@
+// This file is generated
+
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_{{domain.domain}}_imported_h
+#define {{"_".join(config.protocol.namespace)}}_{{domain.domain}}_imported_h
+
+#include "{{config.protocol.package}}/Protocol.h"
+#include {{format_include(config.imported.header if config.imported.header else "\"%s/%s.h\"" % (config.imported.package, domain.domain))}}
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+ {% for type in domain.types %}
+ {% if not (type.type == "object") or not ("properties" in type) or not (type.exported) %}{% continue %}{% endif %}
+
+template<>
+struct ValueConversions<{{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}> {
+ static std::unique_ptr<{{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}> parse(protocol::Value* value, ErrorSupport* errors)
+ {
+ if (!value) {
+ errors->addError("value expected");
+ return nullptr;
+ }
+ String json = value->toJSONString();
+ auto result = {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}::fromJSONString({{config.imported.to_imported_string % "json"}});
+ if (!result)
+ errors->addError("cannot parse");
+ return result;
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const {{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}* value)
+ {
+ auto json = value->toJSONString();
+ return SerializedValue::create({{config.imported.from_imported_string % "std::move(json)"}});
+ }
+
+ static std::unique_ptr<protocol::Value> serialize(const std::unique_ptr<{{"::".join(config.imported.namespace)}}::{{domain.domain}}::API::{{type.id}}>& value)
+ {
+ return serialize(value.get());
+ }
+};
+ {% endfor %}
+
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_{{domain.domain}}_imported_h)
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
new file mode 100644
index 0000000000..16f1ae516a
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_cpp.template
@@ -0,0 +1,364 @@
+// This file is generated
+
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "{{config.protocol.package}}/{{domain.domain}}.h"
+
+#include "{{config.protocol.package}}/Protocol.h"
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+namespace {{domain.domain}} {
+
+// ------------- Enum values from types.
+
+const char Metainfo::domainName[] = "{{domain.domain}}";
+const char Metainfo::commandPrefix[] = "{{domain.domain}}.";
+const char Metainfo::version[] = "{{domain.version}}";
+ {% for type in domain.types %}
+ {% if "enum" in type %}
+
+namespace {{type.id}}Enum {
+ {% for literal in type.enum %}
+const char* {{ literal | dash_to_camelcase}} = "{{literal}}";
+ {% endfor %}
+} // namespace {{type.id}}Enum
+ {% if type.exported %}
+
+namespace API {
+namespace {{type.id}}Enum {
+ {% for literal in type.enum %}
+const char* {{ literal | dash_to_camelcase}} = "{{literal}}";
+ {% endfor %}
+} // namespace {{type.id}}Enum
+} // namespace API
+ {% endif %}
+ {% endif %}
+ {% for property in type.properties %}
+ {% if "enum" in property %}
+
+ {% for literal in property.enum %}
+const char* {{type.id}}::{{property.name | to_title_case}}Enum::{{literal | dash_to_camelcase}} = "{{literal}}";
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+ {% if not (type.type == "object") or not ("properties" in type) %}{% continue %}{% endif %}
+
+std::unique_ptr<{{type.id}}> {{type.id}}::parse(protocol::Value* value, ErrorSupport* errors)
+{
+ if (!value || value->type() != protocol::Value::TypeObject) {
+ errors->addError("object expected");
+ return nullptr;
+ }
+
+ std::unique_ptr<{{type.id}}> result(new {{type.id}}());
+ protocol::DictionaryValue* object = DictionaryValue::cast(value);
+ errors->push();
+ {% for property in type.properties %}
+ protocol::Value* {{property.name}}Value = object->get("{{property.name}}");
+ {% if property.optional %}
+ if ({{property.name}}Value) {
+ errors->setName("{{property.name}}");
+ result->m_{{property.name}} = ValueConversions<{{resolve_type(property).raw_type}}>::parse({{property.name}}Value, errors);
+ }
+ {% else %}
+ errors->setName("{{property.name}}");
+ result->m_{{property.name}} = ValueConversions<{{resolve_type(property).raw_type}}>::parse({{property.name}}Value, errors);
+ {% endif %}
+ {% endfor %}
+ errors->pop();
+ if (errors->hasErrors())
+ return nullptr;
+ return result;
+}
+
+std::unique_ptr<protocol::DictionaryValue> {{type.id}}::serialize() const
+{
+ std::unique_ptr<protocol::DictionaryValue> result = DictionaryValue::create();
+ {% for property in type.properties %}
+ {% if property.optional %}
+ if (m_{{property.name}}.isJust())
+ result->setValue("{{property.name}}", ValueConversions<{{resolve_type(property).raw_type}}>::serialize(m_{{property.name}}.fromJust()));
+ {% else %}
+ result->setValue("{{property.name}}", ValueConversions<{{resolve_type(property).raw_type}}>::serialize({{resolve_type(property).to_raw_type % ("m_" + property.name)}}));
+ {% endif %}
+ {% endfor %}
+ return result;
+}
+
+std::unique_ptr<{{type.id}}> {{type.id}}::clone() const
+{
+ ErrorSupport errors;
+ return parse(serialize().get(), &errors);
+}
+ {% if type.exported %}
+
+{{config.exported.string_out}} {{type.id}}::toJSONString() const
+{
+ String json = serialize()->toJSONString();
+ return {{config.exported.to_string_out % "json"}};
+}
+
+// static
+std::unique_ptr<API::{{type.id}}> API::{{type.id}}::fromJSONString(const {{config.exported.string_in}}& json)
+{
+ ErrorSupport errors;
+ std::unique_ptr<Value> value = parseJSON(json);
+ if (!value)
+ return nullptr;
+ return protocol::{{domain.domain}}::{{type.id}}::parse(value.get(), &errors);
+}
+ {% endif %}
+ {% endfor %}
+
+// ------------- Enum values from params.
+
+ {% for command in join_arrays(domain, ["commands", "events"]) %}
+ {% for param in join_arrays(command, ["parameters", "returns"]) %}
+ {% if "enum" in param %}
+
+namespace {{command.name | to_title_case}} {
+namespace {{param.name | to_title_case}}Enum {
+ {% for literal in param.enum %}
+const char* {{ literal | to_title_case}} = "{{literal}}";
+ {% endfor %}
+} // namespace {{param.name | to_title_case}}Enum
+} // namespace {{command.name | to_title_case }}
+ {% if param.exported %}
+
+namespace API {
+namespace {{command.name | to_title_case}} {
+namespace {{param.name | to_title_case}}Enum {
+ {% for literal in param.enum %}
+const char* {{ literal | to_title_case}} = "{{literal}}";
+ {% endfor %}
+} // namespace {{param.name | to_title_case}}Enum
+} // namespace {{command.name | to_title_case }}
+} // namespace API
+ {% endif %}
+ {% endif %}
+ {% endfor %}
+ {% endfor %}
+
+// ------------- Frontend notifications.
+ {% for event in domain.events %}
+ {% if "handlers" in event and not ("renderer" in event["handlers"]) %}{% continue %}{% endif %}
+
+void Frontend::{{event.name}}(
+ {%- for parameter in event.parameters %}
+ {% if "optional" in parameter -%}
+ Maybe<{{resolve_type(parameter).raw_type}}>
+ {%- else -%}
+ {{resolve_type(parameter).pass_type}}
+ {%- endif %} {{parameter.name}}{%- if not loop.last -%}, {% endif -%}
+ {% endfor -%})
+{
+ std::unique_ptr<protocol::DictionaryValue> jsonMessage = DictionaryValue::create();
+ jsonMessage->setString("method", "{{domain.domain}}.{{event.name}}");
+ std::unique_ptr<protocol::DictionaryValue> paramsObject = DictionaryValue::create();
+ {% for parameter in event.parameters %}
+ {% if "optional" in parameter %}
+ if ({{parameter.name}}.isJust())
+ paramsObject->setValue("{{parameter.name}}", ValueConversions<{{resolve_type(parameter).raw_type}}>::serialize({{parameter.name}}.fromJust()));
+ {% else %}
+ paramsObject->setValue("{{parameter.name}}", ValueConversions<{{resolve_type(parameter).raw_type}}>::serialize({{resolve_type(parameter).to_raw_type % parameter.name}}));
+ {% endif %}
+ {% endfor %}
+ jsonMessage->setObject("params", std::move(paramsObject));
+ if (m_frontendChannel)
+ m_frontendChannel->sendProtocolNotification(jsonMessage->toJSONString());
+}
+ {% endfor %}
+
+void Frontend::flush()
+{
+ m_frontendChannel->flushProtocolNotifications();
+}
+
+// --------------------- Dispatcher.
+
+class DispatcherImpl : public protocol::DispatcherBase {
+public:
+ DispatcherImpl(FrontendChannel* frontendChannel, Backend* backend)
+ : DispatcherBase(frontendChannel)
+ , m_backend(backend) {
+ {% for command in domain.commands %}
+ {% if "redirect" in command %}{% continue %}{% endif %}
+ {% if "handlers" in command and not ("renderer" in command["handlers"]) %}{% continue %}{% endif %}
+ m_dispatchMap["{{domain.domain}}.{{command.name}}"] = &DispatcherImpl::{{command.name}};
+ {% endfor %}
+ }
+ ~DispatcherImpl() override { }
+ DispatchResponse::Status dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject) override;
+
+protected:
+ using CallHandler = DispatchResponse::Status (DispatcherImpl::*)(int callId, std::unique_ptr<DictionaryValue> messageObject, ErrorSupport* errors);
+ using DispatchMap = protocol::HashMap<String, CallHandler>;
+ DispatchMap m_dispatchMap;
+
+ {% for command in domain.commands %}
+ {% if "redirect" in command %}{% continue %}{% endif %}
+ {% if "handlers" in command and not ("renderer" in command["handlers"]) %}{% continue %}{% endif %}
+ DispatchResponse::Status {{command.name}}(int callId, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport*);
+ {% endfor %}
+
+ Backend* m_backend;
+};
+
+DispatchResponse::Status DispatcherImpl::dispatch(int callId, const String& method, std::unique_ptr<protocol::DictionaryValue> messageObject)
+{
+ protocol::HashMap<String, CallHandler>::iterator it = m_dispatchMap.find(method);
+ if (it == m_dispatchMap.end()) {
+ reportProtocolError(callId, DispatchResponse::kMethodNotFound, "'" + method + "' wasn't found", nullptr);
+ return DispatchResponse::kError;
+ }
+
+ protocol::ErrorSupport errors;
+ return (this->*(it->second))(callId, std::move(messageObject), &errors);
+}
+
+ {% for command in domain.commands %}
+ {% if "redirect" in command %}{% continue %}{% endif %}
+ {% if "handlers" in command and not ("renderer" in command["handlers"]) %}{% continue %}{% endif %}
+ {% if "async" in command %}
+
+class {{command.name | to_title_case}}CallbackImpl : public Backend::{{command.name | to_title_case}}Callback, public DispatcherBase::Callback {
+public:
+ {{command.name | to_title_case}}CallbackImpl(std::unique_ptr<DispatcherBase::WeakPtr> backendImpl, int callId)
+ : DispatcherBase::Callback(std::move(backendImpl), callId) { }
+
+ void sendSuccess(
+ {%- for parameter in command.returns -%}
+ {%- if "optional" in parameter -%}
+ Maybe<{{resolve_type(parameter).raw_type}}> {{parameter.name}}
+ {%- else -%}
+ {{resolve_type(parameter).pass_type}} {{parameter.name}}
+ {%- endif -%}
+ {%- if not loop.last -%}, {% endif -%}
+ {%- endfor -%}) override
+ {
+ std::unique_ptr<protocol::DictionaryValue> resultObject = DictionaryValue::create();
+ {% for parameter in command.returns %}
+ {% if "optional" in parameter %}
+ if ({{parameter.name}}.isJust())
+ resultObject->setValue("{{parameter.name}}", ValueConversions<{{resolve_type(parameter).raw_type}}>::serialize({{parameter.name}}.fromJust()));
+ {% else %}
+ resultObject->setValue("{{parameter.name}}", ValueConversions<{{resolve_type(parameter).raw_type}}>::serialize({{resolve_type(parameter).to_raw_type % parameter.name}}));
+ {% endif %}
+ {% endfor %}
+ sendIfActive(std::move(resultObject), DispatchResponse::OK());
+ }
+
+ void sendFailure(const DispatchResponse& response) override
+ {
+ DCHECK(response.status() == DispatchResponse::kError);
+ sendIfActive(nullptr, response);
+ }
+};
+ {% endif %}
+
+DispatchResponse::Status DispatcherImpl::{{command.name}}(int callId, std::unique_ptr<DictionaryValue> requestMessageObject, ErrorSupport* errors)
+{
+ {% if "parameters" in command %}
+ // Prepare input parameters.
+ protocol::DictionaryValue* object = DictionaryValue::cast(requestMessageObject->get("params"));
+ errors->push();
+ {% for property in command.parameters %}
+ protocol::Value* {{property.name}}Value = object ? object->get("{{property.name}}") : nullptr;
+ {% if property.optional %}
+ Maybe<{{resolve_type(property).raw_type}}> in_{{property.name}};
+ if ({{property.name}}Value) {
+ errors->setName("{{property.name}}");
+ in_{{property.name}} = ValueConversions<{{resolve_type(property).raw_type}}>::parse({{property.name}}Value, errors);
+ }
+ {% else %}
+ errors->setName("{{property.name}}");
+ {{resolve_type(property).type}} in_{{property.name}} = ValueConversions<{{resolve_type(property).raw_type}}>::parse({{property.name}}Value, errors);
+ {% endif %}
+ {% endfor %}
+ errors->pop();
+ if (errors->hasErrors()) {
+ reportProtocolError(callId, DispatchResponse::kInvalidParams, kInvalidParamsString, errors);
+ return DispatchResponse::kError;
+ }
+ {% endif %}
+ {% if "returns" in command and not ("async" in command) %}
+ // Declare output parameters.
+ {% for property in command.returns %}
+ {% if "optional" in property %}
+ Maybe<{{resolve_type(property).raw_type}}> out_{{property.name}};
+ {% else %}
+ {{resolve_type(property).type}} out_{{property.name}};
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+
+ {% if not("async" in command) %}
+ std::unique_ptr<DispatcherBase::WeakPtr> weak = weakPtr();
+ DispatchResponse response = m_backend->{{command.name}}(
+ {%- for property in command.parameters -%}
+ {%- if not loop.first -%}, {% endif -%}
+ {%- if "optional" in property -%}
+ std::move(in_{{property.name}})
+ {%- else -%}
+ {{resolve_type(property).to_pass_type % ("in_" + property.name)}}
+ {%- endif -%}
+ {%- endfor %}
+ {%- if "returns" in command %}
+ {%- for property in command.returns -%}
+ {%- if not loop.first or command.parameters -%}, {% endif -%}
+ &out_{{property.name}}
+ {%- endfor %}
+ {% endif %});
+ {% if "returns" in command %}
+ if (response.status() == DispatchResponse::kFallThrough)
+ return response.status();
+ std::unique_ptr<protocol::DictionaryValue> result = DictionaryValue::create();
+ if (response.status() == DispatchResponse::kSuccess) {
+ {% for parameter in command.returns %}
+ {% if "optional" in parameter %}
+ if (out_{{parameter.name}}.isJust())
+ result->setValue("{{parameter.name}}", ValueConversions<{{resolve_type(parameter).raw_type}}>::serialize(out_{{parameter.name}}.fromJust()));
+ {% else %}
+ result->setValue("{{parameter.name}}", ValueConversions<{{resolve_type(parameter).raw_type}}>::serialize({{resolve_type(parameter).to_raw_type % ("out_" + parameter.name)}}));
+ {% endif %}
+ {% endfor %}
+ }
+ if (weak->get())
+ weak->get()->sendResponse(callId, response, std::move(result));
+ {% else %}
+ if (weak->get())
+ weak->get()->sendResponse(callId, response);
+ {% endif %}
+ return response.status();
+ {% else %}
+ std::unique_ptr<{{command.name | to_title_case}}CallbackImpl> callback(new {{command.name | to_title_case}}CallbackImpl(weakPtr(), callId));
+ m_backend->{{command.name}}(
+ {%- for property in command.parameters -%}
+ {%- if not loop.first -%}, {% endif -%}
+ {%- if "optional" in property -%}
+ std::move(in_{{property.name}})
+ {%- else -%}
+ {{resolve_type(property).to_pass_type % ("in_" + property.name)}}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if command.parameters -%}, {% endif -%}
+ std::move(callback));
+ return DispatchResponse::kAsync;
+ {% endif %}
+}
+ {% endfor %}
+
+// static
+void Dispatcher::wire(UberDispatcher* dispatcher, Backend* backend)
+{
+ dispatcher->registerBackend("{{domain.domain}}", wrapUnique(new DispatcherImpl(dispatcher->channel(), backend)));
+}
+
+} // {{domain.domain}}
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
diff --git a/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
new file mode 100644
index 0000000000..f665039dd7
--- /dev/null
+++ b/deps/v8/third_party/inspector_protocol/templates/TypeBuilder_h.template
@@ -0,0 +1,297 @@
+// This file is generated
+
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef {{"_".join(config.protocol.namespace)}}_{{domain.domain}}_h
+#define {{"_".join(config.protocol.namespace)}}_{{domain.domain}}_h
+
+{% if config.protocol.export_header %}
+#include {{format_include(config.protocol.export_header)}}
+{% endif %}
+#include "{{config.protocol.package}}/Protocol.h"
+// For each imported domain we generate a ValueConversions struct instead of a full domain definition
+// and include Domain::API version from there.
+{% for name in domain.dependencies %}
+#include "{{config.protocol.package}}/{{name}}.h"
+{% endfor %}
+{% if domain["has_exports"] %}
+#include "{{config.exported.package}}/{{domain.domain}}.h"
+{% endif %}
+
+{% for namespace in config.protocol.namespace %}
+namespace {{namespace}} {
+{% endfor %}
+namespace {{domain.domain}} {
+
+// ------------- Forward and enum declarations.
+ {% for type in domain.types %}
+ {% if type.type == "object" %}
+ {% if "properties" in type %}
+// {{type.description}}
+class {{type.id}};
+ {% else %}
+// {{type.description}}
+using {{type.id}} = Object;
+ {% endif %}
+ {% elif type.type != "array" %}
+// {{type.description}}
+using {{type.id}} = {{resolve_type(type).type}};
+ {% endif %}
+ {% endfor %}
+ {% for type in domain.types %}
+ {% if "enum" in type %}
+
+namespace {{type.id}}Enum {
+ {% for literal in type.enum %}
+{{config.protocol.export_macro}} extern const char* {{ literal | dash_to_camelcase}};
+ {% endfor %}
+} // namespace {{type.id}}Enum
+ {% endif %}
+ {% endfor %}
+ {% for command in join_arrays(domain, ["commands", "events"]) %}
+ {% for param in join_arrays(command, ["parameters", "returns"]) %}
+ {% if "enum" in param %}
+
+namespace {{command.name | to_title_case}} {
+namespace {{param.name | to_title_case}}Enum {
+ {% for literal in param.enum %}
+{{config.protocol.export_macro}} extern const char* {{literal | dash_to_camelcase}};
+ {% endfor %}
+} // {{param.name | to_title_case}}Enum
+} // {{command.name | to_title_case }}
+ {% endif %}
+ {% endfor %}
+ {% endfor %}
+
+// ------------- Type and builder declarations.
+ {% for type in domain.types %}
+ {% if not (type.type == "object") or not ("properties" in type) %}{% continue %}{% endif %}
+ {% set type_def = type_definition(domain.domain + "." + type.id)%}
+
+// {{type.description}}
+class {{config.protocol.export_macro}} {{type.id}} {% if type.exported %}: public API::{{type.id}} {% endif %}{
+ PROTOCOL_DISALLOW_COPY({{type.id}});
+public:
+ static std::unique_ptr<{{type.id}}> parse(protocol::Value* value, ErrorSupport* errors);
+
+ ~{{type.id}}() { }
+ {% for property in type.properties %}
+ {% if "enum" in property %}
+
+ struct {{config.protocol.export_macro}} {{property.name | to_title_case}}Enum {
+ {% for literal in property.enum %}
+ static const char* {{literal | dash_to_camelcase}};
+ {% endfor %}
+ }; // {{property.name | to_title_case}}Enum
+ {% endif %}
+
+ {% if property.optional %}
+ bool has{{property.name | to_title_case}}() { return m_{{property.name}}.isJust(); }
+ {{resolve_type(property).raw_return_type}} get{{property.name | to_title_case}}({{resolve_type(property).raw_pass_type}} defaultValue) { return m_{{property.name}}.isJust() ? m_{{property.name}}.fromJust() : defaultValue; }
+ {% else %}
+ {{resolve_type(property).raw_return_type}} get{{property.name | to_title_case}}() { return {{resolve_type(property).to_raw_type % ("m_" + property.name)}}; }
+ {% endif %}
+ void set{{property.name | to_title_case}}({{resolve_type(property).pass_type}} value) { m_{{property.name}} = {{resolve_type(property).to_rvalue % "value"}}; }
+ {% endfor %}
+
+ std::unique_ptr<protocol::DictionaryValue> serialize() const;
+ std::unique_ptr<{{type.id}}> clone() const;
+ {% if type.exported %}
+ {{config.exported.string_out}} toJSONString() const override;
+ {% endif %}
+
+ template<int STATE>
+ class {{type.id}}Builder {
+ public:
+ enum {
+ NoFieldsSet = 0,
+ {% set count = 0 %}
+ {% for property in type.properties %}
+ {% if not(property.optional) %}
+ {% set count = count + 1 %}
+ {{property.name | to_title_case}}Set = 1 << {{count}},
+ {% endif %}
+ {% endfor %}
+ AllFieldsSet = (
+ {%- for property in type.properties %}
+ {% if not(property.optional) %}{{property.name | to_title_case}}Set | {%endif %}
+ {% endfor %}0)};
+
+ {% for property in type.properties %}
+
+ {% if property.optional %}
+ {{type.id}}Builder<STATE>& set{{property.name | to_title_case}}({{resolve_type(property).pass_type}} value)
+ {
+ m_result->set{{property.name | to_title_case}}({{resolve_type(property).to_rvalue % "value"}});
+ return *this;
+ }
+ {% else %}
+ {{type.id}}Builder<STATE | {{property.name | to_title_case}}Set>& set{{property.name | to_title_case}}({{resolve_type(property).pass_type}} value)
+ {
+ static_assert(!(STATE & {{property.name | to_title_case}}Set), "property {{property.name}} should not be set yet");
+ m_result->set{{property.name | to_title_case}}({{resolve_type(property).to_rvalue % "value"}});
+ return castState<{{property.name | to_title_case}}Set>();
+ }
+ {% endif %}
+ {% endfor %}
+
+ std::unique_ptr<{{type.id}}> build()
+ {
+ static_assert(STATE == AllFieldsSet, "state should be AllFieldsSet");
+ return std::move(m_result);
+ }
+
+ private:
+ friend class {{type.id}};
+ {{type.id}}Builder() : m_result(new {{type.id}}()) { }
+
+ template<int STEP> {{type.id}}Builder<STATE | STEP>& castState()
+ {
+ return *reinterpret_cast<{{type.id}}Builder<STATE | STEP>*>(this);
+ }
+
+ {{type_def.type}} m_result;
+ };
+
+ static {{type.id}}Builder<0> create()
+ {
+ return {{type.id}}Builder<0>();
+ }
+
+private:
+ {{type.id}}()
+ {
+ {% for property in type.properties %}
+ {% if not(property.optional) and "default_value" in resolve_type(property) %}
+ m_{{property.name}} = {{resolve_type(property).default_value}};
+ {%endif %}
+ {% endfor %}
+ }
+
+ {% for property in type.properties %}
+ {% if property.optional %}
+ Maybe<{{resolve_type(property).raw_type}}> m_{{property.name}};
+ {% else %}
+ {{resolve_type(property).type}} m_{{property.name}};
+ {% endif %}
+ {% endfor %}
+};
+
+ {% endfor %}
+
+// ------------- Backend interface.
+
+class {{config.protocol.export_macro}} Backend {
+public:
+ virtual ~Backend() { }
+
+ {% for command in domain.commands %}
+ {% if "redirect" in command %}{% continue %}{% endif %}
+ {% if ("handlers" in command) and not ("renderer" in command["handlers"]) %}{% continue %}{% endif %}
+ {% if "async" in command %}
+ class {{config.protocol.export_macro}} {{command.name | to_title_case}}Callback {
+ public:
+ virtual void sendSuccess(
+ {%- for parameter in command.returns -%}
+ {%- if "optional" in parameter -%}
+ Maybe<{{resolve_type(parameter).raw_type}}> {{parameter.name}}
+ {%- else -%}
+ {{resolve_type(parameter).pass_type}} {{parameter.name}}
+ {%- endif -%}
+ {%- if not loop.last -%}, {% endif -%}
+ {%- endfor -%}
+ ) = 0;
+ virtual void sendFailure(const DispatchResponse&) = 0;
+ virtual ~{{command.name | to_title_case}}Callback() { }
+ };
+ {% endif %}
+ {%- if not("async" in command) %}
+ virtual DispatchResponse {{command.name}}(
+ {%- else %}
+ virtual void {{command.name}}(
+ {%- endif %}
+ {%- for parameter in command.parameters -%}
+ {%- if not loop.first -%}, {% endif -%}
+ {%- if "optional" in parameter -%}
+ Maybe<{{resolve_type(parameter).raw_type}}> in_{{parameter.name}}
+ {%- else -%}
+ {{resolve_type(parameter).pass_type}} in_{{parameter.name}}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- if "async" in command -%}
+ {%- if command.parameters -%}, {% endif -%}
+ std::unique_ptr<{{command.name | to_title_case}}Callback> callback
+ {%- else -%}
+ {%- for parameter in command.returns -%}
+ {%- if (not loop.first) or command.parameters -%}, {% endif -%}
+ {%- if "optional" in parameter -%}
+ Maybe<{{resolve_type(parameter).raw_type}}>* out_{{parameter.name}}
+ {%- else -%}
+ {{resolve_type(parameter).type}}* out_{{parameter.name}}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+ ) = 0;
+ {% endfor %}
+
+ {% if not has_disable(domain.commands) %}
+ virtual DispatchResponse disable()
+ {
+ return DispatchResponse::OK();
+ }
+ {% endif %}
+};
+
+// ------------- Frontend interface.
+
+class {{config.protocol.export_macro}} Frontend {
+public:
+ Frontend(FrontendChannel* frontendChannel) : m_frontendChannel(frontendChannel) { }
+ {% for event in domain.events %}
+ {% if "handlers" in event and not ("renderer" in event["handlers"]) %}{% continue %}{% endif %}
+ void {{event.name}}(
+ {%- for parameter in event.parameters -%}
+ {%- if "optional" in parameter -%}
+ Maybe<{{resolve_type(parameter).raw_type}}> {{parameter.name}} = Maybe<{{resolve_type(parameter).raw_type}}>()
+ {%- else -%}
+ {{resolve_type(parameter).pass_type}} {{parameter.name}}
+ {%- endif -%}{%- if not loop.last -%}, {% endif -%}
+ {%- endfor -%}
+ );
+ {% endfor %}
+
+ void flush();
+private:
+ FrontendChannel* m_frontendChannel;
+};
+
+// ------------- Dispatcher.
+
+class {{config.protocol.export_macro}} Dispatcher {
+public:
+ static void wire(UberDispatcher*, Backend*);
+
+private:
+ Dispatcher() { }
+};
+
+// ------------- Metainfo.
+
+class {{config.protocol.export_macro}} Metainfo {
+public:
+ using BackendClass = Backend;
+ using FrontendClass = Frontend;
+ using DispatcherClass = Dispatcher;
+ static const char domainName[];
+ static const char commandPrefix[];
+ static const char version[];
+};
+
+} // namespace {{domain.domain}}
+{% for namespace in config.protocol.namespace %}
+} // namespace {{namespace}}
+{% endfor %}
+
+#endif // !defined({{"_".join(config.protocol.namespace)}}_{{domain.domain}}_h)
diff --git a/deps/v8/third_party/jinja2/AUTHORS b/deps/v8/third_party/jinja2/AUTHORS
new file mode 100644
index 0000000000..943f625f87
--- /dev/null
+++ b/deps/v8/third_party/jinja2/AUTHORS
@@ -0,0 +1,33 @@
+Jinja is written and maintained by the Jinja Team and various
+contributors:
+
+Lead Developer:
+
+- Armin Ronacher <armin.ronacher@active-4.com>
+
+Developers:
+
+- Christoph Hack
+- Georg Brandl
+
+Contributors:
+
+- Bryan McLemore
+- Mickaël Guérin <kael@crocobox.org>
+- Cameron Knight
+- Lawrence Journal-World.
+- David Cramer
+
+Patches and suggestions:
+
+- Ronny Pfannschmidt
+- Axel Böhm
+- Alexey Melchakov
+- Bryan McLemore
+- Clovis Fabricio (nosklo)
+- Cameron Knight
+- Peter van Dijk (Habbie)
+- Stefan Ebner
+- Rene Leonhardt
+- Thomas Waldmann
+- Cory Benfield (Lukasa)
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.md5 b/deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.md5
new file mode 100644
index 0000000000..a0eb1b2464
--- /dev/null
+++ b/deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.md5
@@ -0,0 +1 @@
+edb51693fe22c53cee5403775c71a99e Jinja2-2.8.tar.gz
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.sha512 b/deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.sha512
new file mode 100644
index 0000000000..88e4ea6154
--- /dev/null
+++ b/deps/v8/third_party/jinja2/Jinja2-2.8.tar.gz.sha512
@@ -0,0 +1 @@
+2e80d6d9ad10dafcce1e6dd24493f5dffc43a17f71a30a650415638e12d3a3891738ebacc569701129214026d062d91a2b10e4f7a2c7b85d801dde26ded1bebb Jinja2-2.8.tar.gz
diff --git a/deps/v8/third_party/jinja2/LICENSE b/deps/v8/third_party/jinja2/LICENSE
new file mode 100644
index 0000000000..31bf900e58
--- /dev/null
+++ b/deps/v8/third_party/jinja2/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/deps/v8/third_party/jinja2/OWNERS b/deps/v8/third_party/jinja2/OWNERS
new file mode 100644
index 0000000000..8edbdf893c
--- /dev/null
+++ b/deps/v8/third_party/jinja2/OWNERS
@@ -0,0 +1,3 @@
+timloh@chromium.org
+haraken@chromium.org
+nbarth@chromium.org
diff --git a/deps/v8/third_party/jinja2/README.chromium b/deps/v8/third_party/jinja2/README.chromium
new file mode 100644
index 0000000000..684ff8ec0a
--- /dev/null
+++ b/deps/v8/third_party/jinja2/README.chromium
@@ -0,0 +1,25 @@
+Name: Jinja2 Python Template Engine
+Short Name: jinja2
+URL: http://jinja.pocoo.org/
+Version: 2.8
+License: BSD 3-clause License
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Template engine for code generation in Blink.
+
+Source: https://pypi.python.org/packages/f2/2f/0b98b06a345a761bec91a079ccae392d282690c2d8272e708f4d10829e22/Jinja2-2.8.tar.gz
+MD5: edb51693fe22c53cee5403775c71a99e
+SHA-1: 4a33c1a0fd585eba2507e8c274a9cd113b1d13ab
+
+Local Modifications:
+This only includes the jinja2 directory from the tarball and the LICENSE and
+AUTHORS files. Unit tests (testsuite directory) have been removed.
+Additional chromium-specific files are:
+* README.chromium (this file)
+* OWNERS
+* install script (get_jinja2.sh)
+* files of hashes (MD5 is also posted on website, SHA-512 computed locally).
+Script checks hash then unpacks archive and installs desired files.
+Retrieve or update by executing jinja2/get_jinja2.sh from third_party.
diff --git a/deps/v8/third_party/jinja2/__init__.py b/deps/v8/third_party/jinja2/__init__.py
new file mode 100644
index 0000000000..029fb2e6e8
--- /dev/null
+++ b/deps/v8/third_party/jinja2/__init__.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2
+ ~~~~~~
+
+ Jinja2 is a template engine written in pure Python. It provides a
+ Django inspired non-XML syntax but supports inline expressions and
+ an optional sandboxed environment.
+
+ Nutshell
+ --------
+
+ Here a small example of a Jinja2 template::
+
+ {% extends 'base.html' %}
+ {% block title %}Memberlist{% endblock %}
+ {% block content %}
+ <ul>
+ {% for user in users %}
+ <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+ {% endfor %}
+ </ul>
+ {% endblock %}
+
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+__docformat__ = 'restructuredtext en'
+__version__ = '2.8'
+
+# high level interface
+from jinja2.environment import Environment, Template
+
+# loaders
+from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
+ DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
+ ModuleLoader
+
+# bytecode caches
+from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
+ MemcachedBytecodeCache
+
+# undefined types
+from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
+ make_logging_undefined
+
+# exceptions
+from jinja2.exceptions import TemplateError, UndefinedError, \
+ TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
+ TemplateAssertionError
+
+# decorators and public utilities
+from jinja2.filters import environmentfilter, contextfilter, \
+ evalcontextfilter
+from jinja2.utils import Markup, escape, clear_caches, \
+ environmentfunction, evalcontextfunction, contextfunction, \
+ is_undefined
+
+__all__ = [
+ 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
+ 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
+ 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
+ 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
+ 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
+ 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
+ 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
+ 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
+ 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
+]
diff --git a/deps/v8/third_party/jinja2/_compat.py b/deps/v8/third_party/jinja2/_compat.py
new file mode 100644
index 0000000000..143962f384
--- /dev/null
+++ b/deps/v8/third_party/jinja2/_compat.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2._compat
+ ~~~~~~~~~~~~~~
+
+ Some py2/py3 compatibility support based on a stripped down
+ version of six so we don't have to depend on a specific version
+ of it.
+
+ :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+PYPY = hasattr(sys, 'pypy_translation_info')
+_identity = lambda x: x
+
+
+if not PY2:
+ unichr = chr
+ range_type = range
+ text_type = str
+ string_types = (str,)
+ integer_types = (int,)
+
+ iterkeys = lambda d: iter(d.keys())
+ itervalues = lambda d: iter(d.values())
+ iteritems = lambda d: iter(d.items())
+
+ import pickle
+ from io import BytesIO, StringIO
+ NativeStringIO = StringIO
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+ ifilter = filter
+ imap = map
+ izip = zip
+ intern = sys.intern
+
+ implements_iterator = _identity
+ implements_to_string = _identity
+ encode_filename = _identity
+ get_next = lambda x: x.__next__
+
+else:
+ unichr = unichr
+ text_type = unicode
+ range_type = xrange
+ string_types = (str, unicode)
+ integer_types = (int, long)
+
+ iterkeys = lambda d: d.iterkeys()
+ itervalues = lambda d: d.itervalues()
+ iteritems = lambda d: d.iteritems()
+
+ import cPickle as pickle
+ from cStringIO import StringIO as BytesIO, StringIO
+ NativeStringIO = BytesIO
+
+ exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+
+ from itertools import imap, izip, ifilter
+ intern = intern
+
+ def implements_iterator(cls):
+ cls.next = cls.__next__
+ del cls.__next__
+ return cls
+
+ def implements_to_string(cls):
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+ return cls
+
+ get_next = lambda x: x.next
+
+ def encode_filename(filename):
+ if isinstance(filename, unicode):
+ return filename.encode('utf-8')
+ return filename
+
+
+def with_metaclass(meta, *bases):
+ # This requires a bit of explanation: the basic idea is to make a
+ # dummy metaclass for one level of class instanciation that replaces
+ # itself with the actual metaclass. Because of internal type checks
+ # we also need to make sure that we downgrade the custom metaclass
+ # for one level to something closer to type (that's why __call__ and
+ # __init__ comes back from type etc.).
+ #
+ # This has the advantage over six.with_metaclass in that it does not
+ # introduce dummy classes into the final MRO.
+ class metaclass(meta):
+ __call__ = type.__call__
+ __init__ = type.__init__
+ def __new__(cls, name, this_bases, d):
+ if this_bases is None:
+ return type.__new__(cls, name, (), d)
+ return meta(name, bases, d)
+ return metaclass('temporary_class', None, {})
+
+
+try:
+ from urllib.parse import quote_from_bytes as url_quote
+except ImportError:
+ from urllib import quote as url_quote
diff --git a/deps/v8/third_party/jinja2/_stringdefs.py b/deps/v8/third_party/jinja2/_stringdefs.py
new file mode 100644
index 0000000000..da5830e9f1
--- /dev/null
+++ b/deps/v8/third_party/jinja2/_stringdefs.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2._stringdefs
+ ~~~~~~~~~~~~~~~~~~
+
+ Strings of all Unicode characters of a certain category.
+ Used for matching in Unicode-aware languages. Run to regenerate.
+
+ Inspired by chartypes_create.py from the MoinMoin project, original
+ implementation from Pygments.
+
+ :copyright: Copyright 2006-2009 by the Jinja team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from jinja2._compat import unichr
+
+Cc = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f'
+
+Cf = u'\xad\u0600\u0601\u0602\u0603\u06dd\u070f\u17b4\u17b5\u200b\u200c\u200d\u200e\u200f\u202a\u202b\u202c\u202d\u202e\u2060\u2061\u2062\u2063\u206a\u206b\u206c\u206d\u206e\u206f\ufeff\ufff9\ufffa\ufffb'
+
+Cn = u'\u0242\u0243\u0244\u0245\u0246\u0247\u0248\u0249\u024a\u024b\u024c\u024d\u024e\u024f\u0370\u0371\u0372\u0373\u0376\u0377\u0378\u0379\u037b\u037c\u037d\u037f\u0380\u0381\u0382\u0383\u038b\u038d\u03a2\u03cf\u0487\u04cf\u04fa\u04fb\u04fc\u04fd\u04fe\u04ff\u0510\u0511\u0512\u0513\u0514\u0515\u0516\u0517\u0518\u0519\u051a\u051b\u051c\u051d\u051e\u051f\u0520\u0521\u0522\u0523\u0524\u0525\u0526\u0527\u0528\u0529\u052a\u052b\u052c\u052d\u052e\u052f\u0530\u0557\u0558\u0560\u0588\u058b\u058c\u058d\u058e\u058f\u0590\u05ba\u05c8\u05c9\u05ca\u05cb\u05cc\u05cd\u05ce\u05cf\u05eb\u05ec\u05ed\u05ee\u05ef\u05f5\u05f6\u05f7\u05f8\u05f9\u05fa\u05fb\u05fc\u05fd\u05fe\u05ff\u0604\u0605\u0606\u0607\u0608\u0609\u060a\u0616\u0617\u0618\u0619\u061a\u061c\u061d\u0620\u063b\u063c\u063d\u063e\u063f\u065f\u070e\u074b\u074c\u076e\u076f\u0770\u0771\u0772\u0773\u0774\u0775\u0776\u0777\u0778\u0779\u077a\u077b\u077c\u077d\u077e\u077f\u07b2\u07b3\u07b4\u07b5\u07b6\u07b7\u07b8\u07b9\u07ba\u07bb\u07bc\u07bd\u07be\u07bf\u07c0\u07c1\u07c2\u07c3\u07c4\u07c5\u07c6\u07c7\u07c8\u07c9\u07ca\u07cb\u07cc\u07cd\u07ce\u07cf\u07d0\u07d1\u07d2\u07d3\u07d4\u07d5\u07d6\u07d7\u07d8\u07d9\u07da\u07db\u07dc\u07dd\u07de\u07df\u07e0\u07e1\u07e2\u07e3\u07e4\u07e5\u07e6\u07e7\u07e8\u07e9\u07ea\u07eb\u07ec\u07ed\u07ee\u07ef\u07f0\u07f1\u07f2\u07f3\u07f4\u07f5\u07f6\u07f7\u07f8\u07f9\u07fa\u07fb\u07fc\u07fd\u07fe\u07ff\u0800\u0801\u0802\u0803\u0804\u0805\u0806\u0807\u0808\u0809\u080a\u080b\u080c\u080d\u080e\u080f\u0810\u0811\u0812\u0813\u0814\u0815\u0816\u0817\u0818\u0819\u081a\u081b\u081c\u081d\u081e\u081f\u0820\u0821\u0822\u0823\u0824\u0825\u0826\u0827\u0828\u0829\u082a\u082b\u082c\u082d\u082e\u082f\u0830\u0831\u0832\u0833\u0834\u0835\u0836\u0837\u0838\u0839\u083a\u083b\u083c\u083d\u083e\u083f\u0840\u0841\u0842\u0843\u0844\u0845\u0846\u0847\u0848\u0849\u084a\u084b\u084c\u084d\u084e\u084f\u0850\u0851\u0852\u0853\u0854\u0855\u0856\u0857\u0858\u0859\u085a\u085b\u085c\u085d\u085e\u085f\u0860\u0861\u0862\u0863\u0864\u0865
\u0866\u0867\u0868\u0869\u086a\u086b\u086c\u086d\u086e\u086f\u0870\u0871\u0872\u0873\u0874\u0875\u0876\u0877\u0878\u0879\u087a\u087b\u087c\u087d\u087e\u087f\u0880\u0881\u0882\u0883\u0884\u0885\u0886\u0887\u0888\u0889\u088a\u088b\u088c\u088d\u088e\u088f\u0890\u0891\u0892\u0893\u0894\u0895\u0896\u0897\u0898\u0899\u089a\u089b\u089c\u089d\u089e\u089f\u08a0\u08a1\u08a2\u08a3\u08a4\u08a5\u08a6\u08a7\u08a8\u08a9\u08aa\u08ab\u08ac\u08ad\u08ae\u08af\u08b0\u08b1\u08b2\u08b3\u08b4\u08b5\u08b6\u08b7\u08b8\u08b9\u08ba\u08bb\u08bc\u08bd\u08be\u08bf\u08c0\u08c1\u08c2\u08c3\u08c4\u08c5\u08c6\u08c7\u08c8\u08c9\u08ca\u08cb\u08cc\u08cd\u08ce\u08cf\u08d0\u08d1\u08d2\u08d3\u08d4\u08d5\u08d6\u08d7\u08d8\u08d9\u08da\u08db\u08dc\u08dd\u08de\u08df\u08e0\u08e1\u08e2\u08e3\u08e4\u08e5\u08e6\u08e7\u08e8\u08e9\u08ea\u08eb\u08ec\u08ed\u08ee\u08ef\u08f0\u08f1\u08f2\u08f3\u08f4\u08f5\u08f6\u08f7\u08f8\u08f9\u08fa\u08fb\u08fc\u08fd\u08fe\u08ff\u0900\u093a\u093b\u094e\u094f\u0955\u0956\u0957\u0971\u0972\u0973\u0974\u0975\u0976\u0977\u0978\u0979\u097a\u097b\u097c\u097e\u097f\u0980\u0984\u098d\u098e\u0991\u0992\u09a9\u09b1\u09b3\u09b4\u09b5\u09ba\u09bb\u09c5\u09c6\u09c9\u09ca\u09cf\u09d0\u09d1\u09d2\u09d3\u09d4\u09d5\u09d6\u09d8\u09d9\u09da\u09db\u09de\u09e4\u09e5\u09fb\u09fc\u09fd\u09fe\u09ff\u0a00\u0a04\u0a0b\u0a0c\u0a0d\u0a0e\u0a11\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a\u0a3b\u0a3d\u0a43\u0a44\u0a45\u0a46\u0a49\u0a4a\u0a4e\u0a4f\u0a50\u0a51\u0a52\u0a53\u0a54\u0a55\u0a56\u0a57\u0a58\u0a5d\u0a5f\u0a60\u0a61\u0a62\u0a63\u0a64\u0a65\u0a75\u0a76\u0a77\u0a78\u0a79\u0a7a\u0a7b\u0a7c\u0a7d\u0a7e\u0a7f\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba\u0abb\u0ac6\u0aca\u0ace\u0acf\u0ad1\u0ad2\u0ad3\u0ad4\u0ad5\u0ad6\u0ad7\u0ad8\u0ad9\u0ada\u0adb\u0adc\u0add\u0ade\u0adf\u0ae4\u0ae5\u0af0\u0af2\u0af3\u0af4\u0af5\u0af6\u0af7\u0af8\u0af9\u0afa\u0afb\u0afc\u0afd\u0afe\u0aff\u0b00\u0b04\u0b0d\u0b0e\u0b11\u0b12\u0b29\u0b31\u0b34\u0b3a\u0b3b\u0b44\u0b45\u0b46\u0b49\u0b4a\u0b4e\u0b4f\u0b50\u0b51\u0b52\u0b53\u0b54\u0b55\u
0b58\u0b59\u0b5a\u0b5b\u0b5e\u0b62\u0b63\u0b64\u0b65\u0b72\u0b73\u0b74\u0b75\u0b76\u0b77\u0b78\u0b79\u0b7a\u0b7b\u0b7c\u0b7d\u0b7e\u0b7f\u0b80\u0b81\u0b84\u0b8b\u0b8c\u0b8d\u0b91\u0b96\u0b97\u0b98\u0b9b\u0b9d\u0ba0\u0ba1\u0ba2\u0ba5\u0ba6\u0ba7\u0bab\u0bac\u0bad\u0bba\u0bbb\u0bbc\u0bbd\u0bc3\u0bc4\u0bc5\u0bc9\u0bce\u0bcf\u0bd0\u0bd1\u0bd2\u0bd3\u0bd4\u0bd5\u0bd6\u0bd8\u0bd9\u0bda\u0bdb\u0bdc\u0bdd\u0bde\u0bdf\u0be0\u0be1\u0be2\u0be3\u0be4\u0be5\u0bfb\u0bfc\u0bfd\u0bfe\u0bff\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a\u0c3b\u0c3c\u0c3d\u0c45\u0c49\u0c4e\u0c4f\u0c50\u0c51\u0c52\u0c53\u0c54\u0c57\u0c58\u0c59\u0c5a\u0c5b\u0c5c\u0c5d\u0c5e\u0c5f\u0c62\u0c63\u0c64\u0c65\u0c70\u0c71\u0c72\u0c73\u0c74\u0c75\u0c76\u0c77\u0c78\u0c79\u0c7a\u0c7b\u0c7c\u0c7d\u0c7e\u0c7f\u0c80\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba\u0cbb\u0cc5\u0cc9\u0cce\u0ccf\u0cd0\u0cd1\u0cd2\u0cd3\u0cd4\u0cd7\u0cd8\u0cd9\u0cda\u0cdb\u0cdc\u0cdd\u0cdf\u0ce2\u0ce3\u0ce4\u0ce5\u0cf0\u0cf1\u0cf2\u0cf3\u0cf4\u0cf5\u0cf6\u0cf7\u0cf8\u0cf9\u0cfa\u0cfb\u0cfc\u0cfd\u0cfe\u0cff\u0d00\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a\u0d3b\u0d3c\u0d3d\u0d44\u0d45\u0d49\u0d4e\u0d4f\u0d50\u0d51\u0d52\u0d53\u0d54\u0d55\u0d56\u0d58\u0d59\u0d5a\u0d5b\u0d5c\u0d5d\u0d5e\u0d5f\u0d62\u0d63\u0d64\u0d65\u0d70\u0d71\u0d72\u0d73\u0d74\u0d75\u0d76\u0d77\u0d78\u0d79\u0d7a\u0d7b\u0d7c\u0d7d\u0d7e\u0d7f\u0d80\u0d81\u0d84\u0d97\u0d98\u0d99\u0db2\u0dbc\u0dbe\u0dbf\u0dc7\u0dc8\u0dc9\u0dcb\u0dcc\u0dcd\u0dce\u0dd5\u0dd7\u0de0\u0de1\u0de2\u0de3\u0de4\u0de5\u0de6\u0de7\u0de8\u0de9\u0dea\u0deb\u0dec\u0ded\u0dee\u0def\u0df0\u0df1\u0df5\u0df6\u0df7\u0df8\u0df9\u0dfa\u0dfb\u0dfc\u0dfd\u0dfe\u0dff\u0e00\u0e3b\u0e3c\u0e3d\u0e3e\u0e5c\u0e5d\u0e5e\u0e5f\u0e60\u0e61\u0e62\u0e63\u0e64\u0e65\u0e66\u0e67\u0e68\u0e69\u0e6a\u0e6b\u0e6c\u0e6d\u0e6e\u0e6f\u0e70\u0e71\u0e72\u0e73\u0e74\u0e75\u0e76\u0e77\u0e78\u0e79\u0e7a\u0e7b\u0e7c\u0e7d\u0e7e\u0e7f\u0e80\u0e83\u0e85\u0e86\u0e89\u0e8b\u0e8c\u0e8e\u0e8f\u0e90\u0e91\u0e92\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8\u0ea9\u0eac\u0e
ba\u0ebe\u0ebf\u0ec5\u0ec7\u0ece\u0ecf\u0eda\u0edb\u0ede\u0edf\u0ee0\u0ee1\u0ee2\u0ee3\u0ee4\u0ee5\u0ee6\u0ee7\u0ee8\u0ee9\u0eea\u0eeb\u0eec\u0eed\u0eee\u0eef\u0ef0\u0ef1\u0ef2\u0ef3\u0ef4\u0ef5\u0ef6\u0ef7\u0ef8\u0ef9\u0efa\u0efb\u0efc\u0efd\u0efe\u0eff\u0f48\u0f6b\u0f6c\u0f6d\u0f6e\u0f6f\u0f70\u0f8c\u0f8d\u0f8e\u0f8f\u0f98\u0fbd\u0fcd\u0fce\u0fd2\u0fd3\u0fd4\u0fd5\u0fd6\u0fd7\u0fd8\u0fd9\u0fda\u0fdb\u0fdc\u0fdd\u0fde\u0fdf\u0fe0\u0fe1\u0fe2\u0fe3\u0fe4\u0fe5\u0fe6\u0fe7\u0fe8\u0fe9\u0fea\u0feb\u0fec\u0fed\u0fee\u0fef\u0ff0\u0ff1\u0ff2\u0ff3\u0ff4\u0ff5\u0ff6\u0ff7\u0ff8\u0ff9\u0ffa\u0ffb\u0ffc\u0ffd\u0ffe\u0fff\u1022\u1028\u102b\u1033\u1034\u1035\u103a\u103b\u103c\u103d\u103e\u103f\u105a\u105b\u105c\u105d\u105e\u105f\u1060\u1061\u1062\u1063\u1064\u1065\u1066\u1067\u1068\u1069\u106a\u106b\u106c\u106d\u106e\u106f\u1070\u1071\u1072\u1073\u1074\u1075\u1076\u1077\u1078\u1079\u107a\u107b\u107c\u107d\u107e\u107f\u1080\u1081\u1082\u1083\u1084\u1085\u1086\u1087\u1088\u1089\u108a\u108b\u108c\u108d\u108e\u108f\u1090\u1091\u1092\u1093\u1094\u1095\u1096\u1097\u1098\u1099\u109a\u109b\u109c\u109d\u109e\u109f\u10c6\u10c7\u10c8\u10c9\u10ca\u10cb\u10cc\u10cd\u10ce\u10cf\u10fd\u10fe\u10ff\u115a\u115b\u115c\u115d\u115e\u11a3\u11a4\u11a5\u11a6\u11a7\u11fa\u11fb\u11fc\u11fd\u11fe\u11ff\u1249\u124e\u124f\u1257\u1259\u125e\u125f\u1289\u128e\u128f\u12b1\u12b6\u12b7\u12bf\u12c1\u12c6\u12c7\u12d7\u1311\u1316\u1317\u135b\u135c\u135d\u135e\u137d\u137e\u137f\u139a\u139b\u139c\u139d\u139e\u139f\u13f5\u13f6\u13f7\u13f8\u13f9\u13fa\u13fb\u13fc\u13fd\u13fe\u13ff\u1400\u1677\u1678\u1679\u167a\u167b\u167c\u167d\u167e\u167f\u169d\u169e\u169f\u16f1\u16f2\u16f3\u16f4\u16f5\u16f6\u16f7\u16f8\u16f9\u16fa\u16fb\u16fc\u16fd\u16fe\u16ff\u170d\u1715\u1716\u1717\u1718\u1719\u171a\u171b\u171c\u171d\u171e\u171f\u1737\u1738\u1739\u173a\u173b\u173c\u173d\u173e\u173f\u1754\u1755\u1756\u1757\u1758\u1759\u175a\u175b\u175c\u175d\u175e\u175f\u176d\u1771\u1774\u1775\u1776\u1777\u1778\u1779\u177a\u177b\u177c\u177d\u177e
\u177f\u17de\u17df\u17ea\u17eb\u17ec\u17ed\u17ee\u17ef\u17fa\u17fb\u17fc\u17fd\u17fe\u17ff\u180f\u181a\u181b\u181c\u181d\u181e\u181f\u1878\u1879\u187a\u187b\u187c\u187d\u187e\u187f\u18aa\u18ab\u18ac\u18ad\u18ae\u18af\u18b0\u18b1\u18b2\u18b3\u18b4\u18b5\u18b6\u18b7\u18b8\u18b9\u18ba\u18bb\u18bc\u18bd\u18be\u18bf\u18c0\u18c1\u18c2\u18c3\u18c4\u18c5\u18c6\u18c7\u18c8\u18c9\u18ca\u18cb\u18cc\u18cd\u18ce\u18cf\u18d0\u18d1\u18d2\u18d3\u18d4\u18d5\u18d6\u18d7\u18d8\u18d9\u18da\u18db\u18dc\u18dd\u18de\u18df\u18e0\u18e1\u18e2\u18e3\u18e4\u18e5\u18e6\u18e7\u18e8\u18e9\u18ea\u18eb\u18ec\u18ed\u18ee\u18ef\u18f0\u18f1\u18f2\u18f3\u18f4\u18f5\u18f6\u18f7\u18f8\u18f9\u18fa\u18fb\u18fc\u18fd\u18fe\u18ff\u191d\u191e\u191f\u192c\u192d\u192e\u192f\u193c\u193d\u193e\u193f\u1941\u1942\u1943\u196e\u196f\u1975\u1976\u1977\u1978\u1979\u197a\u197b\u197c\u197d\u197e\u197f\u19aa\u19ab\u19ac\u19ad\u19ae\u19af\u19ca\u19cb\u19cc\u19cd\u19ce\u19cf\u19da\u19db\u19dc\u19dd\u1a1c\u1a1d\u1a20\u1a21\u1a22\u1a23\u1a24\u1a25\u1a26\u1a27\u1a28\u1a29\u1a2a\u1a2b\u1a2c\u1a2d\u1a2e\u1a2f\u1a30\u1a31\u1a32\u1a33\u1a34\u1a35\u1a36\u1a37\u1a38\u1a39\u1a3a\u1a3b\u1a3c\u1a3d\u1a3e\u1a3f\u1a40\u1a41\u1a42\u1a43\u1a44\u1a45\u1a46\u1a47\u1a48\u1a49\u1a4a\u1a4b\u1a4c\u1a4d\u1a4e\u1a4f\u1a50\u1a51\u1a52\u1a53\u1a54\u1a55\u1a56\u1a57\u1a58\u1a59\u1a5a\u1a5b\u1a5c\u1a5d\u1a5e\u1a5f\u1a60\u1a61\u1a62\u1a63\u1a64\u1a65\u1a66\u1a67\u1a68\u1a69\u1a6a\u1a6b\u1a6c\u1a6d\u1a6e\u1a6f\u1a70\u1a71\u1a72\u1a73\u1a74\u1a75\u1a76\u1a77\u1a78\u1a79\u1a7a\u1a7b\u1a7c\u1a7d\u1a7e\u1a7f\u1a80\u1a81\u1a82\u1a83\u1a84\u1a85\u1a86\u1a87\u1a88\u1a89\u1a8a\u1a8b\u1a8c\u1a8d\u1a8e\u1a8f\u1a90\u1a91\u1a92\u1a93\u1a94\u1a95\u1a96\u1a97\u1a98\u1a99\u1a9a\u1a9b\u1a9c\u1a9d\u1a9e\u1a9f\u1aa0\u1aa1\u1aa2\u1aa3\u1aa4\u1aa5\u1aa6\u1aa7\u1aa8\u1aa9\u1aaa\u1aab\u1aac\u1aad\u1aae\u1aaf\u1ab0\u1ab1\u1ab2\u1ab3\u1ab4\u1ab5\u1ab6\u1ab7\u1ab8\u1ab9\u1aba\u1abb\u1abc\u1abd\u1abe\u1abf\u1ac0\u1ac1\u1ac2\u1ac3\u1ac4\u1ac5\u1ac6\u1ac7\u1ac8\u1ac9\u1aca\u1acb\u
1acc\u1acd\u1ace\u1acf\u1ad0\u1ad1\u1ad2\u1ad3\u1ad4\u1ad5\u1ad6\u1ad7\u1ad8\u1ad9\u1ada\u1adb\u1adc\u1add\u1ade\u1adf\u1ae0\u1ae1\u1ae2\u1ae3\u1ae4\u1ae5\u1ae6\u1ae7\u1ae8\u1ae9\u1aea\u1aeb\u1aec\u1aed\u1aee\u1aef\u1af0\u1af1\u1af2\u1af3\u1af4\u1af5\u1af6\u1af7\u1af8\u1af9\u1afa\u1afb\u1afc\u1afd\u1afe\u1aff\u1b00\u1b01\u1b02\u1b03\u1b04\u1b05\u1b06\u1b07\u1b08\u1b09\u1b0a\u1b0b\u1b0c\u1b0d\u1b0e\u1b0f\u1b10\u1b11\u1b12\u1b13\u1b14\u1b15\u1b16\u1b17\u1b18\u1b19\u1b1a\u1b1b\u1b1c\u1b1d\u1b1e\u1b1f\u1b20\u1b21\u1b22\u1b23\u1b24\u1b25\u1b26\u1b27\u1b28\u1b29\u1b2a\u1b2b\u1b2c\u1b2d\u1b2e\u1b2f\u1b30\u1b31\u1b32\u1b33\u1b34\u1b35\u1b36\u1b37\u1b38\u1b39\u1b3a\u1b3b\u1b3c\u1b3d\u1b3e\u1b3f\u1b40\u1b41\u1b42\u1b43\u1b44\u1b45\u1b46\u1b47\u1b48\u1b49\u1b4a\u1b4b\u1b4c\u1b4d\u1b4e\u1b4f\u1b50\u1b51\u1b52\u1b53\u1b54\u1b55\u1b56\u1b57\u1b58\u1b59\u1b5a\u1b5b\u1b5c\u1b5d\u1b5e\u1b5f\u1b60\u1b61\u1b62\u1b63\u1b64\u1b65\u1b66\u1b67\u1b68\u1b69\u1b6a\u1b6b\u1b6c\u1b6d\u1b6e\u1b6f\u1b70\u1b71\u1b72\u1b73\u1b74\u1b75\u1b76\u1b77\u1b78\u1b79\u1b7a\u1b7b\u1b7c\u1b7d\u1b7e\u1b7f\u1b80\u1b81\u1b82\u1b83\u1b84\u1b85\u1b86\u1b87\u1b88\u1b89\u1b8a\u1b8b\u1b8c\u1b8d\u1b8e\u1b8f\u1b90\u1b91\u1b92\u1b93\u1b94\u1b95\u1b96\u1b97\u1b98\u1b99\u1b9a\u1b9b\u1b9c\u1b9d\u1b9e\u1b9f\u1ba0\u1ba1\u1ba2\u1ba3\u1ba4\u1ba5\u1ba6\u1ba7\u1ba8\u1ba9\u1baa\u1bab\u1bac\u1bad\u1bae\u1baf\u1bb0\u1bb1\u1bb2\u1bb3\u1bb4\u1bb5\u1bb6\u1bb7\u1bb8\u1bb9\u1bba\u1bbb\u1bbc\u1bbd\u1bbe\u1bbf\u1bc0\u1bc1\u1bc2\u1bc3\u1bc4\u1bc5\u1bc6\u1bc7\u1bc8\u1bc9\u1bca\u1bcb\u1bcc\u1bcd\u1bce\u1bcf\u1bd0\u1bd1\u1bd2\u1bd3\u1bd4\u1bd5\u1bd6\u1bd7\u1bd8\u1bd9\u1bda\u1bdb\u1bdc\u1bdd\u1bde\u1bdf\u1be0\u1be1\u1be2\u1be3\u1be4\u1be5\u1be6\u1be7\u1be8\u1be9\u1bea\u1beb\u1bec\u1bed\u1bee\u1bef\u1bf0\u1bf1\u1bf2\u1bf3\u1bf4\u1bf5\u1bf6\u1bf7\u1bf8\u1bf9\u1bfa\u1bfb\u1bfc\u1bfd\u1bfe\u1bff\u1c00\u1c01\u1c02\u1c03\u1c04\u1c05\u1c06\u1c07\u1c08\u1c09\u1c0a\u1c0b\u1c0c\u1c0d\u1c0e\u1c0f\u1c10\u1c11\u1c12\u1c13\u1c14\u1c15\u1c16\u1c17\u1c18\u1c
19\u1c1a\u1c1b\u1c1c\u1c1d\u1c1e\u1c1f\u1c20\u1c21\u1c22\u1c23\u1c24\u1c25\u1c26\u1c27\u1c28\u1c29\u1c2a\u1c2b\u1c2c\u1c2d\u1c2e\u1c2f\u1c30\u1c31\u1c32\u1c33\u1c34\u1c35\u1c36\u1c37\u1c38\u1c39\u1c3a\u1c3b\u1c3c\u1c3d\u1c3e\u1c3f\u1c40\u1c41\u1c42\u1c43\u1c44\u1c45\u1c46\u1c47\u1c48\u1c49\u1c4a\u1c4b\u1c4c\u1c4d\u1c4e\u1c4f\u1c50\u1c51\u1c52\u1c53\u1c54\u1c55\u1c56\u1c57\u1c58\u1c59\u1c5a\u1c5b\u1c5c\u1c5d\u1c5e\u1c5f\u1c60\u1c61\u1c62\u1c63\u1c64\u1c65\u1c66\u1c67\u1c68\u1c69\u1c6a\u1c6b\u1c6c\u1c6d\u1c6e\u1c6f\u1c70\u1c71\u1c72\u1c73\u1c74\u1c75\u1c76\u1c77\u1c78\u1c79\u1c7a\u1c7b\u1c7c\u1c7d\u1c7e\u1c7f\u1c80\u1c81\u1c82\u1c83\u1c84\u1c85\u1c86\u1c87\u1c88\u1c89\u1c8a\u1c8b\u1c8c\u1c8d\u1c8e\u1c8f\u1c90\u1c91\u1c92\u1c93\u1c94\u1c95\u1c96\u1c97\u1c98\u1c99\u1c9a\u1c9b\u1c9c\u1c9d\u1c9e\u1c9f\u1ca0\u1ca1\u1ca2\u1ca3\u1ca4\u1ca5\u1ca6\u1ca7\u1ca8\u1ca9\u1caa\u1cab\u1cac\u1cad\u1cae\u1caf\u1cb0\u1cb1\u1cb2\u1cb3\u1cb4\u1cb5\u1cb6\u1cb7\u1cb8\u1cb9\u1cba\u1cbb\u1cbc\u1cbd\u1cbe\u1cbf\u1cc0\u1cc1\u1cc2\u1cc3\u1cc4\u1cc5\u1cc6\u1cc7\u1cc8\u1cc9\u1cca\u1ccb\u1ccc\u1ccd\u1cce\u1ccf\u1cd0\u1cd1\u1cd2\u1cd3\u1cd4\u1cd5\u1cd6\u1cd7\u1cd8\u1cd9\u1cda\u1cdb\u1cdc\u1cdd\u1cde\u1cdf\u1ce0\u1ce1\u1ce2\u1ce3\u1ce4\u1ce5\u1ce6\u1ce7\u1ce8\u1ce9\u1cea\u1ceb\u1cec\u1ced\u1cee\u1cef\u1cf0\u1cf1\u1cf2\u1cf3\u1cf4\u1cf5\u1cf6\u1cf7\u1cf8\u1cf9\u1cfa\u1cfb\u1cfc\u1cfd\u1cfe\u1cff\u1dc4\u1dc5\u1dc6\u1dc7\u1dc8\u1dc9\u1dca\u1dcb\u1dcc\u1dcd\u1dce\u1dcf\u1dd0\u1dd1\u1dd2\u1dd3\u1dd4\u1dd5\u1dd6\u1dd7\u1dd8\u1dd9\u1dda\u1ddb\u1ddc\u1ddd\u1dde\u1ddf\u1de0\u1de1\u1de2\u1de3\u1de4\u1de5\u1de6\u1de7\u1de8\u1de9\u1dea\u1deb\u1dec\u1ded\u1dee\u1def\u1df0\u1df1\u1df2\u1df3\u1df4\u1df5\u1df6\u1df7\u1df8\u1df9\u1dfa\u1dfb\u1dfc\u1dfd\u1dfe\u1dff\u1e9c\u1e9d\u1e9e\u1e9f\u1efa\u1efb\u1efc\u1efd\u1efe\u1eff\u1f16\u1f17\u1f1e\u1f1f\u1f46\u1f47\u1f4e\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e\u1f7f\u1fb5\u1fc5\u1fd4\u1fd5\u1fdc\u1ff0\u1ff1\u1ff5\u1fff\u2064\u2065\u2066\u2067\u2068\u2069\u2072\u2073\u208f\u2095
\u2096\u2097\u2098\u2099\u209a\u209b\u209c\u209d\u209e\u209f\u20b6\u20b7\u20b8\u20b9\u20ba\u20bb\u20bc\u20bd\u20be\u20bf\u20c0\u20c1\u20c2\u20c3\u20c4\u20c5\u20c6\u20c7\u20c8\u20c9\u20ca\u20cb\u20cc\u20cd\u20ce\u20cf\u20ec\u20ed\u20ee\u20ef\u20f0\u20f1\u20f2\u20f3\u20f4\u20f5\u20f6\u20f7\u20f8\u20f9\u20fa\u20fb\u20fc\u20fd\u20fe\u20ff\u214d\u214e\u214f\u2150\u2151\u2152\u2184\u2185\u2186\u2187\u2188\u2189\u218a\u218b\u218c\u218d\u218e\u218f\u23dc\u23dd\u23de\u23df\u23e0\u23e1\u23e2\u23e3\u23e4\u23e5\u23e6\u23e7\u23e8\u23e9\u23ea\u23eb\u23ec\u23ed\u23ee\u23ef\u23f0\u23f1\u23f2\u23f3\u23f4\u23f5\u23f6\u23f7\u23f8\u23f9\u23fa\u23fb\u23fc\u23fd\u23fe\u23ff\u2427\u2428\u2429\u242a\u242b\u242c\u242d\u242e\u242f\u2430\u2431\u2432\u2433\u2434\u2435\u2436\u2437\u2438\u2439\u243a\u243b\u243c\u243d\u243e\u243f\u244b\u244c\u244d\u244e\u244f\u2450\u2451\u2452\u2453\u2454\u2455\u2456\u2457\u2458\u2459\u245a\u245b\u245c\u245d\u245e\u245f\u269d\u269e\u269f\u26b2\u26b3\u26b4\u26b5\u26b6\u26b7\u26b8\u26b9\u26ba\u26bb\u26bc\u26bd\u26be\u26bf\u26c0\u26c1\u26c2\u26c3\u26c4\u26c5\u26c6\u26c7\u26c8\u26c9\u26ca\u26cb\u26cc\u26cd\u26ce\u26cf\u26d0\u26d1\u26d2\u26d3\u26d4\u26d5\u26d6\u26d7\u26d8\u26d9\u26da\u26db\u26dc\u26dd\u26de\u26df\u26e0\u26e1\u26e2\u26e3\u26e4\u26e5\u26e6\u26e7\u26e8\u26e9\u26ea\u26eb\u26ec\u26ed\u26ee\u26ef\u26f0\u26f1\u26f2\u26f3\u26f4\u26f5\u26f6\u26f7\u26f8\u26f9\u26fa\u26fb\u26fc\u26fd\u26fe\u26ff\u2700\u2705\u270a\u270b\u2728\u274c\u274e\u2753\u2754\u2755\u2757\u275f\u2760\u2795\u2796\u2797\u27b0\u27bf\u27c7\u27c8\u27c9\u27ca\u27cb\u27cc\u27cd\u27ce\u27cf\u27ec\u27ed\u27ee\u27ef\u2b14\u2b15\u2b16\u2b17\u2b18\u2b19\u2b1a\u2b1b\u2b1c\u2b1d\u2b1e\u2b1f\u2b20\u2b21\u2b22\u2b23\u2b24\u2b25\u2b26\u2b27\u2b28\u2b29\u2b2a\u2b2b\u2b2c\u2b2d\u2b2e\u2b2f\u2b30\u2b31\u2b32\u2b33\u2b34\u2b35\u2b36\u2b37\u2b38\u2b39\u2b3a\u2b3b\u2b3c\u2b3d\u2b3e\u2b3f\u2b40\u2b41\u2b42\u2b43\u2b44\u2b45\u2b46\u2b47\u2b48\u2b49\u2b4a\u2b4b\u2b4c\u2b4d\u2b4e\u2b4f\u2b50\u2b51\u2b52\u2b53\u2b54\u
2b55\u2b56\u2b57\u2b58\u2b59\u2b5a\u2b5b\u2b5c\u2b5d\u2b5e\u2b5f\u2b60\u2b61\u2b62\u2b63\u2b64\u2b65\u2b66\u2b67\u2b68\u2b69\u2b6a\u2b6b\u2b6c\u2b6d\u2b6e\u2b6f\u2b70\u2b71\u2b72\u2b73\u2b74\u2b75\u2b76\u2b77\u2b78\u2b79\u2b7a\u2b7b\u2b7c\u2b7d\u2b7e\u2b7f\u2b80\u2b81\u2b82\u2b83\u2b84\u2b85\u2b86\u2b87\u2b88\u2b89\u2b8a\u2b8b\u2b8c\u2b8d\u2b8e\u2b8f\u2b90\u2b91\u2b92\u2b93\u2b94\u2b95\u2b96\u2b97\u2b98\u2b99\u2b9a\u2b9b\u2b9c\u2b9d\u2b9e\u2b9f\u2ba0\u2ba1\u2ba2\u2ba3\u2ba4\u2ba5\u2ba6\u2ba7\u2ba8\u2ba9\u2baa\u2bab\u2bac\u2bad\u2bae\u2baf\u2bb0\u2bb1\u2bb2\u2bb3\u2bb4\u2bb5\u2bb6\u2bb7\u2bb8\u2bb9\u2bba\u2bbb\u2bbc\u2bbd\u2bbe\u2bbf\u2bc0\u2bc1\u2bc2\u2bc3\u2bc4\u2bc5\u2bc6\u2bc7\u2bc8\u2bc9\u2bca\u2bcb\u2bcc\u2bcd\u2bce\u2bcf\u2bd0\u2bd1\u2bd2\u2bd3\u2bd4\u2bd5\u2bd6\u2bd7\u2bd8\u2bd9\u2bda\u2bdb\u2bdc\u2bdd\u2bde\u2bdf\u2be0\u2be1\u2be2\u2be3\u2be4\u2be5\u2be6\u2be7\u2be8\u2be9\u2bea\u2beb\u2bec\u2bed\u2bee\u2bef\u2bf0\u2bf1\u2bf2\u2bf3\u2bf4\u2bf5\u2bf6\u2bf7\u2bf8\u2bf9\u2bfa\u2bfb\u2bfc\u2bfd\u2bfe\u2bff\u2c2f\u2c5f\u2c60\u2c61\u2c62\u2c63\u2c64\u2c65\u2c66\u2c67\u2c68\u2c69\u2c6a\u2c6b\u2c6c\u2c6d\u2c6e\u2c6f\u2c70\u2c71\u2c72\u2c73\u2c74\u2c75\u2c76\u2c77\u2c78\u2c79\u2c7a\u2c7b\u2c7c\u2c7d\u2c7e\u2c7f\u2ceb\u2cec\u2ced\u2cee\u2cef\u2cf0\u2cf1\u2cf2\u2cf3\u2cf4\u2cf5\u2cf6\u2cf7\u2cf8\u2d26\u2d27\u2d28\u2d29\u2d2a\u2d2b\u2d2c\u2d2d\u2d2e\u2d2f\u2d66\u2d67\u2d68\u2d69\u2d6a\u2d6b\u2d6c\u2d6d\u2d6e\u2d70\u2d71\u2d72\u2d73\u2d74\u2d75\u2d76\u2d77\u2d78\u2d79\u2d7a\u2d7b\u2d7c\u2d7d\u2d7e\u2d7f\u2d97\u2d98\u2d99\u2d9a\u2d9b\u2d9c\u2d9d\u2d9e\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2de0\u2de1\u2de2\u2de3\u2de4\u2de5\u2de6\u2de7\u2de8\u2de9\u2dea\u2deb\u2dec\u2ded\u2dee\u2def\u2df0\u2df1\u2df2\u2df3\u2df4\u2df5\u2df6\u2df7\u2df8\u2df9\u2dfa\u2dfb\u2dfc\u2dfd\u2dfe\u2dff\u2e18\u2e19\u2e1a\u2e1b\u2e1e\u2e1f\u2e20\u2e21\u2e22\u2e23\u2e24\u2e25\u2e26\u2e27\u2e28\u2e29\u2e2a\u2e2b\u2e2c\u2e2d\u2e2e\u2e2f\u2e30\u2e31\u2e32\u2e33\u2e34\u2e35\u2e36\u2e37\u2e
38\u2e39\u2e3a\u2e3b\u2e3c\u2e3d\u2e3e\u2e3f\u2e40\u2e41\u2e42\u2e43\u2e44\u2e45\u2e46\u2e47\u2e48\u2e49\u2e4a\u2e4b\u2e4c\u2e4d\u2e4e\u2e4f\u2e50\u2e51\u2e52\u2e53\u2e54\u2e55\u2e56\u2e57\u2e58\u2e59\u2e5a\u2e5b\u2e5c\u2e5d\u2e5e\u2e5f\u2e60\u2e61\u2e62\u2e63\u2e64\u2e65\u2e66\u2e67\u2e68\u2e69\u2e6a\u2e6b\u2e6c\u2e6d\u2e6e\u2e6f\u2e70\u2e71\u2e72\u2e73\u2e74\u2e75\u2e76\u2e77\u2e78\u2e79\u2e7a\u2e7b\u2e7c\u2e7d\u2e7e\u2e7f\u2e9a\u2ef4\u2ef5\u2ef6\u2ef7\u2ef8\u2ef9\u2efa\u2efb\u2efc\u2efd\u2efe\u2eff\u2fd6\u2fd7\u2fd8\u2fd9\u2fda\u2fdb\u2fdc\u2fdd\u2fde\u2fdf\u2fe0\u2fe1\u2fe2\u2fe3\u2fe4\u2fe5\u2fe6\u2fe7\u2fe8\u2fe9\u2fea\u2feb\u2fec\u2fed\u2fee\u2fef\u2ffc\u2ffd\u2ffe\u2fff\u3040\u3097\u3098\u3100\u3101\u3102\u3103\u3104\u312d\u312e\u312f\u3130\u318f\u31b8\u31b9\u31ba\u31bb\u31bc\u31bd\u31be\u31bf\u31d0\u31d1\u31d2\u31d3\u31d4\u31d5\u31d6\u31d7\u31d8\u31d9\u31da\u31db\u31dc\u31dd\u31de\u31df\u31e0\u31e1\u31e2\u31e3\u31e4\u31e5\u31e6\u31e7\u31e8\u31e9\u31ea\u31eb\u31ec\u31ed\u31ee\u31ef\u321f\u3244\u3245\u3246\u3247\u3248\u3249\u324a\u324b\u324c\u324d\u324e\u324f\u32ff\u4db6\u4db7\u4db8\u4db9\u4dba\u4dbb\u4dbc\u4dbd\u4dbe\u4dbf\u9fbc\u9fbd\u9fbe\u9fbf\u9fc0\u9fc1\u9fc2\u9fc3\u9fc4\u9fc5\u9fc6\u9fc7\u9fc8\u9fc9\u9fca\u9fcb\u9fcc\u9fcd\u9fce\u9fcf\u9fd0\u9fd1\u9fd2\u9fd3\u9fd4\u9fd5\u9fd6\u9fd7\u9fd8\u9fd9\u9fda\u9fdb\u9fdc\u9fdd\u9fde\u9fdf\u9fe0\u9fe1\u9fe2\u9fe3\u9fe4\u9fe5\u9fe6\u9fe7\u9fe8\u9fe9\u9fea\u9feb\u9fec\u9fed\u9fee\u9fef\u9ff0\u9ff1\u9ff2\u9ff3\u9ff4\u9ff5\u9ff6\u9ff7\u9ff8\u9ff9\u9ffa\u9ffb\u9ffc\u9ffd\u9ffe\u9fff\ua48d\ua48e\ua48f\ua4c7\ua4c8\ua4c9\ua4ca\ua4cb\ua4cc\ua4cd\ua4ce\ua4cf\ua4d0\ua4d1\ua4d2\ua4d3\ua4d4\ua4d5\ua4d6\ua4d7\ua4d8\ua4d9\ua4da\ua4db\ua4dc\ua4dd\ua4de\ua4df\ua4e0\ua4e1\ua4e2\ua4e3\ua4e4\ua4e5\ua4e6\ua4e7\ua4e8\ua4e9\ua4ea\ua4eb\ua4ec\ua4ed\ua4ee\ua4ef\ua4f0\ua4f1\ua4f2\ua4f3\ua4f4\ua4f5\ua4f6\ua4f7\ua4f8\ua4f9\ua4fa\ua4fb\ua4fc\ua4fd\ua4fe\ua4ff\ua500\ua501\ua502\ua503\ua504\ua505\ua506\ua507\ua508\ua509\ua50a\ua50b\ua50c\ua50d
\ua50e\ua50f\ua510\ua511\ua512\ua513\ua514\ua515\ua516\ua517\ua518\ua519\ua51a\ua51b\ua51c\ua51d\ua51e\ua51f\ua520\ua521\ua522\ua523\ua524\ua525\ua526\ua527\ua528\ua529\ua52a\ua52b\ua52c\ua52d\ua52e\ua52f\ua530\ua531\ua532\ua533\ua534\ua535\ua536\ua537\ua538\ua539\ua53a\ua53b\ua53c\ua53d\ua53e\ua53f\ua540\ua541\ua542\ua543\ua544\ua545\ua546\ua547\ua548\ua549\ua54a\ua54b\ua54c\ua54d\ua54e\ua54f\ua550\ua551\ua552\ua553\ua554\ua555\ua556\ua557\ua558\ua559\ua55a\ua55b\ua55c\ua55d\ua55e\ua55f\ua560\ua561\ua562\ua563\ua564\ua565\ua566\ua567\ua568\ua569\ua56a\ua56b\ua56c\ua56d\ua56e\ua56f\ua570\ua571\ua572\ua573\ua574\ua575\ua576\ua577\ua578\ua579\ua57a\ua57b\ua57c\ua57d\ua57e\ua57f\ua580\ua581\ua582\ua583\ua584\ua585\ua586\ua587\ua588\ua589\ua58a\ua58b\ua58c\ua58d\ua58e\ua58f\ua590\ua591\ua592\ua593\ua594\ua595\ua596\ua597\ua598\ua599\ua59a\ua59b\ua59c\ua59d\ua59e\ua59f\ua5a0\ua5a1\ua5a2\ua5a3\ua5a4\ua5a5\ua5a6\ua5a7\ua5a8\ua5a9\ua5aa\ua5ab\ua5ac\ua5ad\ua5ae\ua5af\ua5b0\ua5b1\ua5b2\ua5b3\ua5b4\ua5b5\ua5b6\ua5b7\ua5b8\ua5b9\ua5ba\ua5bb\ua5bc\ua5bd\ua5be\ua5bf\ua5c0\ua5c1\ua5c2\ua5c3\ua5c4\ua5c5\ua5c6\ua5c7\ua5c8\ua5c9\ua5ca\ua5cb\ua5cc\ua5cd\ua5ce\ua5cf\ua5d0\ua5d1\ua5d2\ua5d3\ua5d4\ua5d5\ua5d6\ua5d7\ua5d8\ua5d9\ua5da\ua5db\ua5dc\ua5dd\ua5de\ua5df\ua5e0\ua5e1\ua5e2\ua5e3\ua5e4\ua5e5\ua5e6\ua5e7\ua5e8\ua5e9\ua5ea\ua5eb\ua5ec\ua5ed\ua5ee\ua5ef\ua5f0\ua5f1\ua5f2\ua5f3\ua5f4\ua5f5\ua5f6\ua5f7\ua5f8\ua5f9\ua5fa\ua5fb\ua5fc\ua5fd\ua5fe\ua5ff\ua600\ua601\ua602\ua603\ua604\ua605\ua606\ua607\ua608\ua609\ua60a\ua60b\ua60c\ua60d\ua60e\ua60f\ua610\ua611\ua612\ua613\ua614\ua615\ua616\ua617\ua618\ua619\ua61a\ua61b\ua61c\ua61d\ua61e\ua61f\ua620\ua621\ua622\ua623\ua624\ua625\ua626\ua627\ua628\ua629\ua62a\ua62b\ua62c\ua62d\ua62e\ua62f\ua630\ua631\ua632\ua633\ua634\ua635\ua636\ua637\ua638\ua639\ua63a\ua63b\ua63c\ua63d\ua63e\ua63f\ua640\ua641\ua642\ua643\ua644\ua645\ua646\ua647\ua648\ua649\ua64a\ua64b\ua64c\ua64d\ua64e\ua64f\ua650\ua651\ua652\ua653\ua654\ua655\ua656\ua657\ua658\ua659\ua65a\u
a65b\ua65c\ua65d\ua65e\ua65f\ua660\ua661\ua662\ua663\ua664\ua665\ua666\ua667\ua668\ua669\ua66a\ua66b\ua66c\ua66d\ua66e\ua66f\ua670\ua671\ua672\ua673\ua674\ua675\ua676\ua677\ua678\ua679\ua67a\ua67b\ua67c\ua67d\ua67e\ua67f\ua680\ua681\ua682\ua683\ua684\ua685\ua686\ua687\ua688\ua689\ua68a\ua68b\ua68c\ua68d\ua68e\ua68f\ua690\ua691\ua692\ua693\ua694\ua695\ua696\ua697\ua698\ua699\ua69a\ua69b\ua69c\ua69d\ua69e\ua69f\ua6a0\ua6a1\ua6a2\ua6a3\ua6a4\ua6a5\ua6a6\ua6a7\ua6a8\ua6a9\ua6aa\ua6ab\ua6ac\ua6ad\ua6ae\ua6af\ua6b0\ua6b1\ua6b2\ua6b3\ua6b4\ua6b5\ua6b6\ua6b7\ua6b8\ua6b9\ua6ba\ua6bb\ua6bc\ua6bd\ua6be\ua6bf\ua6c0\ua6c1\ua6c2\ua6c3\ua6c4\ua6c5\ua6c6\ua6c7\ua6c8\ua6c9\ua6ca\ua6cb\ua6cc\ua6cd\ua6ce\ua6cf\ua6d0\ua6d1\ua6d2\ua6d3\ua6d4\ua6d5\ua6d6\ua6d7\ua6d8\ua6d9\ua6da\ua6db\ua6dc\ua6dd\ua6de\ua6df\ua6e0\ua6e1\ua6e2\ua6e3\ua6e4\ua6e5\ua6e6\ua6e7\ua6e8\ua6e9\ua6ea\ua6eb\ua6ec\ua6ed\ua6ee\ua6ef\ua6f0\ua6f1\ua6f2\ua6f3\ua6f4\ua6f5\ua6f6\ua6f7\ua6f8\ua6f9\ua6fa\ua6fb\ua6fc\ua6fd\ua6fe\ua6ff\ua717\ua718\ua719\ua71a\ua71b\ua71c\ua71d\ua71e\ua71f\ua720\ua721\ua722\ua723\ua724\ua725\ua726\ua727\ua728\ua729\ua72a\ua72b\ua72c\ua72d\ua72e\ua72f\ua730\ua731\ua732\ua733\ua734\ua735\ua736\ua737\ua738\ua739\ua73a\ua73b\ua73c\ua73d\ua73e\ua73f\ua740\ua741\ua742\ua743\ua744\ua745\ua746\ua747\ua748\ua749\ua74a\ua74b\ua74c\ua74d\ua74e\ua74f\ua750\ua751\ua752\ua753\ua754\ua755\ua756\ua757\ua758\ua759\ua75a\ua75b\ua75c\ua75d\ua75e\ua75f\ua760\ua761\ua762\ua763\ua764\ua765\ua766\ua767\ua768\ua769\ua76a\ua76b\ua76c\ua76d\ua76e\ua76f\ua770\ua771\ua772\ua773\ua774\ua775\ua776\ua777\ua778\ua779\ua77a\ua77b\ua77c\ua77d\ua77e\ua77f\ua780\ua781\ua782\ua783\ua784\ua785\ua786\ua787\ua788\ua789\ua78a\ua78b\ua78c\ua78d\ua78e\ua78f\ua790\ua791\ua792\ua793\ua794\ua795\ua796\ua797\ua798\ua799\ua79a\ua79b\ua79c\ua79d\ua79e\ua79f\ua7a0\ua7a1\ua7a2\ua7a3\ua7a4\ua7a5\ua7a6\ua7a7\ua7a8\ua7a9\ua7aa\ua7ab\ua7ac\ua7ad\ua7ae\ua7af\ua7b0\ua7b1\ua7b2\ua7b3\ua7b4\ua7b5\ua7b6\ua7b7\ua7b8\ua7b9\ua7ba\ua7bb\ua7bc\ua7bd\ua7be\ua7
bf\ua7c0\ua7c1\ua7c2\ua7c3\ua7c4\ua7c5\ua7c6\ua7c7\ua7c8\ua7c9\ua7ca\ua7cb\ua7cc\ua7cd\ua7ce\ua7cf\ua7d0\ua7d1\ua7d2\ua7d3\ua7d4\ua7d5\ua7d6\ua7d7\ua7d8\ua7d9\ua7da\ua7db\ua7dc\ua7dd\ua7de\ua7df\ua7e0\ua7e1\ua7e2\ua7e3\ua7e4\ua7e5\ua7e6\ua7e7\ua7e8\ua7e9\ua7ea\ua7eb\ua7ec\ua7ed\ua7ee\ua7ef\ua7f0\ua7f1\ua7f2\ua7f3\ua7f4\ua7f5\ua7f6\ua7f7\ua7f8\ua7f9\ua7fa\ua7fb\ua7fc\ua7fd\ua7fe\ua7ff\ua82c\ua82d\ua82e\ua82f\ua830\ua831\ua832\ua833\ua834\ua835\ua836\ua837\ua838\ua839\ua83a\ua83b\ua83c\ua83d\ua83e\ua83f\ua840\ua841\ua842\ua843\ua844\ua845\ua846\ua847\ua848\ua849\ua84a\ua84b\ua84c\ua84d\ua84e\ua84f\ua850\ua851\ua852\ua853\ua854\ua855\ua856\ua857\ua858\ua859\ua85a\ua85b\ua85c\ua85d\ua85e\ua85f\ua860\ua861\ua862\ua863\ua864\ua865\ua866\ua867\ua868\ua869\ua86a\ua86b\ua86c\ua86d\ua86e\ua86f\ua870\ua871\ua872\ua873\ua874\ua875\ua876\ua877\ua878\ua879\ua87a\ua87b\ua87c\ua87d\ua87e\ua87f\ua880\ua881\ua882\ua883\ua884\ua885\ua886\ua887\ua888\ua889\ua88a\ua88b\ua88c\ua88d\ua88e\ua88f\ua890\ua891\ua892\ua893\ua894\ua895\ua896\ua897\ua898\ua899\ua89a\ua89b\ua89c\ua89d\ua89e\ua89f\ua8a0\ua8a1\ua8a2\ua8a3\ua8a4\ua8a5\ua8a6\ua8a7\ua8a8\ua8a9\ua8aa\ua8ab\ua8ac\ua8ad\ua8ae\ua8af\ua8b0\ua8b1\ua8b2\ua8b3\ua8b4\ua8b5\ua8b6\ua8b7\ua8b8\ua8b9\ua8ba\ua8bb\ua8bc\ua8bd\ua8be\ua8bf\ua8c0\ua8c1\ua8c2\ua8c3\ua8c4\ua8c5\ua8c6\ua8c7\ua8c8\ua8c9\ua8ca\ua8cb\ua8cc\ua8cd\ua8ce\ua8cf\ua8d0\ua8d1\ua8d2\ua8d3\ua8d4\ua8d5\ua8d6\ua8d7\ua8d8\ua8d9\ua8da\ua8db\ua8dc\ua8dd\ua8de\ua8df\ua8e0\ua8e1\ua8e2\ua8e3\ua8e4\ua8e5\ua8e6\ua8e7\ua8e8\ua8e9\ua8ea\ua8eb\ua8ec\ua8ed\ua8ee\ua8ef\ua8f0\ua8f1\ua8f2\ua8f3\ua8f4\ua8f5\ua8f6\ua8f7\ua8f8\ua8f9\ua8fa\ua8fb\ua8fc\ua8fd\ua8fe\ua8ff\ua900\ua901\ua902\ua903\ua904\ua905\ua906\ua907\ua908\ua909\ua90a\ua90b\ua90c\ua90d\ua90e\ua90f\ua910\ua911\ua912\ua913\ua914\ua915\ua916\ua917\ua918\ua919\ua91a\ua91b\ua91c\ua91d\ua91e\ua91f\ua920\ua921\ua922\ua923\ua924\ua925\ua926\ua927\ua928\ua929\ua92a\ua92b\ua92c\ua92d\ua92e\ua92f\ua930\ua931\ua932\ua933\ua934\ua935\ua936\ua937\ua938
\ua939\ua93a\ua93b\ua93c\ua93d\ua93e\ua93f\ua940\ua941\ua942\ua943\ua944\ua945\ua946\ua947\ua948\ua949\ua94a\ua94b\ua94c\ua94d\ua94e\ua94f\ua950\ua951\ua952\ua953\ua954\ua955\ua956\ua957\ua958\ua959\ua95a\ua95b\ua95c\ua95d\ua95e\ua95f\ua960\ua961\ua962\ua963\ua964\ua965\ua966\ua967\ua968\ua969\ua96a\ua96b\ua96c\ua96d\ua96e\ua96f\ua970\ua971\ua972\ua973\ua974\ua975\ua976\ua977\ua978\ua979\ua97a\ua97b\ua97c\ua97d\ua97e\ua97f\ua980\ua981\ua982\ua983\ua984\ua985\ua986\ua987\ua988\ua989\ua98a\ua98b\ua98c\ua98d\ua98e\ua98f\ua990\ua991\ua992\ua993\ua994\ua995\ua996\ua997\ua998\ua999\ua99a\ua99b\ua99c\ua99d\ua99e\ua99f\ua9a0\ua9a1\ua9a2\ua9a3\ua9a4\ua9a5\ua9a6\ua9a7\ua9a8\ua9a9\ua9aa\ua9ab\ua9ac\ua9ad\ua9ae\ua9af\ua9b0\ua9b1\ua9b2\ua9b3\ua9b4\ua9b5\ua9b6\ua9b7\ua9b8\ua9b9\ua9ba\ua9bb\ua9bc\ua9bd\ua9be\ua9bf\ua9c0\ua9c1\ua9c2\ua9c3\ua9c4\ua9c5\ua9c6\ua9c7\ua9c8\ua9c9\ua9ca\ua9cb\ua9cc\ua9cd\ua9ce\ua9cf\ua9d0\ua9d1\ua9d2\ua9d3\ua9d4\ua9d5\ua9d6\ua9d7\ua9d8\ua9d9\ua9da\ua9db\ua9dc\ua9dd\ua9de\ua9df\ua9e0\ua9e1\ua9e2\ua9e3\ua9e4\ua9e5\ua9e6\ua9e7\ua9e8\ua9e9\ua9ea\ua9eb\ua9ec\ua9ed\ua9ee\ua9ef\ua9f0\ua9f1\ua9f2\ua9f3\ua9f4\ua9f5\ua9f6\ua9f7\ua9f8\ua9f9\ua9fa\ua9fb\ua9fc\ua9fd\ua9fe\ua9ff\uaa00\uaa01\uaa02\uaa03\uaa04\uaa05\uaa06\uaa07\uaa08\uaa09\uaa0a\uaa0b\uaa0c\uaa0d\uaa0e\uaa0f\uaa10\uaa11\uaa12\uaa13\uaa14\uaa15\uaa16\uaa17\uaa18\uaa19\uaa1a\uaa1b\uaa1c\uaa1d\uaa1e\uaa1f\uaa20\uaa21\uaa22\uaa23\uaa24\uaa25\uaa26\uaa27\uaa28\uaa29\uaa2a\uaa2b\uaa2c\uaa2d\uaa2e\uaa2f\uaa30\uaa31\uaa32\uaa33\uaa34\uaa35\uaa36\uaa37\uaa38\uaa39\uaa3a\uaa3b\uaa3c\uaa3d\uaa3e\uaa3f\uaa40\uaa41\uaa42\uaa43\uaa44\uaa45\uaa46\uaa47\uaa48\uaa49\uaa4a\uaa4b\uaa4c\uaa4d\uaa4e\uaa4f\uaa50\uaa51\uaa52\uaa53\uaa54\uaa55\uaa56\uaa57\uaa58\uaa59\uaa5a\uaa5b\uaa5c\uaa5d\uaa5e\uaa5f\uaa60\uaa61\uaa62\uaa63\uaa64\uaa65\uaa66\uaa67\uaa68\uaa69\uaa6a\uaa6b\uaa6c\uaa6d\uaa6e\uaa6f\uaa70\uaa71\uaa72\uaa73\uaa74\uaa75\uaa76\uaa77\uaa78\uaa79\uaa7a\uaa7b\uaa7c\uaa7d\uaa7e\uaa7f\uaa80\uaa81\uaa82\uaa83\uaa84\uaa85\u
aa86\uaa87\uaa88\uaa89\uaa8a\uaa8b\uaa8c\uaa8d\uaa8e\uaa8f\uaa90\uaa91\uaa92\uaa93\uaa94\uaa95\uaa96\uaa97\uaa98\uaa99\uaa9a\uaa9b\uaa9c\uaa9d\uaa9e\uaa9f\uaaa0\uaaa1\uaaa2\uaaa3\uaaa4\uaaa5\uaaa6\uaaa7\uaaa8\uaaa9\uaaaa\uaaab\uaaac\uaaad\uaaae\uaaaf\uaab0\uaab1\uaab2\uaab3\uaab4\uaab5\uaab6\uaab7\uaab8\uaab9\uaaba\uaabb\uaabc\uaabd\uaabe\uaabf\uaac0\uaac1\uaac2\uaac3\uaac4\uaac5\uaac6\uaac7\uaac8\uaac9\uaaca\uaacb\uaacc\uaacd\uaace\uaacf\uaad0\uaad1\uaad2\uaad3\uaad4\uaad5\uaad6\uaad7\uaad8\uaad9\uaada\uaadb\uaadc\uaadd\uaade\uaadf\uaae0\uaae1\uaae2\uaae3\uaae4\uaae5\uaae6\uaae7\uaae8\uaae9\uaaea\uaaeb\uaaec\uaaed\uaaee\uaaef\uaaf0\uaaf1\uaaf2\uaaf3\uaaf4\uaaf5\uaaf6\uaaf7\uaaf8\uaaf9\uaafa\uaafb\uaafc\uaafd\uaafe\uaaff\uab00\uab01\uab02\uab03\uab04\uab05\uab06\uab07\uab08\uab09\uab0a\uab0b\uab0c\uab0d\uab0e\uab0f\uab10\uab11\uab12\uab13\uab14\uab15\uab16\uab17\uab18\uab19\uab1a\uab1b\uab1c\uab1d\uab1e\uab1f\uab20\uab21\uab22\uab23\uab24\uab25\uab26\uab27\uab28\uab29\uab2a\uab2b\uab2c\uab2d\uab2e\uab2f\uab30\uab31\uab32\uab33\uab34\uab35\uab36\uab37\uab38\uab39\uab3a\uab3b\uab3c\uab3d\uab3e\uab3f\uab40\uab41\uab42\uab43\uab44\uab45\uab46\uab47\uab48\uab49\uab4a\uab4b\uab4c\uab4d\uab4e\uab4f\uab50\uab51\uab52\uab53\uab54\uab55\uab56\uab57\uab58\uab59\uab5a\uab5b\uab5c\uab5d\uab5e\uab5f\uab60\uab61\uab62\uab63\uab64\uab65\uab66\uab67\uab68\uab69\uab6a\uab6b\uab6c\uab6d\uab6e\uab6f\uab70\uab71\uab72\uab73\uab74\uab75\uab76\uab77\uab78\uab79\uab7a\uab7b\uab7c\uab7d\uab7e\uab7f\uab80\uab81\uab82\uab83\uab84\uab85\uab86\uab87\uab88\uab89\uab8a\uab8b\uab8c\uab8d\uab8e\uab8f\uab90\uab91\uab92\uab93\uab94\uab95\uab96\uab97\uab98\uab99\uab9a\uab9b\uab9c\uab9d\uab9e\uab9f\uaba0\uaba1\uaba2\uaba3\uaba4\uaba5\uaba6\uaba7\uaba8\uaba9\uabaa\uabab\uabac\uabad\uabae\uabaf\uabb0\uabb1\uabb2\uabb3\uabb4\uabb5\uabb6\uabb7\uabb8\uabb9\uabba\uabbb\uabbc\uabbd\uabbe\uabbf\uabc0\uabc1\uabc2\uabc3\uabc4\uabc5\uabc6\uabc7\uabc8\uabc9\uabca\uabcb\uabcc\uabcd\uabce\uabcf\uabd0\uabd1\uabd2\uab
d3\uabd4\uabd5\uabd6\uabd7\uabd8\uabd9\uabda\uabdb\uabdc\uabdd\uabde\uabdf\uabe0\uabe1\uabe2\uabe3\uabe4\uabe5\uabe6\uabe7\uabe8\uabe9\uabea\uabeb\uabec\uabed\uabee\uabef\uabf0\uabf1\uabf2\uabf3\uabf4\uabf5\uabf6\uabf7\uabf8\uabf9\uabfa\uabfb\uabfc\uabfd\uabfe\uabff\ud7a4\ud7a5\ud7a6\ud7a7\ud7a8\ud7a9\ud7aa\ud7ab\ud7ac\ud7ad\ud7ae\ud7af\ud7b0\ud7b1\ud7b2\ud7b3\ud7b4\ud7b5\ud7b6\ud7b7\ud7b8\ud7b9\ud7ba\ud7bb\ud7bc\ud7bd\ud7be\ud7bf\ud7c0\ud7c1\ud7c2\ud7c3\ud7c4\ud7c5\ud7c6\ud7c7\ud7c8\ud7c9\ud7ca\ud7cb\ud7cc\ud7cd\ud7ce\ud7cf\ud7d0\ud7d1\ud7d2\ud7d3\ud7d4\ud7d5\ud7d6\ud7d7\ud7d8\ud7d9\ud7da\ud7db\ud7dc\ud7dd\ud7de\ud7df\ud7e0\ud7e1\ud7e2\ud7e3\ud7e4\ud7e5\ud7e6\ud7e7\ud7e8\ud7e9\ud7ea\ud7eb\ud7ec\ud7ed\ud7ee\ud7ef\ud7f0\ud7f1\ud7f2\ud7f3\ud7f4\ud7f5\ud7f6\ud7f7\ud7f8\ud7f9\ud7fa\ud7fb\ud7fc\ud7fd\ud7fe\ud7ff\ufa2e\ufa2f\ufa6b\ufa6c\ufa6d\ufa6e\ufa6f\ufada\ufadb\ufadc\ufadd\ufade\ufadf\ufae0\ufae1\ufae2\ufae3\ufae4\ufae5\ufae6\ufae7\ufae8\ufae9\ufaea\ufaeb\ufaec\ufaed\ufaee\ufaef\ufaf0\ufaf1\ufaf2\ufaf3\ufaf4\ufaf5\ufaf6\ufaf7\ufaf8\ufaf9\ufafa\ufafb\ufafc\ufafd\ufafe\ufaff\ufb07\ufb08\ufb09\ufb0a\ufb0b\ufb0c\ufb0d\ufb0e\ufb0f\ufb10\ufb11\ufb12\ufb18\ufb19\ufb1a\ufb1b\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbb2\ufbb3\ufbb4\ufbb5\ufbb6\ufbb7\ufbb8\ufbb9\ufbba\ufbbb\ufbbc\ufbbd\ufbbe\ufbbf\ufbc0\ufbc1\ufbc2\ufbc3\ufbc4\ufbc5\ufbc6\ufbc7\ufbc8\ufbc9\ufbca\ufbcb\ufbcc\ufbcd\ufbce\ufbcf\ufbd0\ufbd1\ufbd2\ufd40\ufd41\ufd42\ufd43\ufd44\ufd45\ufd46\ufd47\ufd48\ufd49\ufd4a\ufd4b\ufd4c\ufd4d\ufd4e\ufd4f\ufd90\ufd91\ufdc8\ufdc9\ufdca\ufdcb\ufdcc\ufdcd\ufdce\ufdcf\ufdd0\ufdd1\ufdd2\ufdd3\ufdd4\ufdd5\ufdd6\ufdd7\ufdd8\ufdd9\ufdda\ufddb\ufddc\ufddd\ufdde\ufddf\ufde0\ufde1\ufde2\ufde3\ufde4\ufde5\ufde6\ufde7\ufde8\ufde9\ufdea\ufdeb\ufdec\ufded\ufdee\ufdef\ufdfe\ufdff\ufe1a\ufe1b\ufe1c\ufe1d\ufe1e\ufe1f\ufe24\ufe25\ufe26\ufe27\ufe28\ufe29\ufe2a\ufe2b\ufe2c\ufe2d\ufe2e\ufe2f\ufe53\ufe67\ufe6c\ufe6d\ufe6e\ufe6f\ufe75\ufefd\ufefe\uff00\uffbf\uffc0\uffc1\uffc8\uffc9\uffd0\uffd1\uffd8\uffd9
\uffdd\uffde\uffdf\uffe7\uffef\ufff0\ufff1\ufff2\ufff3\ufff4\ufff5\ufff6\ufff7\ufff8\ufffe'
+
+Co = u'\ue000\ue001\ue002\ue003\ue004\ue005\ue006\ue007\ue008\ue009\ue00a\ue00b\ue00c\ue00d\ue00e\ue00f\ue010\ue011\ue012\ue013\ue014\ue015\ue016\ue017\ue018\ue019\ue01a\ue01b\ue01c\ue01d\ue01e\ue01f\ue020\ue021\ue022\ue023\ue024\ue025\ue026\ue027\ue028\ue029\ue02a\ue02b\ue02c\ue02d\ue02e\ue02f\ue030\ue031\ue032\ue033\ue034\ue035\ue036\ue037\ue038\ue039\ue03a\ue03b\ue03c\ue03d\ue03e\ue03f\ue040\ue041\ue042\ue043\ue044\ue045\ue046\ue047\ue048\ue049\ue04a\ue04b\ue04c\ue04d\ue04e\ue04f\ue050\ue051\ue052\ue053\ue054\ue055\ue056\ue057\ue058\ue059\ue05a\ue05b\ue05c\ue05d\ue05e\ue05f\ue060\ue061\ue062\ue063\ue064\ue065\ue066\ue067\ue068\ue069\ue06a\ue06b\ue06c\ue06d\ue06e\ue06f\ue070\ue071\ue072\ue073\ue074\ue075\ue076\ue077\ue078\ue079\ue07a\ue07b\ue07c\ue07d\ue07e\ue07f\ue080\ue081\ue082\ue083\ue084\ue085\ue086\ue087\ue088\ue089\ue08a\ue08b\ue08c\ue08d\ue08e\ue08f\ue090\ue091\ue092\ue093\ue094\ue095\ue096\ue097\ue098\ue099\ue09a\ue09b\ue09c\ue09d\ue09e\ue09f\ue0a0\ue0a1\ue0a2\ue0a3\ue0a4\ue0a5\ue0a6\ue0a7\ue0a8\ue0a9\ue0aa\ue0ab\ue0ac\ue0ad\ue0ae\ue0af\ue0b0\ue0b1\ue0b2\ue0b3\ue0b4\ue0b5\ue0b6\ue0b7\ue0b8\ue0b9\ue0ba\ue0bb\ue0bc\ue0bd\ue0be\ue0bf\ue0c0\ue0c1\ue0c2\ue0c3\ue0c4\ue0c5\ue0c6\ue0c7\ue0c8\ue0c9\ue0ca\ue0cb\ue0cc\ue0cd\ue0ce\ue0cf\ue0d0\ue0d1\ue0d2\ue0d3\ue0d4\ue0d5\ue0d6\ue0d7\ue0d8\ue0d9\ue0da\ue0db\ue0dc\ue0dd\ue0de\ue0df\ue0e0\ue0e1\ue0e2\ue0e3\ue0e4\ue0e5\ue0e6\ue0e7\ue0e8\ue0e9\ue0ea\ue0eb\ue0ec\ue0ed\ue0ee\ue0ef\ue0f0\ue0f1\ue0f2\ue0f3\ue0f4\ue0f5\ue0f6\ue0f7\ue0f8\ue0f9\ue0fa\ue0fb\ue0fc\ue0fd\ue0fe\ue0ff\ue100\ue101\ue102\ue103\ue104\ue105\ue106\ue107\ue108\ue109\ue10a\ue10b\ue10c\ue10d\ue10e\ue10f\ue110\ue111\ue112\ue113\ue114\ue115\ue116\ue117\ue118\ue119\ue11a\ue11b\ue11c\ue11d\ue11e\ue11f\ue120\ue121\ue122\ue123\ue124\ue125\ue126\ue127\ue128\ue129\ue12a\ue12b\ue12c\ue12d\ue12e\ue12f\ue130\ue131\ue132\ue133\ue134\ue135\ue136\ue137\ue138\ue139\ue13a\ue13b\ue13c\ue13d\ue13e\ue13f\ue140\ue141\ue142\ue143\ue144\ue145\ue146\ue147\ue148\ue149\ue14a\ue14b
\ue14c\ue14d\ue14e\ue14f\ue150\ue151\ue152\ue153\ue154\ue155\ue156\ue157\ue158\ue159\ue15a\ue15b\ue15c\ue15d\ue15e\ue15f\ue160\ue161\ue162\ue163\ue164\ue165\ue166\ue167\ue168\ue169\ue16a\ue16b\ue16c\ue16d\ue16e\ue16f\ue170\ue171\ue172\ue173\ue174\ue175\ue176\ue177\ue178\ue179\ue17a\ue17b\ue17c\ue17d\ue17e\ue17f\ue180\ue181\ue182\ue183\ue184\ue185\ue186\ue187\ue188\ue189\ue18a\ue18b\ue18c\ue18d\ue18e\ue18f\ue190\ue191\ue192\ue193\ue194\ue195\ue196\ue197\ue198\ue199\ue19a\ue19b\ue19c\ue19d\ue19e\ue19f\ue1a0\ue1a1\ue1a2\ue1a3\ue1a4\ue1a5\ue1a6\ue1a7\ue1a8\ue1a9\ue1aa\ue1ab\ue1ac\ue1ad\ue1ae\ue1af\ue1b0\ue1b1\ue1b2\ue1b3\ue1b4\ue1b5\ue1b6\ue1b7\ue1b8\ue1b9\ue1ba\ue1bb\ue1bc\ue1bd\ue1be\ue1bf\ue1c0\ue1c1\ue1c2\ue1c3\ue1c4\ue1c5\ue1c6\ue1c7\ue1c8\ue1c9\ue1ca\ue1cb\ue1cc\ue1cd\ue1ce\ue1cf\ue1d0\ue1d1\ue1d2\ue1d3\ue1d4\ue1d5\ue1d6\ue1d7\ue1d8\ue1d9\ue1da\ue1db\ue1dc\ue1dd\ue1de\ue1df\ue1e0\ue1e1\ue1e2\ue1e3\ue1e4\ue1e5\ue1e6\ue1e7\ue1e8\ue1e9\ue1ea\ue1eb\ue1ec\ue1ed\ue1ee\ue1ef\ue1f0\ue1f1\ue1f2\ue1f3\ue1f4\ue1f5\ue1f6\ue1f7\ue1f8\ue1f9\ue1fa\ue1fb\ue1fc\ue1fd\ue1fe\ue1ff\ue200\ue201\ue202\ue203\ue204\ue205\ue206\ue207\ue208\ue209\ue20a\ue20b\ue20c\ue20d\ue20e\ue20f\ue210\ue211\ue212\ue213\ue214\ue215\ue216\ue217\ue218\ue219\ue21a\ue21b\ue21c\ue21d\ue21e\ue21f\ue220\ue221\ue222\ue223\ue224\ue225\ue226\ue227\ue228\ue229\ue22a\ue22b\ue22c\ue22d\ue22e\ue22f\ue230\ue231\ue232\ue233\ue234\ue235\ue236\ue237\ue238\ue239\ue23a\ue23b\ue23c\ue23d\ue23e\ue23f\ue240\ue241\ue242\ue243\ue244\ue245\ue246\ue247\ue248\ue249\ue24a\ue24b\ue24c\ue24d\ue24e\ue24f\ue250\ue251\ue252\ue253\ue254\ue255\ue256\ue257\ue258\ue259\ue25a\ue25b\ue25c\ue25d\ue25e\ue25f\ue260\ue261\ue262\ue263\ue264\ue265\ue266\ue267\ue268\ue269\ue26a\ue26b\ue26c\ue26d\ue26e\ue26f\ue270\ue271\ue272\ue273\ue274\ue275\ue276\ue277\ue278\ue279\ue27a\ue27b\ue27c\ue27d\ue27e\ue27f\ue280\ue281\ue282\ue283\ue284\ue285\ue286\ue287\ue288\ue289\ue28a\ue28b\ue28c\ue28d\ue28e\ue28f\ue290\ue291\ue292\ue293\ue294\ue295\ue296\ue297\ue298\u
e299\ue29a\ue29b\ue29c\ue29d\ue29e\ue29f\ue2a0\ue2a1\ue2a2\ue2a3\ue2a4\ue2a5\ue2a6\ue2a7\ue2a8\ue2a9\ue2aa\ue2ab\ue2ac\ue2ad\ue2ae\ue2af\ue2b0\ue2b1\ue2b2\ue2b3\ue2b4\ue2b5\ue2b6\ue2b7\ue2b8\ue2b9\ue2ba\ue2bb\ue2bc\ue2bd\ue2be\ue2bf\ue2c0\ue2c1\ue2c2\ue2c3\ue2c4\ue2c5\ue2c6\ue2c7\ue2c8\ue2c9\ue2ca\ue2cb\ue2cc\ue2cd\ue2ce\ue2cf\ue2d0\ue2d1\ue2d2\ue2d3\ue2d4\ue2d5\ue2d6\ue2d7\ue2d8\ue2d9\ue2da\ue2db\ue2dc\ue2dd\ue2de\ue2df\ue2e0\ue2e1\ue2e2\ue2e3\ue2e4\ue2e5\ue2e6\ue2e7\ue2e8\ue2e9\ue2ea\ue2eb\ue2ec\ue2ed\ue2ee\ue2ef\ue2f0\ue2f1\ue2f2\ue2f3\ue2f4\ue2f5\ue2f6\ue2f7\ue2f8\ue2f9\ue2fa\ue2fb\ue2fc\ue2fd\ue2fe\ue2ff\ue300\ue301\ue302\ue303\ue304\ue305\ue306\ue307\ue308\ue309\ue30a\ue30b\ue30c\ue30d\ue30e\ue30f\ue310\ue311\ue312\ue313\ue314\ue315\ue316\ue317\ue318\ue319\ue31a\ue31b\ue31c\ue31d\ue31e\ue31f\ue320\ue321\ue322\ue323\ue324\ue325\ue326\ue327\ue328\ue329\ue32a\ue32b\ue32c\ue32d\ue32e\ue32f\ue330\ue331\ue332\ue333\ue334\ue335\ue336\ue337\ue338\ue339\ue33a\ue33b\ue33c\ue33d\ue33e\ue33f\ue340\ue341\ue342\ue343\ue344\ue345\ue346\ue347\ue348\ue349\ue34a\ue34b\ue34c\ue34d\ue34e\ue34f\ue350\ue351\ue352\ue353\ue354\ue355\ue356\ue357\ue358\ue359\ue35a\ue35b\ue35c\ue35d\ue35e\ue35f\ue360\ue361\ue362\ue363\ue364\ue365\ue366\ue367\ue368\ue369\ue36a\ue36b\ue36c\ue36d\ue36e\ue36f\ue370\ue371\ue372\ue373\ue374\ue375\ue376\ue377\ue378\ue379\ue37a\ue37b\ue37c\ue37d\ue37e\ue37f\ue380\ue381\ue382\ue383\ue384\ue385\ue386\ue387\ue388\ue389\ue38a\ue38b\ue38c\ue38d\ue38e\ue38f\ue390\ue391\ue392\ue393\ue394\ue395\ue396\ue397\ue398\ue399\ue39a\ue39b\ue39c\ue39d\ue39e\ue39f\ue3a0\ue3a1\ue3a2\ue3a3\ue3a4\ue3a5\ue3a6\ue3a7\ue3a8\ue3a9\ue3aa\ue3ab\ue3ac\ue3ad\ue3ae\ue3af\ue3b0\ue3b1\ue3b2\ue3b3\ue3b4\ue3b5\ue3b6\ue3b7\ue3b8\ue3b9\ue3ba\ue3bb\ue3bc\ue3bd\ue3be\ue3bf\ue3c0\ue3c1\ue3c2\ue3c3\ue3c4\ue3c5\ue3c6\ue3c7\ue3c8\ue3c9\ue3ca\ue3cb\ue3cc\ue3cd\ue3ce\ue3cf\ue3d0\ue3d1\ue3d2\ue3d3\ue3d4\ue3d5\ue3d6\ue3d7\ue3d8\ue3d9\ue3da\ue3db\ue3dc\ue3dd\ue3de\ue3df\ue3e0\ue3e1\ue3e2\ue3e3\ue3e4\ue3e5\ue3
e6\ue3e7\ue3e8\ue3e9\ue3ea\ue3eb\ue3ec\ue3ed\ue3ee\ue3ef\ue3f0\ue3f1\ue3f2\ue3f3\ue3f4\ue3f5\ue3f6\ue3f7\ue3f8\ue3f9\ue3fa\ue3fb\ue3fc\ue3fd\ue3fe\ue3ff\ue400\ue401\ue402\ue403\ue404\ue405\ue406\ue407\ue408\ue409\ue40a\ue40b\ue40c\ue40d\ue40e\ue40f\ue410\ue411\ue412\ue413\ue414\ue415\ue416\ue417\ue418\ue419\ue41a\ue41b\ue41c\ue41d\ue41e\ue41f\ue420\ue421\ue422\ue423\ue424\ue425\ue426\ue427\ue428\ue429\ue42a\ue42b\ue42c\ue42d\ue42e\ue42f\ue430\ue431\ue432\ue433\ue434\ue435\ue436\ue437\ue438\ue439\ue43a\ue43b\ue43c\ue43d\ue43e\ue43f\ue440\ue441\ue442\ue443\ue444\ue445\ue446\ue447\ue448\ue449\ue44a\ue44b\ue44c\ue44d\ue44e\ue44f\ue450\ue451\ue452\ue453\ue454\ue455\ue456\ue457\ue458\ue459\ue45a\ue45b\ue45c\ue45d\ue45e\ue45f\ue460\ue461\ue462\ue463\ue464\ue465\ue466\ue467\ue468\ue469\ue46a\ue46b\ue46c\ue46d\ue46e\ue46f\ue470\ue471\ue472\ue473\ue474\ue475\ue476\ue477\ue478\ue479\ue47a\ue47b\ue47c\ue47d\ue47e\ue47f\ue480\ue481\ue482\ue483\ue484\ue485\ue486\ue487\ue488\ue489\ue48a\ue48b\ue48c\ue48d\ue48e\ue48f\ue490\ue491\ue492\ue493\ue494\ue495\ue496\ue497\ue498\ue499\ue49a\ue49b\ue49c\ue49d\ue49e\ue49f\ue4a0\ue4a1\ue4a2\ue4a3\ue4a4\ue4a5\ue4a6\ue4a7\ue4a8\ue4a9\ue4aa\ue4ab\ue4ac\ue4ad\ue4ae\ue4af\ue4b0\ue4b1\ue4b2\ue4b3\ue4b4\ue4b5\ue4b6\ue4b7\ue4b8\ue4b9\ue4ba\ue4bb\ue4bc\ue4bd\ue4be\ue4bf\ue4c0\ue4c1\ue4c2\ue4c3\ue4c4\ue4c5\ue4c6\ue4c7\ue4c8\ue4c9\ue4ca\ue4cb\ue4cc\ue4cd\ue4ce\ue4cf\ue4d0\ue4d1\ue4d2\ue4d3\ue4d4\ue4d5\ue4d6\ue4d7\ue4d8\ue4d9\ue4da\ue4db\ue4dc\ue4dd\ue4de\ue4df\ue4e0\ue4e1\ue4e2\ue4e3\ue4e4\ue4e5\ue4e6\ue4e7\ue4e8\ue4e9\ue4ea\ue4eb\ue4ec\ue4ed\ue4ee\ue4ef\ue4f0\ue4f1\ue4f2\ue4f3\ue4f4\ue4f5\ue4f6\ue4f7\ue4f8\ue4f9\ue4fa\ue4fb\ue4fc\ue4fd\ue4fe\ue4ff\ue500\ue501\ue502\ue503\ue504\ue505\ue506\ue507\ue508\ue509\ue50a\ue50b\ue50c\ue50d\ue50e\ue50f\ue510\ue511\ue512\ue513\ue514\ue515\ue516\ue517\ue518\ue519\ue51a\ue51b\ue51c\ue51d\ue51e\ue51f\ue520\ue521\ue522\ue523\ue524\ue525\ue526\ue527\ue528\ue529\ue52a\ue52b\ue52c\ue52d\ue52e\ue52f\ue530\ue531\ue532\ue533
\ue534\ue535\ue536\ue537\ue538\ue539\ue53a\ue53b\ue53c\ue53d\ue53e\ue53f\ue540\ue541\ue542\ue543\ue544\ue545\ue546\ue547\ue548\ue549\ue54a\ue54b\ue54c\ue54d\ue54e\ue54f\ue550\ue551\ue552\ue553\ue554\ue555\ue556\ue557\ue558\ue559\ue55a\ue55b\ue55c\ue55d\ue55e\ue55f\ue560\ue561\ue562\ue563\ue564\ue565\ue566\ue567\ue568\ue569\ue56a\ue56b\ue56c\ue56d\ue56e\ue56f\ue570\ue571\ue572\ue573\ue574\ue575\ue576\ue577\ue578\ue579\ue57a\ue57b\ue57c\ue57d\ue57e\ue57f\ue580\ue581\ue582\ue583\ue584\ue585\ue586\ue587\ue588\ue589\ue58a\ue58b\ue58c\ue58d\ue58e\ue58f\ue590\ue591\ue592\ue593\ue594\ue595\ue596\ue597\ue598\ue599\ue59a\ue59b\ue59c\ue59d\ue59e\ue59f\ue5a0\ue5a1\ue5a2\ue5a3\ue5a4\ue5a5\ue5a6\ue5a7\ue5a8\ue5a9\ue5aa\ue5ab\ue5ac\ue5ad\ue5ae\ue5af\ue5b0\ue5b1\ue5b2\ue5b3\ue5b4\ue5b5\ue5b6\ue5b7\ue5b8\ue5b9\ue5ba\ue5bb\ue5bc\ue5bd\ue5be\ue5bf\ue5c0\ue5c1\ue5c2\ue5c3\ue5c4\ue5c5\ue5c6\ue5c7\ue5c8\ue5c9\ue5ca\ue5cb\ue5cc\ue5cd\ue5ce\ue5cf\ue5d0\ue5d1\ue5d2\ue5d3\ue5d4\ue5d5\ue5d6\ue5d7\ue5d8\ue5d9\ue5da\ue5db\ue5dc\ue5dd\ue5de\ue5df\ue5e0\ue5e1\ue5e2\ue5e3\ue5e4\ue5e5\ue5e6\ue5e7\ue5e8\ue5e9\ue5ea\ue5eb\ue5ec\ue5ed\ue5ee\ue5ef\ue5f0\ue5f1\ue5f2\ue5f3\ue5f4\ue5f5\ue5f6\ue5f7\ue5f8\ue5f9\ue5fa\ue5fb\ue5fc\ue5fd\ue5fe\ue5ff\ue600\ue601\ue602\ue603\ue604\ue605\ue606\ue607\ue608\ue609\ue60a\ue60b\ue60c\ue60d\ue60e\ue60f\ue610\ue611\ue612\ue613\ue614\ue615\ue616\ue617\ue618\ue619\ue61a\ue61b\ue61c\ue61d\ue61e\ue61f\ue620\ue621\ue622\ue623\ue624\ue625\ue626\ue627\ue628\ue629\ue62a\ue62b\ue62c\ue62d\ue62e\ue62f\ue630\ue631\ue632\ue633\ue634\ue635\ue636\ue637\ue638\ue639\ue63a\ue63b\ue63c\ue63d\ue63e\ue63f\ue640\ue641\ue642\ue643\ue644\ue645\ue646\ue647\ue648\ue649\ue64a\ue64b\ue64c\ue64d\ue64e\ue64f\ue650\ue651\ue652\ue653\ue654\ue655\ue656\ue657\ue658\ue659\ue65a\ue65b\ue65c\ue65d\ue65e\ue65f\ue660\ue661\ue662\ue663\ue664\ue665\ue666\ue667\ue668\ue669\ue66a\ue66b\ue66c\ue66d\ue66e\ue66f\ue670\ue671\ue672\ue673\ue674\ue675\ue676\ue677\ue678\ue679\ue67a\ue67b\ue67c\ue67d\ue67e\ue67f\ue680\u
e681\ue682\ue683\ue684\ue685\ue686\ue687\ue688\ue689\ue68a\ue68b\ue68c\ue68d\ue68e\ue68f\ue690\ue691\ue692\ue693\ue694\ue695\ue696\ue697\ue698\ue699\ue69a\ue69b\ue69c\ue69d\ue69e\ue69f\ue6a0\ue6a1\ue6a2\ue6a3\ue6a4\ue6a5\ue6a6\ue6a7\ue6a8\ue6a9\ue6aa\ue6ab\ue6ac\ue6ad\ue6ae\ue6af\ue6b0\ue6b1\ue6b2\ue6b3\ue6b4\ue6b5\ue6b6\ue6b7\ue6b8\ue6b9\ue6ba\ue6bb\ue6bc\ue6bd\ue6be\ue6bf\ue6c0\ue6c1\ue6c2\ue6c3\ue6c4\ue6c5\ue6c6\ue6c7\ue6c8\ue6c9\ue6ca\ue6cb\ue6cc\ue6cd\ue6ce\ue6cf\ue6d0\ue6d1\ue6d2\ue6d3\ue6d4\ue6d5\ue6d6\ue6d7\ue6d8\ue6d9\ue6da\ue6db\ue6dc\ue6dd\ue6de\ue6df\ue6e0\ue6e1\ue6e2\ue6e3\ue6e4\ue6e5\ue6e6\ue6e7\ue6e8\ue6e9\ue6ea\ue6eb\ue6ec\ue6ed\ue6ee\ue6ef\ue6f0\ue6f1\ue6f2\ue6f3\ue6f4\ue6f5\ue6f6\ue6f7\ue6f8\ue6f9\ue6fa\ue6fb\ue6fc\ue6fd\ue6fe\ue6ff\ue700\ue701\ue702\ue703\ue704\ue705\ue706\ue707\ue708\ue709\ue70a\ue70b\ue70c\ue70d\ue70e\ue70f\ue710\ue711\ue712\ue713\ue714\ue715\ue716\ue717\ue718\ue719\ue71a\ue71b\ue71c\ue71d\ue71e\ue71f\ue720\ue721\ue722\ue723\ue724\ue725\ue726\ue727\ue728\ue729\ue72a\ue72b\ue72c\ue72d\ue72e\ue72f\ue730\ue731\ue732\ue733\ue734\ue735\ue736\ue737\ue738\ue739\ue73a\ue73b\ue73c\ue73d\ue73e\ue73f\ue740\ue741\ue742\ue743\ue744\ue745\ue746\ue747\ue748\ue749\ue74a\ue74b\ue74c\ue74d\ue74e\ue74f\ue750\ue751\ue752\ue753\ue754\ue755\ue756\ue757\ue758\ue759\ue75a\ue75b\ue75c\ue75d\ue75e\ue75f\ue760\ue761\ue762\ue763\ue764\ue765\ue766\ue767\ue768\ue769\ue76a\ue76b\ue76c\ue76d\ue76e\ue76f\ue770\ue771\ue772\ue773\ue774\ue775\ue776\ue777\ue778\ue779\ue77a\ue77b\ue77c\ue77d\ue77e\ue77f\ue780\ue781\ue782\ue783\ue784\ue785\ue786\ue787\ue788\ue789\ue78a\ue78b\ue78c\ue78d\ue78e\ue78f\ue790\ue791\ue792\ue793\ue794\ue795\ue796\ue797\ue798\ue799\ue79a\ue79b\ue79c\ue79d\ue79e\ue79f\ue7a0\ue7a1\ue7a2\ue7a3\ue7a4\ue7a5\ue7a6\ue7a7\ue7a8\ue7a9\ue7aa\ue7ab\ue7ac\ue7ad\ue7ae\ue7af\ue7b0\ue7b1\ue7b2\ue7b3\ue7b4\ue7b5\ue7b6\ue7b7\ue7b8\ue7b9\ue7ba\ue7bb\ue7bc\ue7bd\ue7be\ue7bf\ue7c0\ue7c1\ue7c2\ue7c3\ue7c4\ue7c5\ue7c6\ue7c7\ue7c8\ue7c9\ue7ca\ue7cb\ue7cc\ue7cd\ue7
ce\ue7cf\ue7d0\ue7d1\ue7d2\ue7d3\ue7d4\ue7d5\ue7d6\ue7d7\ue7d8\ue7d9\ue7da\ue7db\ue7dc\ue7dd\ue7de\ue7df\ue7e0\ue7e1\ue7e2\ue7e3\ue7e4\ue7e5\ue7e6\ue7e7\ue7e8\ue7e9\ue7ea\ue7eb\ue7ec\ue7ed\ue7ee\ue7ef\ue7f0\ue7f1\ue7f2\ue7f3\ue7f4\ue7f5\ue7f6\ue7f7\ue7f8\ue7f9\ue7fa\ue7fb\ue7fc\ue7fd\ue7fe\ue7ff\ue800\ue801\ue802\ue803\ue804\ue805\ue806\ue807\ue808\ue809\ue80a\ue80b\ue80c\ue80d\ue80e\ue80f\ue810\ue811\ue812\ue813\ue814\ue815\ue816\ue817\ue818\ue819\ue81a\ue81b\ue81c\ue81d\ue81e\ue81f\ue820\ue821\ue822\ue823\ue824\ue825\ue826\ue827\ue828\ue829\ue82a\ue82b\ue82c\ue82d\ue82e\ue82f\ue830\ue831\ue832\ue833\ue834\ue835\ue836\ue837\ue838\ue839\ue83a\ue83b\ue83c\ue83d\ue83e\ue83f\ue840\ue841\ue842\ue843\ue844\ue845\ue846\ue847\ue848\ue849\ue84a\ue84b\ue84c\ue84d\ue84e\ue84f\ue850\ue851\ue852\ue853\ue854\ue855\ue856\ue857\ue858\ue859\ue85a\ue85b\ue85c\ue85d\ue85e\ue85f\ue860\ue861\ue862\ue863\ue864\ue865\ue866\ue867\ue868\ue869\ue86a\ue86b\ue86c\ue86d\ue86e\ue86f\ue870\ue871\ue872\ue873\ue874\ue875\ue876\ue877\ue878\ue879\ue87a\ue87b\ue87c\ue87d\ue87e\ue87f\ue880\ue881\ue882\ue883\ue884\ue885\ue886\ue887\ue888\ue889\ue88a\ue88b\ue88c\ue88d\ue88e\ue88f\ue890\ue891\ue892\ue893\ue894\ue895\ue896\ue897\ue898\ue899\ue89a\ue89b\ue89c\ue89d\ue89e\ue89f\ue8a0\ue8a1\ue8a2\ue8a3\ue8a4\ue8a5\ue8a6\ue8a7\ue8a8\ue8a9\ue8aa\ue8ab\ue8ac\ue8ad\ue8ae\ue8af\ue8b0\ue8b1\ue8b2\ue8b3\ue8b4\ue8b5\ue8b6\ue8b7\ue8b8\ue8b9\ue8ba\ue8bb\ue8bc\ue8bd\ue8be\ue8bf\ue8c0\ue8c1\ue8c2\ue8c3\ue8c4\ue8c5\ue8c6\ue8c7\ue8c8\ue8c9\ue8ca\ue8cb\ue8cc\ue8cd\ue8ce\ue8cf\ue8d0\ue8d1\ue8d2\ue8d3\ue8d4\ue8d5\ue8d6\ue8d7\ue8d8\ue8d9\ue8da\ue8db\ue8dc\ue8dd\ue8de\ue8df\ue8e0\ue8e1\ue8e2\ue8e3\ue8e4\ue8e5\ue8e6\ue8e7\ue8e8\ue8e9\ue8ea\ue8eb\ue8ec\ue8ed\ue8ee\ue8ef\ue8f0\ue8f1\ue8f2\ue8f3\ue8f4\ue8f5\ue8f6\ue8f7\ue8f8\ue8f9\ue8fa\ue8fb\ue8fc\ue8fd\ue8fe\ue8ff\ue900\ue901\ue902\ue903\ue904\ue905\ue906\ue907\ue908\ue909\ue90a\ue90b\ue90c\ue90d\ue90e\ue90f\ue910\ue911\ue912\ue913\ue914\ue915\ue916\ue917\ue918\ue919\ue91a\ue91b
\ue91c\ue91d\ue91e\ue91f\ue920\ue921\ue922\ue923\ue924\ue925\ue926\ue927\ue928\ue929\ue92a\ue92b\ue92c\ue92d\ue92e\ue92f\ue930\ue931\ue932\ue933\ue934\ue935\ue936\ue937\ue938\ue939\ue93a\ue93b\ue93c\ue93d\ue93e\ue93f\ue940\ue941\ue942\ue943\ue944\ue945\ue946\ue947\ue948\ue949\ue94a\ue94b\ue94c\ue94d\ue94e\ue94f\ue950\ue951\ue952\ue953\ue954\ue955\ue956\ue957\ue958\ue959\ue95a\ue95b\ue95c\ue95d\ue95e\ue95f\ue960\ue961\ue962\ue963\ue964\ue965\ue966\ue967\ue968\ue969\ue96a\ue96b\ue96c\ue96d\ue96e\ue96f\ue970\ue971\ue972\ue973\ue974\ue975\ue976\ue977\ue978\ue979\ue97a\ue97b\ue97c\ue97d\ue97e\ue97f\ue980\ue981\ue982\ue983\ue984\ue985\ue986\ue987\ue988\ue989\ue98a\ue98b\ue98c\ue98d\ue98e\ue98f\ue990\ue991\ue992\ue993\ue994\ue995\ue996\ue997\ue998\ue999\ue99a\ue99b\ue99c\ue99d\ue99e\ue99f\ue9a0\ue9a1\ue9a2\ue9a3\ue9a4\ue9a5\ue9a6\ue9a7\ue9a8\ue9a9\ue9aa\ue9ab\ue9ac\ue9ad\ue9ae\ue9af\ue9b0\ue9b1\ue9b2\ue9b3\ue9b4\ue9b5\ue9b6\ue9b7\ue9b8\ue9b9\ue9ba\ue9bb\ue9bc\ue9bd\ue9be\ue9bf\ue9c0\ue9c1\ue9c2\ue9c3\ue9c4\ue9c5\ue9c6\ue9c7\ue9c8\ue9c9\ue9ca\ue9cb\ue9cc\ue9cd\ue9ce\ue9cf\ue9d0\ue9d1\ue9d2\ue9d3\ue9d4\ue9d5\ue9d6\ue9d7\ue9d8\ue9d9\ue9da\ue9db\ue9dc\ue9dd\ue9de\ue9df\ue9e0\ue9e1\ue9e2\ue9e3\ue9e4\ue9e5\ue9e6\ue9e7\ue9e8\ue9e9\ue9ea\ue9eb\ue9ec\ue9ed\ue9ee\ue9ef\ue9f0\ue9f1\ue9f2\ue9f3\ue9f4\ue9f5\ue9f6\ue9f7\ue9f8\ue9f9\ue9fa\ue9fb\ue9fc\ue9fd\ue9fe\ue9ff\uea00\uea01\uea02\uea03\uea04\uea05\uea06\uea07\uea08\uea09\uea0a\uea0b\uea0c\uea0d\uea0e\uea0f\uea10\uea11\uea12\uea13\uea14\uea15\uea16\uea17\uea18\uea19\uea1a\uea1b\uea1c\uea1d\uea1e\uea1f\uea20\uea21\uea22\uea23\uea24\uea25\uea26\uea27\uea28\uea29\uea2a\uea2b\uea2c\uea2d\uea2e\uea2f\uea30\uea31\uea32\uea33\uea34\uea35\uea36\uea37\uea38\uea39\uea3a\uea3b\uea3c\uea3d\uea3e\uea3f\uea40\uea41\uea42\uea43\uea44\uea45\uea46\uea47\uea48\uea49\uea4a\uea4b\uea4c\uea4d\uea4e\uea4f\uea50\uea51\uea52\uea53\uea54\uea55\uea56\uea57\uea58\uea59\uea5a\uea5b\uea5c\uea5d\uea5e\uea5f\uea60\uea61\uea62\uea63\uea64\uea65\uea66\uea67\uea68\u
ea69\uea6a\uea6b\uea6c\uea6d\uea6e\uea6f\uea70\uea71\uea72\uea73\uea74\uea75\uea76\uea77\uea78\uea79\uea7a\uea7b\uea7c\uea7d\uea7e\uea7f\uea80\uea81\uea82\uea83\uea84\uea85\uea86\uea87\uea88\uea89\uea8a\uea8b\uea8c\uea8d\uea8e\uea8f\uea90\uea91\uea92\uea93\uea94\uea95\uea96\uea97\uea98\uea99\uea9a\uea9b\uea9c\uea9d\uea9e\uea9f\ueaa0\ueaa1\ueaa2\ueaa3\ueaa4\ueaa5\ueaa6\ueaa7\ueaa8\ueaa9\ueaaa\ueaab\ueaac\ueaad\ueaae\ueaaf\ueab0\ueab1\ueab2\ueab3\ueab4\ueab5\ueab6\ueab7\ueab8\ueab9\ueaba\ueabb\ueabc\ueabd\ueabe\ueabf\ueac0\ueac1\ueac2\ueac3\ueac4\ueac5\ueac6\ueac7\ueac8\ueac9\ueaca\ueacb\ueacc\ueacd\ueace\ueacf\uead0\uead1\uead2\uead3\uead4\uead5\uead6\uead7\uead8\uead9\ueada\ueadb\ueadc\ueadd\ueade\ueadf\ueae0\ueae1\ueae2\ueae3\ueae4\ueae5\ueae6\ueae7\ueae8\ueae9\ueaea\ueaeb\ueaec\ueaed\ueaee\ueaef\ueaf0\ueaf1\ueaf2\ueaf3\ueaf4\ueaf5\ueaf6\ueaf7\ueaf8\ueaf9\ueafa\ueafb\ueafc\ueafd\ueafe\ueaff\ueb00\ueb01\ueb02\ueb03\ueb04\ueb05\ueb06\ueb07\ueb08\ueb09\ueb0a\ueb0b\ueb0c\ueb0d\ueb0e\ueb0f\ueb10\ueb11\ueb12\ueb13\ueb14\ueb15\ueb16\ueb17\ueb18\ueb19\ueb1a\ueb1b\ueb1c\ueb1d\ueb1e\ueb1f\ueb20\ueb21\ueb22\ueb23\ueb24\ueb25\ueb26\ueb27\ueb28\ueb29\ueb2a\ueb2b\ueb2c\ueb2d\ueb2e\ueb2f\ueb30\ueb31\ueb32\ueb33\ueb34\ueb35\ueb36\ueb37\ueb38\ueb39\ueb3a\ueb3b\ueb3c\ueb3d\ueb3e\ueb3f\ueb40\ueb41\ueb42\ueb43\ueb44\ueb45\ueb46\ueb47\ueb48\ueb49\ueb4a\ueb4b\ueb4c\ueb4d\ueb4e\ueb4f\ueb50\ueb51\ueb52\ueb53\ueb54\ueb55\ueb56\ueb57\ueb58\ueb59\ueb5a\ueb5b\ueb5c\ueb5d\ueb5e\ueb5f\ueb60\ueb61\ueb62\ueb63\ueb64\ueb65\ueb66\ueb67\ueb68\ueb69\ueb6a\ueb6b\ueb6c\ueb6d\ueb6e\ueb6f\ueb70\ueb71\ueb72\ueb73\ueb74\ueb75\ueb76\ueb77\ueb78\ueb79\ueb7a\ueb7b\ueb7c\ueb7d\ueb7e\ueb7f\ueb80\ueb81\ueb82\ueb83\ueb84\ueb85\ueb86\ueb87\ueb88\ueb89\ueb8a\ueb8b\ueb8c\ueb8d\ueb8e\ueb8f\ueb90\ueb91\ueb92\ueb93\ueb94\ueb95\ueb96\ueb97\ueb98\ueb99\ueb9a\ueb9b\ueb9c\ueb9d\ueb9e\ueb9f\ueba0\ueba1\ueba2\ueba3\ueba4\ueba5\ueba6\ueba7\ueba8\ueba9\uebaa\uebab\uebac\uebad\uebae\uebaf\uebb0\uebb1\uebb2\uebb3\uebb4\uebb5\ueb
b6\uebb7\uebb8\uebb9\uebba\uebbb\uebbc\uebbd\uebbe\uebbf\uebc0\uebc1\uebc2\uebc3\uebc4\uebc5\uebc6\uebc7\uebc8\uebc9\uebca\uebcb\uebcc\uebcd\uebce\uebcf\uebd0\uebd1\uebd2\uebd3\uebd4\uebd5\uebd6\uebd7\uebd8\uebd9\uebda\uebdb\uebdc\uebdd\uebde\uebdf\uebe0\uebe1\uebe2\uebe3\uebe4\uebe5\uebe6\uebe7\uebe8\uebe9\uebea\uebeb\uebec\uebed\uebee\uebef\uebf0\uebf1\uebf2\uebf3\uebf4\uebf5\uebf6\uebf7\uebf8\uebf9\uebfa\uebfb\uebfc\uebfd\uebfe\uebff\uec00\uec01\uec02\uec03\uec04\uec05\uec06\uec07\uec08\uec09\uec0a\uec0b\uec0c\uec0d\uec0e\uec0f\uec10\uec11\uec12\uec13\uec14\uec15\uec16\uec17\uec18\uec19\uec1a\uec1b\uec1c\uec1d\uec1e\uec1f\uec20\uec21\uec22\uec23\uec24\uec25\uec26\uec27\uec28\uec29\uec2a\uec2b\uec2c\uec2d\uec2e\uec2f\uec30\uec31\uec32\uec33\uec34\uec35\uec36\uec37\uec38\uec39\uec3a\uec3b\uec3c\uec3d\uec3e\uec3f\uec40\uec41\uec42\uec43\uec44\uec45\uec46\uec47\uec48\uec49\uec4a\uec4b\uec4c\uec4d\uec4e\uec4f\uec50\uec51\uec52\uec53\uec54\uec55\uec56\uec57\uec58\uec59\uec5a\uec5b\uec5c\uec5d\uec5e\uec5f\uec60\uec61\uec62\uec63\uec64\uec65\uec66\uec67\uec68\uec69\uec6a\uec6b\uec6c\uec6d\uec6e\uec6f\uec70\uec71\uec72\uec73\uec74\uec75\uec76\uec77\uec78\uec79\uec7a\uec7b\uec7c\uec7d\uec7e\uec7f\uec80\uec81\uec82\uec83\uec84\uec85\uec86\uec87\uec88\uec89\uec8a\uec8b\uec8c\uec8d\uec8e\uec8f\uec90\uec91\uec92\uec93\uec94\uec95\uec96\uec97\uec98\uec99\uec9a\uec9b\uec9c\uec9d\uec9e\uec9f\ueca0\ueca1\ueca2\ueca3\ueca4\ueca5\ueca6\ueca7\ueca8\ueca9\uecaa\uecab\uecac\uecad\uecae\uecaf\uecb0\uecb1\uecb2\uecb3\uecb4\uecb5\uecb6\uecb7\uecb8\uecb9\uecba\uecbb\uecbc\uecbd\uecbe\uecbf\uecc0\uecc1\uecc2\uecc3\uecc4\uecc5\uecc6\uecc7\uecc8\uecc9\uecca\ueccb\ueccc\ueccd\uecce\ueccf\uecd0\uecd1\uecd2\uecd3\uecd4\uecd5\uecd6\uecd7\uecd8\uecd9\uecda\uecdb\uecdc\uecdd\uecde\uecdf\uece0\uece1\uece2\uece3\uece4\uece5\uece6\uece7\uece8\uece9\uecea\ueceb\uecec\ueced\uecee\uecef\uecf0\uecf1\uecf2\uecf3\uecf4\uecf5\uecf6\uecf7\uecf8\uecf9\uecfa\uecfb\uecfc\uecfd\uecfe\uecff\ued00\ued01\ued02\ued03
\ued04\ued05\ued06\ued07\ued08\ued09\ued0a\ued0b\ued0c\ued0d\ued0e\ued0f\ued10\ued11\ued12\ued13\ued14\ued15\ued16\ued17\ued18\ued19\ued1a\ued1b\ued1c\ued1d\ued1e\ued1f\ued20\ued21\ued22\ued23\ued24\ued25\ued26\ued27\ued28\ued29\ued2a\ued2b\ued2c\ued2d\ued2e\ued2f\ued30\ued31\ued32\ued33\ued34\ued35\ued36\ued37\ued38\ued39\ued3a\ued3b\ued3c\ued3d\ued3e\ued3f\ued40\ued41\ued42\ued43\ued44\ued45\ued46\ued47\ued48\ued49\ued4a\ued4b\ued4c\ued4d\ued4e\ued4f\ued50\ued51\ued52\ued53\ued54\ued55\ued56\ued57\ued58\ued59\ued5a\ued5b\ued5c\ued5d\ued5e\ued5f\ued60\ued61\ued62\ued63\ued64\ued65\ued66\ued67\ued68\ued69\ued6a\ued6b\ued6c\ued6d\ued6e\ued6f\ued70\ued71\ued72\ued73\ued74\ued75\ued76\ued77\ued78\ued79\ued7a\ued7b\ued7c\ued7d\ued7e\ued7f\ued80\ued81\ued82\ued83\ued84\ued85\ued86\ued87\ued88\ued89\ued8a\ued8b\ued8c\ued8d\ued8e\ued8f\ued90\ued91\ued92\ued93\ued94\ued95\ued96\ued97\ued98\ued99\ued9a\ued9b\ued9c\ued9d\ued9e\ued9f\ueda0\ueda1\ueda2\ueda3\ueda4\ueda5\ueda6\ueda7\ueda8\ueda9\uedaa\uedab\uedac\uedad\uedae\uedaf\uedb0\uedb1\uedb2\uedb3\uedb4\uedb5\uedb6\uedb7\uedb8\uedb9\uedba\uedbb\uedbc\uedbd\uedbe\uedbf\uedc0\uedc1\uedc2\uedc3\uedc4\uedc5\uedc6\uedc7\uedc8\uedc9\uedca\uedcb\uedcc\uedcd\uedce\uedcf\uedd0\uedd1\uedd2\uedd3\uedd4\uedd5\uedd6\uedd7\uedd8\uedd9\uedda\ueddb\ueddc\ueddd\uedde\ueddf\uede0\uede1\uede2\uede3\uede4\uede5\uede6\uede7\uede8\uede9\uedea\uedeb\uedec\ueded\uedee\uedef\uedf0\uedf1\uedf2\uedf3\uedf4\uedf5\uedf6\uedf7\uedf8\uedf9\uedfa\uedfb\uedfc\uedfd\uedfe\uedff\uee00\uee01\uee02\uee03\uee04\uee05\uee06\uee07\uee08\uee09\uee0a\uee0b\uee0c\uee0d\uee0e\uee0f\uee10\uee11\uee12\uee13\uee14\uee15\uee16\uee17\uee18\uee19\uee1a\uee1b\uee1c\uee1d\uee1e\uee1f\uee20\uee21\uee22\uee23\uee24\uee25\uee26\uee27\uee28\uee29\uee2a\uee2b\uee2c\uee2d\uee2e\uee2f\uee30\uee31\uee32\uee33\uee34\uee35\uee36\uee37\uee38\uee39\uee3a\uee3b\uee3c\uee3d\uee3e\uee3f\uee40\uee41\uee42\uee43\uee44\uee45\uee46\uee47\uee48\uee49\uee4a\uee4b\uee4c\uee4d\uee4e\uee4f\uee50\u
ee51\uee52\uee53\uee54\uee55\uee56\uee57\uee58\uee59\uee5a\uee5b\uee5c\uee5d\uee5e\uee5f\uee60\uee61\uee62\uee63\uee64\uee65\uee66\uee67\uee68\uee69\uee6a\uee6b\uee6c\uee6d\uee6e\uee6f\uee70\uee71\uee72\uee73\uee74\uee75\uee76\uee77\uee78\uee79\uee7a\uee7b\uee7c\uee7d\uee7e\uee7f\uee80\uee81\uee82\uee83\uee84\uee85\uee86\uee87\uee88\uee89\uee8a\uee8b\uee8c\uee8d\uee8e\uee8f\uee90\uee91\uee92\uee93\uee94\uee95\uee96\uee97\uee98\uee99\uee9a\uee9b\uee9c\uee9d\uee9e\uee9f\ueea0\ueea1\ueea2\ueea3\ueea4\ueea5\ueea6\ueea7\ueea8\ueea9\ueeaa\ueeab\ueeac\ueead\ueeae\ueeaf\ueeb0\ueeb1\ueeb2\ueeb3\ueeb4\ueeb5\ueeb6\ueeb7\ueeb8\ueeb9\ueeba\ueebb\ueebc\ueebd\ueebe\ueebf\ueec0\ueec1\ueec2\ueec3\ueec4\ueec5\ueec6\ueec7\ueec8\ueec9\ueeca\ueecb\ueecc\ueecd\ueece\ueecf\ueed0\ueed1\ueed2\ueed3\ueed4\ueed5\ueed6\ueed7\ueed8\ueed9\ueeda\ueedb\ueedc\ueedd\ueede\ueedf\ueee0\ueee1\ueee2\ueee3\ueee4\ueee5\ueee6\ueee7\ueee8\ueee9\ueeea\ueeeb\ueeec\ueeed\ueeee\ueeef\ueef0\ueef1\ueef2\ueef3\ueef4\ueef5\ueef6\ueef7\ueef8\ueef9\ueefa\ueefb\ueefc\ueefd\ueefe\ueeff\uef00\uef01\uef02\uef03\uef04\uef05\uef06\uef07\uef08\uef09\uef0a\uef0b\uef0c\uef0d\uef0e\uef0f\uef10\uef11\uef12\uef13\uef14\uef15\uef16\uef17\uef18\uef19\uef1a\uef1b\uef1c\uef1d\uef1e\uef1f\uef20\uef21\uef22\uef23\uef24\uef25\uef26\uef27\uef28\uef29\uef2a\uef2b\uef2c\uef2d\uef2e\uef2f\uef30\uef31\uef32\uef33\uef34\uef35\uef36\uef37\uef38\uef39\uef3a\uef3b\uef3c\uef3d\uef3e\uef3f\uef40\uef41\uef42\uef43\uef44\uef45\uef46\uef47\uef48\uef49\uef4a\uef4b\uef4c\uef4d\uef4e\uef4f\uef50\uef51\uef52\uef53\uef54\uef55\uef56\uef57\uef58\uef59\uef5a\uef5b\uef5c\uef5d\uef5e\uef5f\uef60\uef61\uef62\uef63\uef64\uef65\uef66\uef67\uef68\uef69\uef6a\uef6b\uef6c\uef6d\uef6e\uef6f\uef70\uef71\uef72\uef73\uef74\uef75\uef76\uef77\uef78\uef79\uef7a\uef7b\uef7c\uef7d\uef7e\uef7f\uef80\uef81\uef82\uef83\uef84\uef85\uef86\uef87\uef88\uef89\uef8a\uef8b\uef8c\uef8d\uef8e\uef8f\uef90\uef91\uef92\uef93\uef94\uef95\uef96\uef97\uef98\uef99\uef9a\uef9b\uef9c\uef9d\uef
9e\uef9f\uefa0\uefa1\uefa2\uefa3\uefa4\uefa5\uefa6\uefa7\uefa8\uefa9\uefaa\uefab\uefac\uefad\uefae\uefaf\uefb0\uefb1\uefb2\uefb3\uefb4\uefb5\uefb6\uefb7\uefb8\uefb9\uefba\uefbb\uefbc\uefbd\uefbe\uefbf\uefc0\uefc1\uefc2\uefc3\uefc4\uefc5\uefc6\uefc7\uefc8\uefc9\uefca\uefcb\uefcc\uefcd\uefce\uefcf\uefd0\uefd1\uefd2\uefd3\uefd4\uefd5\uefd6\uefd7\uefd8\uefd9\uefda\uefdb\uefdc\uefdd\uefde\uefdf\uefe0\uefe1\uefe2\uefe3\uefe4\uefe5\uefe6\uefe7\uefe8\uefe9\uefea\uefeb\uefec\uefed\uefee\uefef\ueff0\ueff1\ueff2\ueff3\ueff4\ueff5\ueff6\ueff7\ueff8\ueff9\ueffa\ueffb\ueffc\ueffd\ueffe\uefff\uf000\uf001\uf002\uf003\uf004\uf005\uf006\uf007\uf008\uf009\uf00a\uf00b\uf00c\uf00d\uf00e\uf00f\uf010\uf011\uf012\uf013\uf014\uf015\uf016\uf017\uf018\uf019\uf01a\uf01b\uf01c\uf01d\uf01e\uf01f\uf020\uf021\uf022\uf023\uf024\uf025\uf026\uf027\uf028\uf029\uf02a\uf02b\uf02c\uf02d\uf02e\uf02f\uf030\uf031\uf032\uf033\uf034\uf035\uf036\uf037\uf038\uf039\uf03a\uf03b\uf03c\uf03d\uf03e\uf03f\uf040\uf041\uf042\uf043\uf044\uf045\uf046\uf047\uf048\uf049\uf04a\uf04b\uf04c\uf04d\uf04e\uf04f\uf050\uf051\uf052\uf053\uf054\uf055\uf056\uf057\uf058\uf059\uf05a\uf05b\uf05c\uf05d\uf05e\uf05f\uf060\uf061\uf062\uf063\uf064\uf065\uf066\uf067\uf068\uf069\uf06a\uf06b\uf06c\uf06d\uf06e\uf06f\uf070\uf071\uf072\uf073\uf074\uf075\uf076\uf077\uf078\uf079\uf07a\uf07b\uf07c\uf07d\uf07e\uf07f\uf080\uf081\uf082\uf083\uf084\uf085\uf086\uf087\uf088\uf089\uf08a\uf08b\uf08c\uf08d\uf08e\uf08f\uf090\uf091\uf092\uf093\uf094\uf095\uf096\uf097\uf098\uf099\uf09a\uf09b\uf09c\uf09d\uf09e\uf09f\uf0a0\uf0a1\uf0a2\uf0a3\uf0a4\uf0a5\uf0a6\uf0a7\uf0a8\uf0a9\uf0aa\uf0ab\uf0ac\uf0ad\uf0ae\uf0af\uf0b0\uf0b1\uf0b2\uf0b3\uf0b4\uf0b5\uf0b6\uf0b7\uf0b8\uf0b9\uf0ba\uf0bb\uf0bc\uf0bd\uf0be\uf0bf\uf0c0\uf0c1\uf0c2\uf0c3\uf0c4\uf0c5\uf0c6\uf0c7\uf0c8\uf0c9\uf0ca\uf0cb\uf0cc\uf0cd\uf0ce\uf0cf\uf0d0\uf0d1\uf0d2\uf0d3\uf0d4\uf0d5\uf0d6\uf0d7\uf0d8\uf0d9\uf0da\uf0db\uf0dc\uf0dd\uf0de\uf0df\uf0e0\uf0e1\uf0e2\uf0e3\uf0e4\uf0e5\uf0e6\uf0e7\uf0e8\uf0e9\uf0ea\uf0eb
\uf0ec\uf0ed\uf0ee\uf0ef\uf0f0\uf0f1\uf0f2\uf0f3\uf0f4\uf0f5\uf0f6\uf0f7\uf0f8\uf0f9\uf0fa\uf0fb\uf0fc\uf0fd\uf0fe\uf0ff\uf100\uf101\uf102\uf103\uf104\uf105\uf106\uf107\uf108\uf109\uf10a\uf10b\uf10c\uf10d\uf10e\uf10f\uf110\uf111\uf112\uf113\uf114\uf115\uf116\uf117\uf118\uf119\uf11a\uf11b\uf11c\uf11d\uf11e\uf11f\uf120\uf121\uf122\uf123\uf124\uf125\uf126\uf127\uf128\uf129\uf12a\uf12b\uf12c\uf12d\uf12e\uf12f\uf130\uf131\uf132\uf133\uf134\uf135\uf136\uf137\uf138\uf139\uf13a\uf13b\uf13c\uf13d\uf13e\uf13f\uf140\uf141\uf142\uf143\uf144\uf145\uf146\uf147\uf148\uf149\uf14a\uf14b\uf14c\uf14d\uf14e\uf14f\uf150\uf151\uf152\uf153\uf154\uf155\uf156\uf157\uf158\uf159\uf15a\uf15b\uf15c\uf15d\uf15e\uf15f\uf160\uf161\uf162\uf163\uf164\uf165\uf166\uf167\uf168\uf169\uf16a\uf16b\uf16c\uf16d\uf16e\uf16f\uf170\uf171\uf172\uf173\uf174\uf175\uf176\uf177\uf178\uf179\uf17a\uf17b\uf17c\uf17d\uf17e\uf17f\uf180\uf181\uf182\uf183\uf184\uf185\uf186\uf187\uf188\uf189\uf18a\uf18b\uf18c\uf18d\uf18e\uf18f\uf190\uf191\uf192\uf193\uf194\uf195\uf196\uf197\uf198\uf199\uf19a\uf19b\uf19c\uf19d\uf19e\uf19f\uf1a0\uf1a1\uf1a2\uf1a3\uf1a4\uf1a5\uf1a6\uf1a7\uf1a8\uf1a9\uf1aa\uf1ab\uf1ac\uf1ad\uf1ae\uf1af\uf1b0\uf1b1\uf1b2\uf1b3\uf1b4\uf1b5\uf1b6\uf1b7\uf1b8\uf1b9\uf1ba\uf1bb\uf1bc\uf1bd\uf1be\uf1bf\uf1c0\uf1c1\uf1c2\uf1c3\uf1c4\uf1c5\uf1c6\uf1c7\uf1c8\uf1c9\uf1ca\uf1cb\uf1cc\uf1cd\uf1ce\uf1cf\uf1d0\uf1d1\uf1d2\uf1d3\uf1d4\uf1d5\uf1d6\uf1d7\uf1d8\uf1d9\uf1da\uf1db\uf1dc\uf1dd\uf1de\uf1df\uf1e0\uf1e1\uf1e2\uf1e3\uf1e4\uf1e5\uf1e6\uf1e7\uf1e8\uf1e9\uf1ea\uf1eb\uf1ec\uf1ed\uf1ee\uf1ef\uf1f0\uf1f1\uf1f2\uf1f3\uf1f4\uf1f5\uf1f6\uf1f7\uf1f8\uf1f9\uf1fa\uf1fb\uf1fc\uf1fd\uf1fe\uf1ff\uf200\uf201\uf202\uf203\uf204\uf205\uf206\uf207\uf208\uf209\uf20a\uf20b\uf20c\uf20d\uf20e\uf20f\uf210\uf211\uf212\uf213\uf214\uf215\uf216\uf217\uf218\uf219\uf21a\uf21b\uf21c\uf21d\uf21e\uf21f\uf220\uf221\uf222\uf223\uf224\uf225\uf226\uf227\uf228\uf229\uf22a\uf22b\uf22c\uf22d\uf22e\uf22f\uf230\uf231\uf232\uf233\uf234\uf235\uf236\uf237\uf238\u
f239\uf23a\uf23b\uf23c\uf23d\uf23e\uf23f\uf240\uf241\uf242\uf243\uf244\uf245\uf246\uf247\uf248\uf249\uf24a\uf24b\uf24c\uf24d\uf24e\uf24f\uf250\uf251\uf252\uf253\uf254\uf255\uf256\uf257\uf258\uf259\uf25a\uf25b\uf25c\uf25d\uf25e\uf25f\uf260\uf261\uf262\uf263\uf264\uf265\uf266\uf267\uf268\uf269\uf26a\uf26b\uf26c\uf26d\uf26e\uf26f\uf270\uf271\uf272\uf273\uf274\uf275\uf276\uf277\uf278\uf279\uf27a\uf27b\uf27c\uf27d\uf27e\uf27f\uf280\uf281\uf282\uf283\uf284\uf285\uf286\uf287\uf288\uf289\uf28a\uf28b\uf28c\uf28d\uf28e\uf28f\uf290\uf291\uf292\uf293\uf294\uf295\uf296\uf297\uf298\uf299\uf29a\uf29b\uf29c\uf29d\uf29e\uf29f\uf2a0\uf2a1\uf2a2\uf2a3\uf2a4\uf2a5\uf2a6\uf2a7\uf2a8\uf2a9\uf2aa\uf2ab\uf2ac\uf2ad\uf2ae\uf2af\uf2b0\uf2b1\uf2b2\uf2b3\uf2b4\uf2b5\uf2b6\uf2b7\uf2b8\uf2b9\uf2ba\uf2bb\uf2bc\uf2bd\uf2be\uf2bf\uf2c0\uf2c1\uf2c2\uf2c3\uf2c4\uf2c5\uf2c6\uf2c7\uf2c8\uf2c9\uf2ca\uf2cb\uf2cc\uf2cd\uf2ce\uf2cf\uf2d0\uf2d1\uf2d2\uf2d3\uf2d4\uf2d5\uf2d6\uf2d7\uf2d8\uf2d9\uf2da\uf2db\uf2dc\uf2dd\uf2de\uf2df\uf2e0\uf2e1\uf2e2\uf2e3\uf2e4\uf2e5\uf2e6\uf2e7\uf2e8\uf2e9\uf2ea\uf2eb\uf2ec\uf2ed\uf2ee\uf2ef\uf2f0\uf2f1\uf2f2\uf2f3\uf2f4\uf2f5\uf2f6\uf2f7\uf2f8\uf2f9\uf2fa\uf2fb\uf2fc\uf2fd\uf2fe\uf2ff\uf300\uf301\uf302\uf303\uf304\uf305\uf306\uf307\uf308\uf309\uf30a\uf30b\uf30c\uf30d\uf30e\uf30f\uf310\uf311\uf312\uf313\uf314\uf315\uf316\uf317\uf318\uf319\uf31a\uf31b\uf31c\uf31d\uf31e\uf31f\uf320\uf321\uf322\uf323\uf324\uf325\uf326\uf327\uf328\uf329\uf32a\uf32b\uf32c\uf32d\uf32e\uf32f\uf330\uf331\uf332\uf333\uf334\uf335\uf336\uf337\uf338\uf339\uf33a\uf33b\uf33c\uf33d\uf33e\uf33f\uf340\uf341\uf342\uf343\uf344\uf345\uf346\uf347\uf348\uf349\uf34a\uf34b\uf34c\uf34d\uf34e\uf34f\uf350\uf351\uf352\uf353\uf354\uf355\uf356\uf357\uf358\uf359\uf35a\uf35b\uf35c\uf35d\uf35e\uf35f\uf360\uf361\uf362\uf363\uf364\uf365\uf366\uf367\uf368\uf369\uf36a\uf36b\uf36c\uf36d\uf36e\uf36f\uf370\uf371\uf372\uf373\uf374\uf375\uf376\uf377\uf378\uf379\uf37a\uf37b\uf37c\uf37d\uf37e\uf37f\uf380\uf381\uf382\uf383\uf384\uf385\uf3
86\uf387\uf388\uf389\uf38a\uf38b\uf38c\uf38d\uf38e\uf38f\uf390\uf391\uf392\uf393\uf394\uf395\uf396\uf397\uf398\uf399\uf39a\uf39b\uf39c\uf39d\uf39e\uf39f\uf3a0\uf3a1\uf3a2\uf3a3\uf3a4\uf3a5\uf3a6\uf3a7\uf3a8\uf3a9\uf3aa\uf3ab\uf3ac\uf3ad\uf3ae\uf3af\uf3b0\uf3b1\uf3b2\uf3b3\uf3b4\uf3b5\uf3b6\uf3b7\uf3b8\uf3b9\uf3ba\uf3bb\uf3bc\uf3bd\uf3be\uf3bf\uf3c0\uf3c1\uf3c2\uf3c3\uf3c4\uf3c5\uf3c6\uf3c7\uf3c8\uf3c9\uf3ca\uf3cb\uf3cc\uf3cd\uf3ce\uf3cf\uf3d0\uf3d1\uf3d2\uf3d3\uf3d4\uf3d5\uf3d6\uf3d7\uf3d8\uf3d9\uf3da\uf3db\uf3dc\uf3dd\uf3de\uf3df\uf3e0\uf3e1\uf3e2\uf3e3\uf3e4\uf3e5\uf3e6\uf3e7\uf3e8\uf3e9\uf3ea\uf3eb\uf3ec\uf3ed\uf3ee\uf3ef\uf3f0\uf3f1\uf3f2\uf3f3\uf3f4\uf3f5\uf3f6\uf3f7\uf3f8\uf3f9\uf3fa\uf3fb\uf3fc\uf3fd\uf3fe\uf3ff\uf400\uf401\uf402\uf403\uf404\uf405\uf406\uf407\uf408\uf409\uf40a\uf40b\uf40c\uf40d\uf40e\uf40f\uf410\uf411\uf412\uf413\uf414\uf415\uf416\uf417\uf418\uf419\uf41a\uf41b\uf41c\uf41d\uf41e\uf41f\uf420\uf421\uf422\uf423\uf424\uf425\uf426\uf427\uf428\uf429\uf42a\uf42b\uf42c\uf42d\uf42e\uf42f\uf430\uf431\uf432\uf433\uf434\uf435\uf436\uf437\uf438\uf439\uf43a\uf43b\uf43c\uf43d\uf43e\uf43f\uf440\uf441\uf442\uf443\uf444\uf445\uf446\uf447\uf448\uf449\uf44a\uf44b\uf44c\uf44d\uf44e\uf44f\uf450\uf451\uf452\uf453\uf454\uf455\uf456\uf457\uf458\uf459\uf45a\uf45b\uf45c\uf45d\uf45e\uf45f\uf460\uf461\uf462\uf463\uf464\uf465\uf466\uf467\uf468\uf469\uf46a\uf46b\uf46c\uf46d\uf46e\uf46f\uf470\uf471\uf472\uf473\uf474\uf475\uf476\uf477\uf478\uf479\uf47a\uf47b\uf47c\uf47d\uf47e\uf47f\uf480\uf481\uf482\uf483\uf484\uf485\uf486\uf487\uf488\uf489\uf48a\uf48b\uf48c\uf48d\uf48e\uf48f\uf490\uf491\uf492\uf493\uf494\uf495\uf496\uf497\uf498\uf499\uf49a\uf49b\uf49c\uf49d\uf49e\uf49f\uf4a0\uf4a1\uf4a2\uf4a3\uf4a4\uf4a5\uf4a6\uf4a7\uf4a8\uf4a9\uf4aa\uf4ab\uf4ac\uf4ad\uf4ae\uf4af\uf4b0\uf4b1\uf4b2\uf4b3\uf4b4\uf4b5\uf4b6\uf4b7\uf4b8\uf4b9\uf4ba\uf4bb\uf4bc\uf4bd\uf4be\uf4bf\uf4c0\uf4c1\uf4c2\uf4c3\uf4c4\uf4c5\uf4c6\uf4c7\uf4c8\uf4c9\uf4ca\uf4cb\uf4cc\uf4cd\uf4ce\uf4cf\uf4d0\uf4d1\uf4d2\uf4d3
\uf4d4\uf4d5\uf4d6\uf4d7\uf4d8\uf4d9\uf4da\uf4db\uf4dc\uf4dd\uf4de\uf4df\uf4e0\uf4e1\uf4e2\uf4e3\uf4e4\uf4e5\uf4e6\uf4e7\uf4e8\uf4e9\uf4ea\uf4eb\uf4ec\uf4ed\uf4ee\uf4ef\uf4f0\uf4f1\uf4f2\uf4f3\uf4f4\uf4f5\uf4f6\uf4f7\uf4f8\uf4f9\uf4fa\uf4fb\uf4fc\uf4fd\uf4fe\uf4ff\uf500\uf501\uf502\uf503\uf504\uf505\uf506\uf507\uf508\uf509\uf50a\uf50b\uf50c\uf50d\uf50e\uf50f\uf510\uf511\uf512\uf513\uf514\uf515\uf516\uf517\uf518\uf519\uf51a\uf51b\uf51c\uf51d\uf51e\uf51f\uf520\uf521\uf522\uf523\uf524\uf525\uf526\uf527\uf528\uf529\uf52a\uf52b\uf52c\uf52d\uf52e\uf52f\uf530\uf531\uf532\uf533\uf534\uf535\uf536\uf537\uf538\uf539\uf53a\uf53b\uf53c\uf53d\uf53e\uf53f\uf540\uf541\uf542\uf543\uf544\uf545\uf546\uf547\uf548\uf549\uf54a\uf54b\uf54c\uf54d\uf54e\uf54f\uf550\uf551\uf552\uf553\uf554\uf555\uf556\uf557\uf558\uf559\uf55a\uf55b\uf55c\uf55d\uf55e\uf55f\uf560\uf561\uf562\uf563\uf564\uf565\uf566\uf567\uf568\uf569\uf56a\uf56b\uf56c\uf56d\uf56e\uf56f\uf570\uf571\uf572\uf573\uf574\uf575\uf576\uf577\uf578\uf579\uf57a\uf57b\uf57c\uf57d\uf57e\uf57f\uf580\uf581\uf582\uf583\uf584\uf585\uf586\uf587\uf588\uf589\uf58a\uf58b\uf58c\uf58d\uf58e\uf58f\uf590\uf591\uf592\uf593\uf594\uf595\uf596\uf597\uf598\uf599\uf59a\uf59b\uf59c\uf59d\uf59e\uf59f\uf5a0\uf5a1\uf5a2\uf5a3\uf5a4\uf5a5\uf5a6\uf5a7\uf5a8\uf5a9\uf5aa\uf5ab\uf5ac\uf5ad\uf5ae\uf5af\uf5b0\uf5b1\uf5b2\uf5b3\uf5b4\uf5b5\uf5b6\uf5b7\uf5b8\uf5b9\uf5ba\uf5bb\uf5bc\uf5bd\uf5be\uf5bf\uf5c0\uf5c1\uf5c2\uf5c3\uf5c4\uf5c5\uf5c6\uf5c7\uf5c8\uf5c9\uf5ca\uf5cb\uf5cc\uf5cd\uf5ce\uf5cf\uf5d0\uf5d1\uf5d2\uf5d3\uf5d4\uf5d5\uf5d6\uf5d7\uf5d8\uf5d9\uf5da\uf5db\uf5dc\uf5dd\uf5de\uf5df\uf5e0\uf5e1\uf5e2\uf5e3\uf5e4\uf5e5\uf5e6\uf5e7\uf5e8\uf5e9\uf5ea\uf5eb\uf5ec\uf5ed\uf5ee\uf5ef\uf5f0\uf5f1\uf5f2\uf5f3\uf5f4\uf5f5\uf5f6\uf5f7\uf5f8\uf5f9\uf5fa\uf5fb\uf5fc\uf5fd\uf5fe\uf5ff\uf600\uf601\uf602\uf603\uf604\uf605\uf606\uf607\uf608\uf609\uf60a\uf60b\uf60c\uf60d\uf60e\uf60f\uf610\uf611\uf612\uf613\uf614\uf615\uf616\uf617\uf618\uf619\uf61a\uf61b\uf61c\uf61d\uf61e\uf61f\uf620\u
f621\uf622\uf623\uf624\uf625\uf626\uf627\uf628\uf629\uf62a\uf62b\uf62c\uf62d\uf62e\uf62f\uf630\uf631\uf632\uf633\uf634\uf635\uf636\uf637\uf638\uf639\uf63a\uf63b\uf63c\uf63d\uf63e\uf63f\uf640\uf641\uf642\uf643\uf644\uf645\uf646\uf647\uf648\uf649\uf64a\uf64b\uf64c\uf64d\uf64e\uf64f\uf650\uf651\uf652\uf653\uf654\uf655\uf656\uf657\uf658\uf659\uf65a\uf65b\uf65c\uf65d\uf65e\uf65f\uf660\uf661\uf662\uf663\uf664\uf665\uf666\uf667\uf668\uf669\uf66a\uf66b\uf66c\uf66d\uf66e\uf66f\uf670\uf671\uf672\uf673\uf674\uf675\uf676\uf677\uf678\uf679\uf67a\uf67b\uf67c\uf67d\uf67e\uf67f\uf680\uf681\uf682\uf683\uf684\uf685\uf686\uf687\uf688\uf689\uf68a\uf68b\uf68c\uf68d\uf68e\uf68f\uf690\uf691\uf692\uf693\uf694\uf695\uf696\uf697\uf698\uf699\uf69a\uf69b\uf69c\uf69d\uf69e\uf69f\uf6a0\uf6a1\uf6a2\uf6a3\uf6a4\uf6a5\uf6a6\uf6a7\uf6a8\uf6a9\uf6aa\uf6ab\uf6ac\uf6ad\uf6ae\uf6af\uf6b0\uf6b1\uf6b2\uf6b3\uf6b4\uf6b5\uf6b6\uf6b7\uf6b8\uf6b9\uf6ba\uf6bb\uf6bc\uf6bd\uf6be\uf6bf\uf6c0\uf6c1\uf6c2\uf6c3\uf6c4\uf6c5\uf6c6\uf6c7\uf6c8\uf6c9\uf6ca\uf6cb\uf6cc\uf6cd\uf6ce\uf6cf\uf6d0\uf6d1\uf6d2\uf6d3\uf6d4\uf6d5\uf6d6\uf6d7\uf6d8\uf6d9\uf6da\uf6db\uf6dc\uf6dd\uf6de\uf6df\uf6e0\uf6e1\uf6e2\uf6e3\uf6e4\uf6e5\uf6e6\uf6e7\uf6e8\uf6e9\uf6ea\uf6eb\uf6ec\uf6ed\uf6ee\uf6ef\uf6f0\uf6f1\uf6f2\uf6f3\uf6f4\uf6f5\uf6f6\uf6f7\uf6f8\uf6f9\uf6fa\uf6fb\uf6fc\uf6fd\uf6fe\uf6ff\uf700\uf701\uf702\uf703\uf704\uf705\uf706\uf707\uf708\uf709\uf70a\uf70b\uf70c\uf70d\uf70e\uf70f\uf710\uf711\uf712\uf713\uf714\uf715\uf716\uf717\uf718\uf719\uf71a\uf71b\uf71c\uf71d\uf71e\uf71f\uf720\uf721\uf722\uf723\uf724\uf725\uf726\uf727\uf728\uf729\uf72a\uf72b\uf72c\uf72d\uf72e\uf72f\uf730\uf731\uf732\uf733\uf734\uf735\uf736\uf737\uf738\uf739\uf73a\uf73b\uf73c\uf73d\uf73e\uf73f\uf740\uf741\uf742\uf743\uf744\uf745\uf746\uf747\uf748\uf749\uf74a\uf74b\uf74c\uf74d\uf74e\uf74f\uf750\uf751\uf752\uf753\uf754\uf755\uf756\uf757\uf758\uf759\uf75a\uf75b\uf75c\uf75d\uf75e\uf75f\uf760\uf761\uf762\uf763\uf764\uf765\uf766\uf767\uf768\uf769\uf76a\uf76b\uf76c\uf76d\uf7
6e\uf76f\uf770\uf771\uf772\uf773\uf774\uf775\uf776\uf777\uf778\uf779\uf77a\uf77b\uf77c\uf77d\uf77e\uf77f\uf780\uf781\uf782\uf783\uf784\uf785\uf786\uf787\uf788\uf789\uf78a\uf78b\uf78c\uf78d\uf78e\uf78f\uf790\uf791\uf792\uf793\uf794\uf795\uf796\uf797\uf798\uf799\uf79a\uf79b\uf79c\uf79d\uf79e\uf79f\uf7a0\uf7a1\uf7a2\uf7a3\uf7a4\uf7a5\uf7a6\uf7a7\uf7a8\uf7a9\uf7aa\uf7ab\uf7ac\uf7ad\uf7ae\uf7af\uf7b0\uf7b1\uf7b2\uf7b3\uf7b4\uf7b5\uf7b6\uf7b7\uf7b8\uf7b9\uf7ba\uf7bb\uf7bc\uf7bd\uf7be\uf7bf\uf7c0\uf7c1\uf7c2\uf7c3\uf7c4\uf7c5\uf7c6\uf7c7\uf7c8\uf7c9\uf7ca\uf7cb\uf7cc\uf7cd\uf7ce\uf7cf\uf7d0\uf7d1\uf7d2\uf7d3\uf7d4\uf7d5\uf7d6\uf7d7\uf7d8\uf7d9\uf7da\uf7db\uf7dc\uf7dd\uf7de\uf7df\uf7e0\uf7e1\uf7e2\uf7e3\uf7e4\uf7e5\uf7e6\uf7e7\uf7e8\uf7e9\uf7ea\uf7eb\uf7ec\uf7ed\uf7ee\uf7ef\uf7f0\uf7f1\uf7f2\uf7f3\uf7f4\uf7f5\uf7f6\uf7f7\uf7f8\uf7f9\uf7fa\uf7fb\uf7fc\uf7fd\uf7fe\uf7ff\uf800\uf801\uf802\uf803\uf804\uf805\uf806\uf807\uf808\uf809\uf80a\uf80b\uf80c\uf80d\uf80e\uf80f\uf810\uf811\uf812\uf813\uf814\uf815\uf816\uf817\uf818\uf819\uf81a\uf81b\uf81c\uf81d\uf81e\uf81f\uf820\uf821\uf822\uf823\uf824\uf825\uf826\uf827\uf828\uf829\uf82a\uf82b\uf82c\uf82d\uf82e\uf82f\uf830\uf831\uf832\uf833\uf834\uf835\uf836\uf837\uf838\uf839\uf83a\uf83b\uf83c\uf83d\uf83e\uf83f\uf840\uf841\uf842\uf843\uf844\uf845\uf846\uf847\uf848\uf849\uf84a\uf84b\uf84c\uf84d\uf84e\uf84f\uf850\uf851\uf852\uf853\uf854\uf855\uf856\uf857\uf858\uf859\uf85a\uf85b\uf85c\uf85d\uf85e\uf85f\uf860\uf861\uf862\uf863\uf864\uf865\uf866\uf867\uf868\uf869\uf86a\uf86b\uf86c\uf86d\uf86e\uf86f\uf870\uf871\uf872\uf873\uf874\uf875\uf876\uf877\uf878\uf879\uf87a\uf87b\uf87c\uf87d\uf87e\uf87f\uf880\uf881\uf882\uf883\uf884\uf885\uf886\uf887\uf888\uf889\uf88a\uf88b\uf88c\uf88d\uf88e\uf88f\uf890\uf891\uf892\uf893\uf894\uf895\uf896\uf897\uf898\uf899\uf89a\uf89b\uf89c\uf89d\uf89e\uf89f\uf8a0\uf8a1\uf8a2\uf8a3\uf8a4\uf8a5\uf8a6\uf8a7\uf8a8\uf8a9\uf8aa\uf8ab\uf8ac\uf8ad\uf8ae\uf8af\uf8b0\uf8b1\uf8b2\uf8b3\uf8b4\uf8b5\uf8b6\uf8b7\uf8b8\uf8b9\uf8ba\uf8bb
\uf8bc\uf8bd\uf8be\uf8bf\uf8c0\uf8c1\uf8c2\uf8c3\uf8c4\uf8c5\uf8c6\uf8c7\uf8c8\uf8c9\uf8ca\uf8cb\uf8cc\uf8cd\uf8ce\uf8cf\uf8d0\uf8d1\uf8d2\uf8d3\uf8d4\uf8d5\uf8d6\uf8d7\uf8d8\uf8d9\uf8da\uf8db\uf8dc\uf8dd\uf8de\uf8df\uf8e0\uf8e1\uf8e2\uf8e3\uf8e4\uf8e5\uf8e6\uf8e7\uf8e8\uf8e9\uf8ea\uf8eb\uf8ec\uf8ed\uf8ee\uf8ef\uf8f0\uf8f1\uf8f2\uf8f3\uf8f4\uf8f5\uf8f6\uf8f7\uf8f8\uf8f9\uf8fa\uf8fb\uf8fc\uf8fd\uf8fe\uf8ff'
+
+try:
+ Cs = eval(r"'\ud800\ud801\ud802\ud803\ud804\ud805\ud806\ud807\ud808\ud809\ud80a\ud80b\ud80c\ud80d\ud80e\ud80f\ud810\ud811\ud812\ud813\ud814\ud815\ud816\ud817\ud818\ud819\ud81a\ud81b\ud81c\ud81d\ud81e\ud81f\ud820\ud821\ud822\ud823\ud824\ud825\ud826\ud827\ud828\ud829\ud82a\ud82b\ud82c\ud82d\ud82e\ud82f\ud830\ud831\ud832\ud833\ud834\ud835\ud836\ud837\ud838\ud839\ud83a\ud83b\ud83c\ud83d\ud83e\ud83f\ud840\ud841\ud842\ud843\ud844\ud845\ud846\ud847\ud848\ud849\ud84a\ud84b\ud84c\ud84d\ud84e\ud84f\ud850\ud851\ud852\ud853\ud854\ud855\ud856\ud857\ud858\ud859\ud85a\ud85b\ud85c\ud85d\ud85e\ud85f\ud860\ud861\ud862\ud863\ud864\ud865\ud866\ud867\ud868\ud869\ud86a\ud86b\ud86c\ud86d\ud86e\ud86f\ud870\ud871\ud872\ud873\ud874\ud875\ud876\ud877\ud878\ud879\ud87a\ud87b\ud87c\ud87d\ud87e\ud87f\ud880\ud881\ud882\ud883\ud884\ud885\ud886\ud887\ud888\ud889\ud88a\ud88b\ud88c\ud88d\ud88e\ud88f\ud890\ud891\ud892\ud893\ud894\ud895\ud896\ud897\ud898\ud899\ud89a\ud89b\ud89c\ud89d\ud89e\ud89f\ud8a0\ud8a1\ud8a2\ud8a3\ud8a4\ud8a5\ud8a6\ud8a7\ud8a8\ud8a9\ud8aa\ud8ab\ud8ac\ud8ad\ud8ae\ud8af\ud8b0\ud8b1\ud8b2\ud8b3\ud8b4\ud8b5\ud8b6\ud8b7\ud8b8\ud8b9\ud8ba\ud8bb\ud8bc\ud8bd\ud8be\ud8bf\ud8c0\ud8c1\ud8c2\ud8c3\ud8c4\ud8c5\ud8c6\ud8c7\ud8c8\ud8c9\ud8ca\ud8cb\ud8cc\ud8cd\ud8ce\ud8cf\ud8d0\ud8d1\ud8d2\ud8d3\ud8d4\ud8d5\ud8d6\ud8d7\ud8d8\ud8d9\ud8da\ud8db\ud8dc\ud8dd\ud8de\ud8df\ud8e0\ud8e1\ud8e2\ud8e3\ud8e4\ud8e5\ud8e6\ud8e7\ud8e8\ud8e9\ud8ea\ud8eb\ud8ec\ud8ed\ud8ee\ud8ef\ud8f0\ud8f1\ud8f2\ud8f3\ud8f4\ud8f5\ud8f6\ud8f7\ud8f8\ud8f9\ud8fa\ud8fb\ud8fc\ud8fd\ud8fe\ud8ff\ud900\ud901\ud902\ud903\ud904\ud905\ud906\ud907\ud908\ud909\ud90a\ud90b\ud90c\ud90d\ud90e\ud90f\ud910\ud911\ud912\ud913\ud914\ud915\ud916\ud917\ud918\ud919\ud91a\ud91b\ud91c\ud91d\ud91e\ud91f\ud920\ud921\ud922\ud923\ud924\ud925\ud926\ud927\ud928\ud929\ud92a\ud92b\ud92c\ud92d\ud92e\ud92f\ud930\ud931\ud932\ud933\ud934\ud935\ud936\ud937\ud938\ud939\ud93a\ud93b\ud93c\ud93d\ud93e\ud93f\ud940\ud941\ud942\ud943\ud944\ud945\ud946\ud947\ud948\ud949\ud94
a\ud94b\ud94c\ud94d\ud94e\ud94f\ud950\ud951\ud952\ud953\ud954\ud955\ud956\ud957\ud958\ud959\ud95a\ud95b\ud95c\ud95d\ud95e\ud95f\ud960\ud961\ud962\ud963\ud964\ud965\ud966\ud967\ud968\ud969\ud96a\ud96b\ud96c\ud96d\ud96e\ud96f\ud970\ud971\ud972\ud973\ud974\ud975\ud976\ud977\ud978\ud979\ud97a\ud97b\ud97c\ud97d\ud97e\ud97f\ud980\ud981\ud982\ud983\ud984\ud985\ud986\ud987\ud988\ud989\ud98a\ud98b\ud98c\ud98d\ud98e\ud98f\ud990\ud991\ud992\ud993\ud994\ud995\ud996\ud997\ud998\ud999\ud99a\ud99b\ud99c\ud99d\ud99e\ud99f\ud9a0\ud9a1\ud9a2\ud9a3\ud9a4\ud9a5\ud9a6\ud9a7\ud9a8\ud9a9\ud9aa\ud9ab\ud9ac\ud9ad\ud9ae\ud9af\ud9b0\ud9b1\ud9b2\ud9b3\ud9b4\ud9b5\ud9b6\ud9b7\ud9b8\ud9b9\ud9ba\ud9bb\ud9bc\ud9bd\ud9be\ud9bf\ud9c0\ud9c1\ud9c2\ud9c3\ud9c4\ud9c5\ud9c6\ud9c7\ud9c8\ud9c9\ud9ca\ud9cb\ud9cc\ud9cd\ud9ce\ud9cf\ud9d0\ud9d1\ud9d2\ud9d3\ud9d4\ud9d5\ud9d6\ud9d7\ud9d8\ud9d9\ud9da\ud9db\ud9dc\ud9dd\ud9de\ud9df\ud9e0\ud9e1\ud9e2\ud9e3\ud9e4\ud9e5\ud9e6\ud9e7\ud9e8\ud9e9\ud9ea\ud9eb\ud9ec\ud9ed\ud9ee\ud9ef\ud9f0\ud9f1\ud9f2\ud9f3\ud9f4\ud9f5\ud9f6\ud9f7\ud9f8\ud9f9\ud9fa\ud9fb\ud9fc\ud9fd\ud9fe\ud9ff\uda00\uda01\uda02\uda03\uda04\uda05\uda06\uda07\uda08\uda09\uda0a\uda0b\uda0c\uda0d\uda0e\uda0f\uda10\uda11\uda12\uda13\uda14\uda15\uda16\uda17\uda18\uda19\uda1a\uda1b\uda1c\uda1d\uda1e\uda1f\uda20\uda21\uda22\uda23\uda24\uda25\uda26\uda27\uda28\uda29\uda2a\uda2b\uda2c\uda2d\uda2e\uda2f\uda30\uda31\uda32\uda33\uda34\uda35\uda36\uda37\uda38\uda39\uda3a\uda3b\uda3c\uda3d\uda3e\uda3f\uda40\uda41\uda42\uda43\uda44\uda45\uda46\uda47\uda48\uda49\uda4a\uda4b\uda4c\uda4d\uda4e\uda4f\uda50\uda51\uda52\uda53\uda54\uda55\uda56\uda57\uda58\uda59\uda5a\uda5b\uda5c\uda5d\uda5e\uda5f\uda60\uda61\uda62\uda63\uda64\uda65\uda66\uda67\uda68\uda69\uda6a\uda6b\uda6c\uda6d\uda6e\uda6f\uda70\uda71\uda72\uda73\uda74\uda75\uda76\uda77\uda78\uda79\uda7a\uda7b\uda7c\uda7d\uda7e\uda7f\uda80\uda81\uda82\uda83\uda84\uda85\uda86\uda87\uda88\uda89\uda8a\uda8b\uda8c\uda8d\uda8e\uda8f\uda90\uda91\uda92\uda93\uda94\uda95\uda96\uda97\
uda98\uda99\uda9a\uda9b\uda9c\uda9d\uda9e\uda9f\udaa0\udaa1\udaa2\udaa3\udaa4\udaa5\udaa6\udaa7\udaa8\udaa9\udaaa\udaab\udaac\udaad\udaae\udaaf\udab0\udab1\udab2\udab3\udab4\udab5\udab6\udab7\udab8\udab9\udaba\udabb\udabc\udabd\udabe\udabf\udac0\udac1\udac2\udac3\udac4\udac5\udac6\udac7\udac8\udac9\udaca\udacb\udacc\udacd\udace\udacf\udad0\udad1\udad2\udad3\udad4\udad5\udad6\udad7\udad8\udad9\udada\udadb\udadc\udadd\udade\udadf\udae0\udae1\udae2\udae3\udae4\udae5\udae6\udae7\udae8\udae9\udaea\udaeb\udaec\udaed\udaee\udaef\udaf0\udaf1\udaf2\udaf3\udaf4\udaf5\udaf6\udaf7\udaf8\udaf9\udafa\udafb\udafc\udafd\udafe\udaff\udb00\udb01\udb02\udb03\udb04\udb05\udb06\udb07\udb08\udb09\udb0a\udb0b\udb0c\udb0d\udb0e\udb0f\udb10\udb11\udb12\udb13\udb14\udb15\udb16\udb17\udb18\udb19\udb1a\udb1b\udb1c\udb1d\udb1e\udb1f\udb20\udb21\udb22\udb23\udb24\udb25\udb26\udb27\udb28\udb29\udb2a\udb2b\udb2c\udb2d\udb2e\udb2f\udb30\udb31\udb32\udb33\udb34\udb35\udb36\udb37\udb38\udb39\udb3a\udb3b\udb3c\udb3d\udb3e\udb3f\udb40\udb41\udb42\udb43\udb44\udb45\udb46\udb47\udb48\udb49\udb4a\udb4b\udb4c\udb4d\udb4e\udb4f\udb50\udb51\udb52\udb53\udb54\udb55\udb56\udb57\udb58\udb59\udb5a\udb5b\udb5c\udb5d\udb5e\udb5f\udb60\udb61\udb62\udb63\udb64\udb65\udb66\udb67\udb68\udb69\udb6a\udb6b\udb6c\udb6d\udb6e\udb6f\udb70\udb71\udb72\udb73\udb74\udb75\udb76\udb77\udb78\udb79\udb7a\udb7b\udb7c\udb7d\udb7e\udb7f\udb80\udb81\udb82\udb83\udb84\udb85\udb86\udb87\udb88\udb89\udb8a\udb8b\udb8c\udb8d\udb8e\udb8f\udb90\udb91\udb92\udb93\udb94\udb95\udb96\udb97\udb98\udb99\udb9a\udb9b\udb9c\udb9d\udb9e\udb9f\udba0\udba1\udba2\udba3\udba4\udba5\udba6\udba7\udba8\udba9\udbaa\udbab\udbac\udbad\udbae\udbaf\udbb0\udbb1\udbb2\udbb3\udbb4\udbb5\udbb6\udbb7\udbb8\udbb9\udbba\udbbb\udbbc\udbbd\udbbe\udbbf\udbc0\udbc1\udbc2\udbc3\udbc4\udbc5\udbc6\udbc7\udbc8\udbc9\udbca\udbcb\udbcc\udbcd\udbce\udbcf\udbd0\udbd1\udbd2\udbd3\udbd4\udbd5\udbd6\udbd7\udbd8\udbd9\udbda\udbdb\udbdc\udbdd\udbde\udbdf\udbe0\udbe1\udbe2\udbe3\udbe4\ud
be5\udbe6\udbe7\udbe8\udbe9\udbea\udbeb\udbec\udbed\udbee\udbef\udbf0\udbf1\udbf2\udbf3\udbf4\udbf5\udbf6\udbf7\udbf8\udbf9\udbfa\udbfb\udbfc\udbfd\udbfe\U0010fc00\udc01\udc02\udc03\udc04\udc05\udc06\udc07\udc08\udc09\udc0a\udc0b\udc0c\udc0d\udc0e\udc0f\udc10\udc11\udc12\udc13\udc14\udc15\udc16\udc17\udc18\udc19\udc1a\udc1b\udc1c\udc1d\udc1e\udc1f\udc20\udc21\udc22\udc23\udc24\udc25\udc26\udc27\udc28\udc29\udc2a\udc2b\udc2c\udc2d\udc2e\udc2f\udc30\udc31\udc32\udc33\udc34\udc35\udc36\udc37\udc38\udc39\udc3a\udc3b\udc3c\udc3d\udc3e\udc3f\udc40\udc41\udc42\udc43\udc44\udc45\udc46\udc47\udc48\udc49\udc4a\udc4b\udc4c\udc4d\udc4e\udc4f\udc50\udc51\udc52\udc53\udc54\udc55\udc56\udc57\udc58\udc59\udc5a\udc5b\udc5c\udc5d\udc5e\udc5f\udc60\udc61\udc62\udc63\udc64\udc65\udc66\udc67\udc68\udc69\udc6a\udc6b\udc6c\udc6d\udc6e\udc6f\udc70\udc71\udc72\udc73\udc74\udc75\udc76\udc77\udc78\udc79\udc7a\udc7b\udc7c\udc7d\udc7e\udc7f\udc80\udc81\udc82\udc83\udc84\udc85\udc86\udc87\udc88\udc89\udc8a\udc8b\udc8c\udc8d\udc8e\udc8f\udc90\udc91\udc92\udc93\udc94\udc95\udc96\udc97\udc98\udc99\udc9a\udc9b\udc9c\udc9d\udc9e\udc9f\udca0\udca1\udca2\udca3\udca4\udca5\udca6\udca7\udca8\udca9\udcaa\udcab\udcac\udcad\udcae\udcaf\udcb0\udcb1\udcb2\udcb3\udcb4\udcb5\udcb6\udcb7\udcb8\udcb9\udcba\udcbb\udcbc\udcbd\udcbe\udcbf\udcc0\udcc1\udcc2\udcc3\udcc4\udcc5\udcc6\udcc7\udcc8\udcc9\udcca\udccb\udccc\udccd\udcce\udccf\udcd0\udcd1\udcd2\udcd3\udcd4\udcd5\udcd6\udcd7\udcd8\udcd9\udcda\udcdb\udcdc\udcdd\udcde\udcdf\udce0\udce1\udce2\udce3\udce4\udce5\udce6\udce7\udce8\udce9\udcea\udceb\udcec\udced\udcee\udcef\udcf0\udcf1\udcf2\udcf3\udcf4\udcf5\udcf6\udcf7\udcf8\udcf9\udcfa\udcfb\udcfc\udcfd\udcfe\udcff\udd00\udd01\udd02\udd03\udd04\udd05\udd06\udd07\udd08\udd09\udd0a\udd0b\udd0c\udd0d\udd0e\udd0f\udd10\udd11\udd12\udd13\udd14\udd15\udd16\udd17\udd18\udd19\udd1a\udd1b\udd1c\udd1d\udd1e\udd1f\udd20\udd21\udd22\udd23\udd24\udd25\udd26\udd27\udd28\udd29\udd2a\udd2b\udd2c\udd2d\udd2e\udd2f\udd30\udd31\udd32\
udd33\udd34\udd35\udd36\udd37\udd38\udd39\udd3a\udd3b\udd3c\udd3d\udd3e\udd3f\udd40\udd41\udd42\udd43\udd44\udd45\udd46\udd47\udd48\udd49\udd4a\udd4b\udd4c\udd4d\udd4e\udd4f\udd50\udd51\udd52\udd53\udd54\udd55\udd56\udd57\udd58\udd59\udd5a\udd5b\udd5c\udd5d\udd5e\udd5f\udd60\udd61\udd62\udd63\udd64\udd65\udd66\udd67\udd68\udd69\udd6a\udd6b\udd6c\udd6d\udd6e\udd6f\udd70\udd71\udd72\udd73\udd74\udd75\udd76\udd77\udd78\udd79\udd7a\udd7b\udd7c\udd7d\udd7e\udd7f\udd80\udd81\udd82\udd83\udd84\udd85\udd86\udd87\udd88\udd89\udd8a\udd8b\udd8c\udd8d\udd8e\udd8f\udd90\udd91\udd92\udd93\udd94\udd95\udd96\udd97\udd98\udd99\udd9a\udd9b\udd9c\udd9d\udd9e\udd9f\udda0\udda1\udda2\udda3\udda4\udda5\udda6\udda7\udda8\udda9\uddaa\uddab\uddac\uddad\uddae\uddaf\uddb0\uddb1\uddb2\uddb3\uddb4\uddb5\uddb6\uddb7\uddb8\uddb9\uddba\uddbb\uddbc\uddbd\uddbe\uddbf\uddc0\uddc1\uddc2\uddc3\uddc4\uddc5\uddc6\uddc7\uddc8\uddc9\uddca\uddcb\uddcc\uddcd\uddce\uddcf\uddd0\uddd1\uddd2\uddd3\uddd4\uddd5\uddd6\uddd7\uddd8\uddd9\uddda\udddb\udddc\udddd\uddde\udddf\udde0\udde1\udde2\udde3\udde4\udde5\udde6\udde7\udde8\udde9\uddea\uddeb\uddec\udded\uddee\uddef\uddf0\uddf1\uddf2\uddf3\uddf4\uddf5\uddf6\uddf7\uddf8\uddf9\uddfa\uddfb\uddfc\uddfd\uddfe\uddff\ude00\ude01\ude02\ude03\ude04\ude05\ude06\ude07\ude08\ude09\ude0a\ude0b\ude0c\ude0d\ude0e\ude0f\ude10\ude11\ude12\ude13\ude14\ude15\ude16\ude17\ude18\ude19\ude1a\ude1b\ude1c\ude1d\ude1e\ude1f\ude20\ude21\ude22\ude23\ude24\ude25\ude26\ude27\ude28\ude29\ude2a\ude2b\ude2c\ude2d\ude2e\ude2f\ude30\ude31\ude32\ude33\ude34\ude35\ude36\ude37\ude38\ude39\ude3a\ude3b\ude3c\ude3d\ude3e\ude3f\ude40\ude41\ude42\ude43\ude44\ude45\ude46\ude47\ude48\ude49\ude4a\ude4b\ude4c\ude4d\ude4e\ude4f\ude50\ude51\ude52\ude53\ude54\ude55\ude56\ude57\ude58\ude59\ude5a\ude5b\ude5c\ude5d\ude5e\ude5f\ude60\ude61\ude62\ude63\ude64\ude65\ude66\ude67\ude68\ude69\ude6a\ude6b\ude6c\ude6d\ude6e\ude6f\ude70\ude71\ude72\ude73\ude74\ude75\ude76\ude77\ude78\ude79\ude7a\ude7b\ude7c\ude7d\ude7e\ude7f\ud
e80\ude81\ude82\ude83\ude84\ude85\ude86\ude87\ude88\ude89\ude8a\ude8b\ude8c\ude8d\ude8e\ude8f\ude90\ude91\ude92\ude93\ude94\ude95\ude96\ude97\ude98\ude99\ude9a\ude9b\ude9c\ude9d\ude9e\ude9f\udea0\udea1\udea2\udea3\udea4\udea5\udea6\udea7\udea8\udea9\udeaa\udeab\udeac\udead\udeae\udeaf\udeb0\udeb1\udeb2\udeb3\udeb4\udeb5\udeb6\udeb7\udeb8\udeb9\udeba\udebb\udebc\udebd\udebe\udebf\udec0\udec1\udec2\udec3\udec4\udec5\udec6\udec7\udec8\udec9\udeca\udecb\udecc\udecd\udece\udecf\uded0\uded1\uded2\uded3\uded4\uded5\uded6\uded7\uded8\uded9\udeda\udedb\udedc\udedd\udede\udedf\udee0\udee1\udee2\udee3\udee4\udee5\udee6\udee7\udee8\udee9\udeea\udeeb\udeec\udeed\udeee\udeef\udef0\udef1\udef2\udef3\udef4\udef5\udef6\udef7\udef8\udef9\udefa\udefb\udefc\udefd\udefe\udeff\udf00\udf01\udf02\udf03\udf04\udf05\udf06\udf07\udf08\udf09\udf0a\udf0b\udf0c\udf0d\udf0e\udf0f\udf10\udf11\udf12\udf13\udf14\udf15\udf16\udf17\udf18\udf19\udf1a\udf1b\udf1c\udf1d\udf1e\udf1f\udf20\udf21\udf22\udf23\udf24\udf25\udf26\udf27\udf28\udf29\udf2a\udf2b\udf2c\udf2d\udf2e\udf2f\udf30\udf31\udf32\udf33\udf34\udf35\udf36\udf37\udf38\udf39\udf3a\udf3b\udf3c\udf3d\udf3e\udf3f\udf40\udf41\udf42\udf43\udf44\udf45\udf46\udf47\udf48\udf49\udf4a\udf4b\udf4c\udf4d\udf4e\udf4f\udf50\udf51\udf52\udf53\udf54\udf55\udf56\udf57\udf58\udf59\udf5a\udf5b\udf5c\udf5d\udf5e\udf5f\udf60\udf61\udf62\udf63\udf64\udf65\udf66\udf67\udf68\udf69\udf6a\udf6b\udf6c\udf6d\udf6e\udf6f\udf70\udf71\udf72\udf73\udf74\udf75\udf76\udf77\udf78\udf79\udf7a\udf7b\udf7c\udf7d\udf7e\udf7f\udf80\udf81\udf82\udf83\udf84\udf85\udf86\udf87\udf88\udf89\udf8a\udf8b\udf8c\udf8d\udf8e\udf8f\udf90\udf91\udf92\udf93\udf94\udf95\udf96\udf97\udf98\udf99\udf9a\udf9b\udf9c\udf9d\udf9e\udf9f\udfa0\udfa1\udfa2\udfa3\udfa4\udfa5\udfa6\udfa7\udfa8\udfa9\udfaa\udfab\udfac\udfad\udfae\udfaf\udfb0\udfb1\udfb2\udfb3\udfb4\udfb5\udfb6\udfb7\udfb8\udfb9\udfba\udfbb\udfbc\udfbd\udfbe\udfbf\udfc0\udfc1\udfc2\udfc3\udfc4\udfc5\udfc6\udfc7\udfc8\udfc9\udfca\udfcb\udfcc\udfc
d\udfce\udfcf\udfd0\udfd1\udfd2\udfd3\udfd4\udfd5\udfd6\udfd7\udfd8\udfd9\udfda\udfdb\udfdc\udfdd\udfde\udfdf\udfe0\udfe1\udfe2\udfe3\udfe4\udfe5\udfe6\udfe7\udfe8\udfe9\udfea\udfeb\udfec\udfed\udfee\udfef\udff0\udff1\udff2\udff3\udff4\udff5\udff6\udff7\udff8\udff9\udffa\udffb\udffc\udffd\udffe\udfff'")
+except UnicodeDecodeError:
+ Cs = '' # Jython can't handle isolated surrogates
+
+Ll = u'abcdefghijklmnopqrstuvwxyz\xaa\xb5\xba\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e\u017f\u0180\u0183\u0185\u0188\u018c\u018d\u0192\u0195\u0199\u019a\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9\u01ba\u01bd\u01be\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233\u0234\u0235\u0236\u0237\u0238\u0239\u023c\u023f\u0240\u0250\u0251\u0252\u0253\u0254\u0255\u0256\u0257\u0258\u0259\u025a\u025b\u025c\u025d\u025e\u025f\u0260\u0261\u0262\u0263\u0264\u0265\u0266\u0267\u0268\u0269\u026a\u026b\u026c\u026d\u026e\u026f\u0270\u0271\u0272\u0273\u0274\u0275\u0276\u0277\u0278\u0279\u027a\u027b\u027c\u027d\u027e\u027f\u0280\u0281\u0282\u0283\u0284\u0285\u0286\u0287\u0288\u0289\u028a\u028b\u028c\u028d\u028e\u028f\u0290\u0291\u0292\u0293\u0294\u0295\u0296\u0297\u0298\u0299\u029a\u029b\u029c\u029d\u029e\u029f\u02a0\u02a1\u02a2\u02a3\u02a4\u02a5\u02a6\u02a7\u02a8\u02a9\u02aa\u02ab\u02ac\u02ad\u02ae\u02af\u0390\u03ac\u03ad\u03ae\u03af\u03b0\u03b1\u03b2\u03b3\u03b4\u03b5\u03b6\u03b7\u03b8\u03b9\u03ba\u03bb\u03bc\u03bd\u03be\u03bf\u03c0\u03c1\u03c2\u03c3\u03c4\u03c5\u03c6\u03c7\u03c8\u03c9\u03ca\u03cb\u03cc\u03cd\u03ce\u03d0\u03d1\u03d5\u03d6\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef\u
03f0\u03f1\u03f2\u03f3\u03f5\u03f8\u03fb\u03fc\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d\u044e\u044f\u0450\u0451\u0452\u0453\u0454\u0455\u0456\u0457\u0458\u0459\u045a\u045b\u045c\u045d\u045e\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0561\u0562\u0563\u0564\u0565\u0566\u0567\u0568\u0569\u056a\u056b\u056c\u056d\u056e\u056f\u0570\u0571\u0572\u0573\u0574\u0575\u0576\u0577\u0578\u0579\u057a\u057b\u057c\u057d\u057e\u057f\u0580\u0581\u0582\u0583\u0584\u0585\u0586\u0587\u1d00\u1d01\u1d02\u1d03\u1d04\u1d05\u1d06\u1d07\u1d08\u1d09\u1d0a\u1d0b\u1d0c\u1d0d\u1d0e\u1d0f\u1d10\u1d11\u1d12\u1d13\u1d14\u1d15\u1d16\u1d17\u1d18\u1d19\u1d1a\u1d1b\u1d1c\u1d1d\u1d1e\u1d1f\u1d20\u1d21\u1d22\u1d23\u1d24\u1d25\u1d26\u1d27\u1d28\u1d29\u1d2a\u1d2b\u1d62\u1d63\u1d64\u1d65\u1d66\u1d67\u1d68\u1d69\u1d6a\u1d6b\u1d6c\u1d6d\u1d6e\u1d6f\u1d70\u1d71\u1d72\u1d73\u1d74\u1d75\u1d76\u1d77\u1d79\u1d7a\u1d7b\u1d7c\u1d7d\u1d7e\u1d7f\u1d80\u1d81\u1d82\u1d83\u1d84\u1d85\u1d86\u1d87\u1d88\u1d89\u1d8a\u1d8b\u1d8c\u1d8d\u1d8e\u1d8f\u1d90\u1d91\u1d92\u1d93\u1d94\u1d95\u1d96\u1d97\u1d98\u1d99\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e
75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95\u1e96\u1e97\u1e98\u1e99\u1e9a\u1e9b\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1f00\u1f01\u1f02\u1f03\u1f04\u1f05\u1f06\u1f07\u1f10\u1f11\u1f12\u1f13\u1f14\u1f15\u1f20\u1f21\u1f22\u1f23\u1f24\u1f25\u1f26\u1f27\u1f30\u1f31\u1f32\u1f33\u1f34\u1f35\u1f36\u1f37\u1f40\u1f41\u1f42\u1f43\u1f44\u1f45\u1f50\u1f51\u1f52\u1f53\u1f54\u1f55\u1f56\u1f57\u1f60\u1f61\u1f62\u1f63\u1f64\u1f65\u1f66\u1f67\u1f70\u1f71\u1f72\u1f73\u1f74\u1f75\u1f76\u1f77\u1f78\u1f79\u1f7a\u1f7b\u1f7c\u1f7d\u1f80\u1f81\u1f82\u1f83\u1f84\u1f85\u1f86\u1f87\u1f90\u1f91\u1f92\u1f93\u1f94\u1f95\u1f96\u1f97\u1fa0\u1fa1\u1fa2\u1fa3\u1fa4\u1fa5\u1fa6\u1fa7\u1fb0\u1fb1\u1fb2\u1fb3\u1fb4\u1fb6\u1fb7\u1fbe\u1fc2\u1fc3\u1fc4\u1fc6\u1fc7\u1fd0\u1fd1\u1fd2\u1fd3\u1fd6\u1fd7\u1fe0\u1fe1\u1fe2\u1fe3\u1fe4\u1fe5\u1fe6\u1fe7\u1ff2\u1ff3\u1ff4\u1ff6\u1ff7\u2071\u207f\u210a\u210e\u210f\u2113\u212f\u2134\u2139\u213c\u213d\u2146\u2147\u2148\u2149\u2c30\u2c31\u2c32\u2c33\u2c34\u2c35\u2c36\u2c37\u2c38\u2c39\u2c3a\u2c3b\u2c3c\u2c3d\u2c3e\u2c3f\u2c40\u2c41\u2c42\u2c43\u2c44\u2c45\u2c46\u2c47\u2c48\u2c49\u2c4a\u2c4b\u2c4c\u2c4d\u2c4e\u2c4f\u2c50\u2c51\u2c52\u2c53\u2c54\u2c55\u2c56\u2c57\u2c58\u2c59\u2c5a\u2c5b\u2c5c\u2c5d\u2c5e\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3\u2ce4\u2d00\u2d01\u2d02\u2d03\u2d04\u2d05\u2d06\u2d07\u2d08\u2d09\u2d0a\u2d0b\u2d0c\u2d0d\u2d0e\u2d0f\u2d10\u2d11\u2d12\u2d13\u2d14\u2d15\u2d16\u2d17\u2d18\u2d19\u2d1a\u2d1b\u2d1c\u2d1d\u2d1e
\u2d1f\u2d20\u2d21\u2d22\u2d23\u2d24\u2d25\ufb00\ufb01\ufb02\ufb03\ufb04\ufb05\ufb06\ufb13\ufb14\ufb15\ufb16\ufb17\uff41\uff42\uff43\uff44\uff45\uff46\uff47\uff48\uff49\uff4a\uff4b\uff4c\uff4d\uff4e\uff4f\uff50\uff51\uff52\uff53\uff54\uff55\uff56\uff57\uff58\uff59\uff5a'
+
+Lm = u'\u02b0\u02b1\u02b2\u02b3\u02b4\u02b5\u02b6\u02b7\u02b8\u02b9\u02ba\u02bb\u02bc\u02bd\u02be\u02bf\u02c0\u02c1\u02c6\u02c7\u02c8\u02c9\u02ca\u02cb\u02cc\u02cd\u02ce\u02cf\u02d0\u02d1\u02e0\u02e1\u02e2\u02e3\u02e4\u02ee\u037a\u0559\u0640\u06e5\u06e6\u0e46\u0ec6\u10fc\u17d7\u1843\u1d2c\u1d2d\u1d2e\u1d2f\u1d30\u1d31\u1d32\u1d33\u1d34\u1d35\u1d36\u1d37\u1d38\u1d39\u1d3a\u1d3b\u1d3c\u1d3d\u1d3e\u1d3f\u1d40\u1d41\u1d42\u1d43\u1d44\u1d45\u1d46\u1d47\u1d48\u1d49\u1d4a\u1d4b\u1d4c\u1d4d\u1d4e\u1d4f\u1d50\u1d51\u1d52\u1d53\u1d54\u1d55\u1d56\u1d57\u1d58\u1d59\u1d5a\u1d5b\u1d5c\u1d5d\u1d5e\u1d5f\u1d60\u1d61\u1d78\u1d9b\u1d9c\u1d9d\u1d9e\u1d9f\u1da0\u1da1\u1da2\u1da3\u1da4\u1da5\u1da6\u1da7\u1da8\u1da9\u1daa\u1dab\u1dac\u1dad\u1dae\u1daf\u1db0\u1db1\u1db2\u1db3\u1db4\u1db5\u1db6\u1db7\u1db8\u1db9\u1dba\u1dbb\u1dbc\u1dbd\u1dbe\u1dbf\u2090\u2091\u2092\u2093\u2094\u2d6f\u3005\u3031\u3032\u3033\u3034\u3035\u303b\u309d\u309e\u30fc\u30fd\u30fe\ua015\uff70\uff9e\uff9f'
+
+Lo = u'\u01bb\u01c0\u01c1\u01c2\u01c3\u05d0\u05d1\u05d2\u05d3\u05d4\u05d5\u05d6\u05d7\u05d8\u05d9\u05da\u05db\u05dc\u05dd\u05de\u05df\u05e0\u05e1\u05e2\u05e3\u05e4\u05e5\u05e6\u05e7\u05e8\u05e9\u05ea\u05f0\u05f1\u05f2\u0621\u0622\u0623\u0624\u0625\u0626\u0627\u0628\u0629\u062a\u062b\u062c\u062d\u062e\u062f\u0630\u0631\u0632\u0633\u0634\u0635\u0636\u0637\u0638\u0639\u063a\u0641\u0642\u0643\u0644\u0645\u0646\u0647\u0648\u0649\u064a\u066e\u066f\u0671\u0672\u0673\u0674\u0675\u0676\u0677\u0678\u0679\u067a\u067b\u067c\u067d\u067e\u067f\u0680\u0681\u0682\u0683\u0684\u0685\u0686\u0687\u0688\u0689\u068a\u068b\u068c\u068d\u068e\u068f\u0690\u0691\u0692\u0693\u0694\u0695\u0696\u0697\u0698\u0699\u069a\u069b\u069c\u069d\u069e\u069f\u06a0\u06a1\u06a2\u06a3\u06a4\u06a5\u06a6\u06a7\u06a8\u06a9\u06aa\u06ab\u06ac\u06ad\u06ae\u06af\u06b0\u06b1\u06b2\u06b3\u06b4\u06b5\u06b6\u06b7\u06b8\u06b9\u06ba\u06bb\u06bc\u06bd\u06be\u06bf\u06c0\u06c1\u06c2\u06c3\u06c4\u06c5\u06c6\u06c7\u06c8\u06c9\u06ca\u06cb\u06cc\u06cd\u06ce\u06cf\u06d0\u06d1\u06d2\u06d3\u06d5\u06ee\u06ef\u06fa\u06fb\u06fc\u06ff\u0710\u0712\u0713\u0714\u0715\u0716\u0717\u0718\u0719\u071a\u071b\u071c\u071d\u071e\u071f\u0720\u0721\u0722\u0723\u0724\u0725\u0726\u0727\u0728\u0729\u072a\u072b\u072c\u072d\u072e\u072f\u074d\u074e\u074f\u0750\u0751\u0752\u0753\u0754\u0755\u0756\u0757\u0758\u0759\u075a\u075b\u075c\u075d\u075e\u075f\u0760\u0761\u0762\u0763\u0764\u0765\u0766\u0767\u0768\u0769\u076a\u076b\u076c\u076d\u0780\u0781\u0782\u0783\u0784\u0785\u0786\u0787\u0788\u0789\u078a\u078b\u078c\u078d\u078e\u078f\u0790\u0791\u0792\u0793\u0794\u0795\u0796\u0797\u0798\u0799\u079a\u079b\u079c\u079d\u079e\u079f\u07a0\u07a1\u07a2\u07a3\u07a4\u07a5\u07b1\u0904\u0905\u0906\u0907\u0908\u0909\u090a\u090b\u090c\u090d\u090e\u090f\u0910\u0911\u0912\u0913\u0914\u0915\u0916\u0917\u0918\u0919\u091a\u091b\u091c\u091d\u091e\u091f\u0920\u0921\u0922\u0923\u0924\u0925\u0926\u0927\u0928\u0929\u092a\u092b\u092c\u092d\u092e\u092f\u0930\u0931\u0932\u0933\u0934\u0935
\u0936\u0937\u0938\u0939\u093d\u0950\u0958\u0959\u095a\u095b\u095c\u095d\u095e\u095f\u0960\u0961\u097d\u0985\u0986\u0987\u0988\u0989\u098a\u098b\u098c\u098f\u0990\u0993\u0994\u0995\u0996\u0997\u0998\u0999\u099a\u099b\u099c\u099d\u099e\u099f\u09a0\u09a1\u09a2\u09a3\u09a4\u09a5\u09a6\u09a7\u09a8\u09aa\u09ab\u09ac\u09ad\u09ae\u09af\u09b0\u09b2\u09b6\u09b7\u09b8\u09b9\u09bd\u09ce\u09dc\u09dd\u09df\u09e0\u09e1\u09f0\u09f1\u0a05\u0a06\u0a07\u0a08\u0a09\u0a0a\u0a0f\u0a10\u0a13\u0a14\u0a15\u0a16\u0a17\u0a18\u0a19\u0a1a\u0a1b\u0a1c\u0a1d\u0a1e\u0a1f\u0a20\u0a21\u0a22\u0a23\u0a24\u0a25\u0a26\u0a27\u0a28\u0a2a\u0a2b\u0a2c\u0a2d\u0a2e\u0a2f\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59\u0a5a\u0a5b\u0a5c\u0a5e\u0a72\u0a73\u0a74\u0a85\u0a86\u0a87\u0a88\u0a89\u0a8a\u0a8b\u0a8c\u0a8d\u0a8f\u0a90\u0a91\u0a93\u0a94\u0a95\u0a96\u0a97\u0a98\u0a99\u0a9a\u0a9b\u0a9c\u0a9d\u0a9e\u0a9f\u0aa0\u0aa1\u0aa2\u0aa3\u0aa4\u0aa5\u0aa6\u0aa7\u0aa8\u0aaa\u0aab\u0aac\u0aad\u0aae\u0aaf\u0ab0\u0ab2\u0ab3\u0ab5\u0ab6\u0ab7\u0ab8\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05\u0b06\u0b07\u0b08\u0b09\u0b0a\u0b0b\u0b0c\u0b0f\u0b10\u0b13\u0b14\u0b15\u0b16\u0b17\u0b18\u0b19\u0b1a\u0b1b\u0b1c\u0b1d\u0b1e\u0b1f\u0b20\u0b21\u0b22\u0b23\u0b24\u0b25\u0b26\u0b27\u0b28\u0b2a\u0b2b\u0b2c\u0b2d\u0b2e\u0b2f\u0b30\u0b32\u0b33\u0b35\u0b36\u0b37\u0b38\u0b39\u0b3d\u0b5c\u0b5d\u0b5f\u0b60\u0b61\u0b71\u0b83\u0b85\u0b86\u0b87\u0b88\u0b89\u0b8a\u0b8e\u0b8f\u0b90\u0b92\u0b93\u0b94\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8\u0ba9\u0baa\u0bae\u0baf\u0bb0\u0bb1\u0bb2\u0bb3\u0bb4\u0bb5\u0bb6\u0bb7\u0bb8\u0bb9\u0c05\u0c06\u0c07\u0c08\u0c09\u0c0a\u0c0b\u0c0c\u0c0e\u0c0f\u0c10\u0c12\u0c13\u0c14\u0c15\u0c16\u0c17\u0c18\u0c19\u0c1a\u0c1b\u0c1c\u0c1d\u0c1e\u0c1f\u0c20\u0c21\u0c22\u0c23\u0c24\u0c25\u0c26\u0c27\u0c28\u0c2a\u0c2b\u0c2c\u0c2d\u0c2e\u0c2f\u0c30\u0c31\u0c32\u0c33\u0c35\u0c36\u0c37\u0c38\u0c39\u0c60\u0c61\u0c85\u0c86\u0c87\u0c88\u0c89\u0c8a\u0c8b\u0c8c\u0c8e\u0c8f\u0c90\u0c92\u0c93\u0c94\u0c95\u0c96\u0c97\u0c98\u0c99\u0c9a\u
0c9b\u0c9c\u0c9d\u0c9e\u0c9f\u0ca0\u0ca1\u0ca2\u0ca3\u0ca4\u0ca5\u0ca6\u0ca7\u0ca8\u0caa\u0cab\u0cac\u0cad\u0cae\u0caf\u0cb0\u0cb1\u0cb2\u0cb3\u0cb5\u0cb6\u0cb7\u0cb8\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0d05\u0d06\u0d07\u0d08\u0d09\u0d0a\u0d0b\u0d0c\u0d0e\u0d0f\u0d10\u0d12\u0d13\u0d14\u0d15\u0d16\u0d17\u0d18\u0d19\u0d1a\u0d1b\u0d1c\u0d1d\u0d1e\u0d1f\u0d20\u0d21\u0d22\u0d23\u0d24\u0d25\u0d26\u0d27\u0d28\u0d2a\u0d2b\u0d2c\u0d2d\u0d2e\u0d2f\u0d30\u0d31\u0d32\u0d33\u0d34\u0d35\u0d36\u0d37\u0d38\u0d39\u0d60\u0d61\u0d85\u0d86\u0d87\u0d88\u0d89\u0d8a\u0d8b\u0d8c\u0d8d\u0d8e\u0d8f\u0d90\u0d91\u0d92\u0d93\u0d94\u0d95\u0d96\u0d9a\u0d9b\u0d9c\u0d9d\u0d9e\u0d9f\u0da0\u0da1\u0da2\u0da3\u0da4\u0da5\u0da6\u0da7\u0da8\u0da9\u0daa\u0dab\u0dac\u0dad\u0dae\u0daf\u0db0\u0db1\u0db3\u0db4\u0db5\u0db6\u0db7\u0db8\u0db9\u0dba\u0dbb\u0dbd\u0dc0\u0dc1\u0dc2\u0dc3\u0dc4\u0dc5\u0dc6\u0e01\u0e02\u0e03\u0e04\u0e05\u0e06\u0e07\u0e08\u0e09\u0e0a\u0e0b\u0e0c\u0e0d\u0e0e\u0e0f\u0e10\u0e11\u0e12\u0e13\u0e14\u0e15\u0e16\u0e17\u0e18\u0e19\u0e1a\u0e1b\u0e1c\u0e1d\u0e1e\u0e1f\u0e20\u0e21\u0e22\u0e23\u0e24\u0e25\u0e26\u0e27\u0e28\u0e29\u0e2a\u0e2b\u0e2c\u0e2d\u0e2e\u0e2f\u0e30\u0e32\u0e33\u0e40\u0e41\u0e42\u0e43\u0e44\u0e45\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94\u0e95\u0e96\u0e97\u0e99\u0e9a\u0e9b\u0e9c\u0e9d\u0e9e\u0e9f\u0ea1\u0ea2\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead\u0eae\u0eaf\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0\u0ec1\u0ec2\u0ec3\u0ec4\u0edc\u0edd\u0f00\u0f40\u0f41\u0f42\u0f43\u0f44\u0f45\u0f46\u0f47\u0f49\u0f4a\u0f4b\u0f4c\u0f4d\u0f4e\u0f4f\u0f50\u0f51\u0f52\u0f53\u0f54\u0f55\u0f56\u0f57\u0f58\u0f59\u0f5a\u0f5b\u0f5c\u0f5d\u0f5e\u0f5f\u0f60\u0f61\u0f62\u0f63\u0f64\u0f65\u0f66\u0f67\u0f68\u0f69\u0f6a\u0f88\u0f89\u0f8a\u0f8b\u1000\u1001\u1002\u1003\u1004\u1005\u1006\u1007\u1008\u1009\u100a\u100b\u100c\u100d\u100e\u100f\u1010\u1011\u1012\u1013\u1014\u1015\u1016\u1017\u1018\u1019\u101a\u101b\u101c\u101d\u101e\u101f\u1020\u1021\u1023\u1024\u1025\u1026\u1027\u1029\u102a\u1050\u1051\u1052\u1053\u1054\u1055\u10
d0\u10d1\u10d2\u10d3\u10d4\u10d5\u10d6\u10d7\u10d8\u10d9\u10da\u10db\u10dc\u10dd\u10de\u10df\u10e0\u10e1\u10e2\u10e3\u10e4\u10e5\u10e6\u10e7\u10e8\u10e9\u10ea\u10eb\u10ec\u10ed\u10ee\u10ef\u10f0\u10f1\u10f2\u10f3\u10f4\u10f5\u10f6\u10f7\u10f8\u10f9\u10fa\u1100\u1101\u1102\u1103\u1104\u1105\u1106\u1107\u1108\u1109\u110a\u110b\u110c\u110d\u110e\u110f\u1110\u1111\u1112\u1113\u1114\u1115\u1116\u1117\u1118\u1119\u111a\u111b\u111c\u111d\u111e\u111f\u1120\u1121\u1122\u1123\u1124\u1125\u1126\u1127\u1128\u1129\u112a\u112b\u112c\u112d\u112e\u112f\u1130\u1131\u1132\u1133\u1134\u1135\u1136\u1137\u1138\u1139\u113a\u113b\u113c\u113d\u113e\u113f\u1140\u1141\u1142\u1143\u1144\u1145\u1146\u1147\u1148\u1149\u114a\u114b\u114c\u114d\u114e\u114f\u1150\u1151\u1152\u1153\u1154\u1155\u1156\u1157\u1158\u1159\u115f\u1160\u1161\u1162\u1163\u1164\u1165\u1166\u1167\u1168\u1169\u116a\u116b\u116c\u116d\u116e\u116f\u1170\u1171\u1172\u1173\u1174\u1175\u1176\u1177\u1178\u1179\u117a\u117b\u117c\u117d\u117e\u117f\u1180\u1181\u1182\u1183\u1184\u1185\u1186\u1187\u1188\u1189\u118a\u118b\u118c\u118d\u118e\u118f\u1190\u1191\u1192\u1193\u1194\u1195\u1196\u1197\u1198\u1199\u119a\u119b\u119c\u119d\u119e\u119f\u11a0\u11a1\u11a2\u11a8\u11a9\u11aa\u11ab\u11ac\u11ad\u11ae\u11af\u11b0\u11b1\u11b2\u11b3\u11b4\u11b5\u11b6\u11b7\u11b8\u11b9\u11ba\u11bb\u11bc\u11bd\u11be\u11bf\u11c0\u11c1\u11c2\u11c3\u11c4\u11c5\u11c6\u11c7\u11c8\u11c9\u11ca\u11cb\u11cc\u11cd\u11ce\u11cf\u11d0\u11d1\u11d2\u11d3\u11d4\u11d5\u11d6\u11d7\u11d8\u11d9\u11da\u11db\u11dc\u11dd\u11de\u11df\u11e0\u11e1\u11e2\u11e3\u11e4\u11e5\u11e6\u11e7\u11e8\u11e9\u11ea\u11eb\u11ec\u11ed\u11ee\u11ef\u11f0\u11f1\u11f2\u11f3\u11f4\u11f5\u11f6\u11f7\u11f8\u11f9\u1200\u1201\u1202\u1203\u1204\u1205\u1206\u1207\u1208\u1209\u120a\u120b\u120c\u120d\u120e\u120f\u1210\u1211\u1212\u1213\u1214\u1215\u1216\u1217\u1218\u1219\u121a\u121b\u121c\u121d\u121e\u121f\u1220\u1221\u1222\u1223\u1224\u1225\u1226\u1227\u1228\u1229\u122a\u122b\u122c\u122d\u122e\u122f\u1230\u1231\u1232
\u1233\u1234\u1235\u1236\u1237\u1238\u1239\u123a\u123b\u123c\u123d\u123e\u123f\u1240\u1241\u1242\u1243\u1244\u1245\u1246\u1247\u1248\u124a\u124b\u124c\u124d\u1250\u1251\u1252\u1253\u1254\u1255\u1256\u1258\u125a\u125b\u125c\u125d\u1260\u1261\u1262\u1263\u1264\u1265\u1266\u1267\u1268\u1269\u126a\u126b\u126c\u126d\u126e\u126f\u1270\u1271\u1272\u1273\u1274\u1275\u1276\u1277\u1278\u1279\u127a\u127b\u127c\u127d\u127e\u127f\u1280\u1281\u1282\u1283\u1284\u1285\u1286\u1287\u1288\u128a\u128b\u128c\u128d\u1290\u1291\u1292\u1293\u1294\u1295\u1296\u1297\u1298\u1299\u129a\u129b\u129c\u129d\u129e\u129f\u12a0\u12a1\u12a2\u12a3\u12a4\u12a5\u12a6\u12a7\u12a8\u12a9\u12aa\u12ab\u12ac\u12ad\u12ae\u12af\u12b0\u12b2\u12b3\u12b4\u12b5\u12b8\u12b9\u12ba\u12bb\u12bc\u12bd\u12be\u12c0\u12c2\u12c3\u12c4\u12c5\u12c8\u12c9\u12ca\u12cb\u12cc\u12cd\u12ce\u12cf\u12d0\u12d1\u12d2\u12d3\u12d4\u12d5\u12d6\u12d8\u12d9\u12da\u12db\u12dc\u12dd\u12de\u12df\u12e0\u12e1\u12e2\u12e3\u12e4\u12e5\u12e6\u12e7\u12e8\u12e9\u12ea\u12eb\u12ec\u12ed\u12ee\u12ef\u12f0\u12f1\u12f2\u12f3\u12f4\u12f5\u12f6\u12f7\u12f8\u12f9\u12fa\u12fb\u12fc\u12fd\u12fe\u12ff\u1300\u1301\u1302\u1303\u1304\u1305\u1306\u1307\u1308\u1309\u130a\u130b\u130c\u130d\u130e\u130f\u1310\u1312\u1313\u1314\u1315\u1318\u1319\u131a\u131b\u131c\u131d\u131e\u131f\u1320\u1321\u1322\u1323\u1324\u1325\u1326\u1327\u1328\u1329\u132a\u132b\u132c\u132d\u132e\u132f\u1330\u1331\u1332\u1333\u1334\u1335\u1336\u1337\u1338\u1339\u133a\u133b\u133c\u133d\u133e\u133f\u1340\u1341\u1342\u1343\u1344\u1345\u1346\u1347\u1348\u1349\u134a\u134b\u134c\u134d\u134e\u134f\u1350\u1351\u1352\u1353\u1354\u1355\u1356\u1357\u1358\u1359\u135a\u1380\u1381\u1382\u1383\u1384\u1385\u1386\u1387\u1388\u1389\u138a\u138b\u138c\u138d\u138e\u138f\u13a0\u13a1\u13a2\u13a3\u13a4\u13a5\u13a6\u13a7\u13a8\u13a9\u13aa\u13ab\u13ac\u13ad\u13ae\u13af\u13b0\u13b1\u13b2\u13b3\u13b4\u13b5\u13b6\u13b7\u13b8\u13b9\u13ba\u13bb\u13bc\u13bd\u13be\u13bf\u13c0\u13c1\u13c2\u13c3\u13c4\u13c5\u13c6\u13c7\u13c8\u13c9\u
13ca\u13cb\u13cc\u13cd\u13ce\u13cf\u13d0\u13d1\u13d2\u13d3\u13d4\u13d5\u13d6\u13d7\u13d8\u13d9\u13da\u13db\u13dc\u13dd\u13de\u13df\u13e0\u13e1\u13e2\u13e3\u13e4\u13e5\u13e6\u13e7\u13e8\u13e9\u13ea\u13eb\u13ec\u13ed\u13ee\u13ef\u13f0\u13f1\u13f2\u13f3\u13f4\u1401\u1402\u1403\u1404\u1405\u1406\u1407\u1408\u1409\u140a\u140b\u140c\u140d\u140e\u140f\u1410\u1411\u1412\u1413\u1414\u1415\u1416\u1417\u1418\u1419\u141a\u141b\u141c\u141d\u141e\u141f\u1420\u1421\u1422\u1423\u1424\u1425\u1426\u1427\u1428\u1429\u142a\u142b\u142c\u142d\u142e\u142f\u1430\u1431\u1432\u1433\u1434\u1435\u1436\u1437\u1438\u1439\u143a\u143b\u143c\u143d\u143e\u143f\u1440\u1441\u1442\u1443\u1444\u1445\u1446\u1447\u1448\u1449\u144a\u144b\u144c\u144d\u144e\u144f\u1450\u1451\u1452\u1453\u1454\u1455\u1456\u1457\u1458\u1459\u145a\u145b\u145c\u145d\u145e\u145f\u1460\u1461\u1462\u1463\u1464\u1465\u1466\u1467\u1468\u1469\u146a\u146b\u146c\u146d\u146e\u146f\u1470\u1471\u1472\u1473\u1474\u1475\u1476\u1477\u1478\u1479\u147a\u147b\u147c\u147d\u147e\u147f\u1480\u1481\u1482\u1483\u1484\u1485\u1486\u1487\u1488\u1489\u148a\u148b\u148c\u148d\u148e\u148f\u1490\u1491\u1492\u1493\u1494\u1495\u1496\u1497\u1498\u1499\u149a\u149b\u149c\u149d\u149e\u149f\u14a0\u14a1\u14a2\u14a3\u14a4\u14a5\u14a6\u14a7\u14a8\u14a9\u14aa\u14ab\u14ac\u14ad\u14ae\u14af\u14b0\u14b1\u14b2\u14b3\u14b4\u14b5\u14b6\u14b7\u14b8\u14b9\u14ba\u14bb\u14bc\u14bd\u14be\u14bf\u14c0\u14c1\u14c2\u14c3\u14c4\u14c5\u14c6\u14c7\u14c8\u14c9\u14ca\u14cb\u14cc\u14cd\u14ce\u14cf\u14d0\u14d1\u14d2\u14d3\u14d4\u14d5\u14d6\u14d7\u14d8\u14d9\u14da\u14db\u14dc\u14dd\u14de\u14df\u14e0\u14e1\u14e2\u14e3\u14e4\u14e5\u14e6\u14e7\u14e8\u14e9\u14ea\u14eb\u14ec\u14ed\u14ee\u14ef\u14f0\u14f1\u14f2\u14f3\u14f4\u14f5\u14f6\u14f7\u14f8\u14f9\u14fa\u14fb\u14fc\u14fd\u14fe\u14ff\u1500\u1501\u1502\u1503\u1504\u1505\u1506\u1507\u1508\u1509\u150a\u150b\u150c\u150d\u150e\u150f\u1510\u1511\u1512\u1513\u1514\u1515\u1516\u1517\u1518\u1519\u151a\u151b\u151c\u151d\u151e\u151f\u1520\u1521\u1522\u15
23\u1524\u1525\u1526\u1527\u1528\u1529\u152a\u152b\u152c\u152d\u152e\u152f\u1530\u1531\u1532\u1533\u1534\u1535\u1536\u1537\u1538\u1539\u153a\u153b\u153c\u153d\u153e\u153f\u1540\u1541\u1542\u1543\u1544\u1545\u1546\u1547\u1548\u1549\u154a\u154b\u154c\u154d\u154e\u154f\u1550\u1551\u1552\u1553\u1554\u1555\u1556\u1557\u1558\u1559\u155a\u155b\u155c\u155d\u155e\u155f\u1560\u1561\u1562\u1563\u1564\u1565\u1566\u1567\u1568\u1569\u156a\u156b\u156c\u156d\u156e\u156f\u1570\u1571\u1572\u1573\u1574\u1575\u1576\u1577\u1578\u1579\u157a\u157b\u157c\u157d\u157e\u157f\u1580\u1581\u1582\u1583\u1584\u1585\u1586\u1587\u1588\u1589\u158a\u158b\u158c\u158d\u158e\u158f\u1590\u1591\u1592\u1593\u1594\u1595\u1596\u1597\u1598\u1599\u159a\u159b\u159c\u159d\u159e\u159f\u15a0\u15a1\u15a2\u15a3\u15a4\u15a5\u15a6\u15a7\u15a8\u15a9\u15aa\u15ab\u15ac\u15ad\u15ae\u15af\u15b0\u15b1\u15b2\u15b3\u15b4\u15b5\u15b6\u15b7\u15b8\u15b9\u15ba\u15bb\u15bc\u15bd\u15be\u15bf\u15c0\u15c1\u15c2\u15c3\u15c4\u15c5\u15c6\u15c7\u15c8\u15c9\u15ca\u15cb\u15cc\u15cd\u15ce\u15cf\u15d0\u15d1\u15d2\u15d3\u15d4\u15d5\u15d6\u15d7\u15d8\u15d9\u15da\u15db\u15dc\u15dd\u15de\u15df\u15e0\u15e1\u15e2\u15e3\u15e4\u15e5\u15e6\u15e7\u15e8\u15e9\u15ea\u15eb\u15ec\u15ed\u15ee\u15ef\u15f0\u15f1\u15f2\u15f3\u15f4\u15f5\u15f6\u15f7\u15f8\u15f9\u15fa\u15fb\u15fc\u15fd\u15fe\u15ff\u1600\u1601\u1602\u1603\u1604\u1605\u1606\u1607\u1608\u1609\u160a\u160b\u160c\u160d\u160e\u160f\u1610\u1611\u1612\u1613\u1614\u1615\u1616\u1617\u1618\u1619\u161a\u161b\u161c\u161d\u161e\u161f\u1620\u1621\u1622\u1623\u1624\u1625\u1626\u1627\u1628\u1629\u162a\u162b\u162c\u162d\u162e\u162f\u1630\u1631\u1632\u1633\u1634\u1635\u1636\u1637\u1638\u1639\u163a\u163b\u163c\u163d\u163e\u163f\u1640\u1641\u1642\u1643\u1644\u1645\u1646\u1647\u1648\u1649\u164a\u164b\u164c\u164d\u164e\u164f\u1650\u1651\u1652\u1653\u1654\u1655\u1656\u1657\u1658\u1659\u165a\u165b\u165c\u165d\u165e\u165f\u1660\u1661\u1662\u1663\u1664\u1665\u1666\u1667\u1668\u1669\u166a\u166b\u166c\u166f\u1670\u1671\u1672
\u1673\u1674\u1675\u1676\u1681\u1682\u1683\u1684\u1685\u1686\u1687\u1688\u1689\u168a\u168b\u168c\u168d\u168e\u168f\u1690\u1691\u1692\u1693\u1694\u1695\u1696\u1697\u1698\u1699\u169a\u16a0\u16a1\u16a2\u16a3\u16a4\u16a5\u16a6\u16a7\u16a8\u16a9\u16aa\u16ab\u16ac\u16ad\u16ae\u16af\u16b0\u16b1\u16b2\u16b3\u16b4\u16b5\u16b6\u16b7\u16b8\u16b9\u16ba\u16bb\u16bc\u16bd\u16be\u16bf\u16c0\u16c1\u16c2\u16c3\u16c4\u16c5\u16c6\u16c7\u16c8\u16c9\u16ca\u16cb\u16cc\u16cd\u16ce\u16cf\u16d0\u16d1\u16d2\u16d3\u16d4\u16d5\u16d6\u16d7\u16d8\u16d9\u16da\u16db\u16dc\u16dd\u16de\u16df\u16e0\u16e1\u16e2\u16e3\u16e4\u16e5\u16e6\u16e7\u16e8\u16e9\u16ea\u1700\u1701\u1702\u1703\u1704\u1705\u1706\u1707\u1708\u1709\u170a\u170b\u170c\u170e\u170f\u1710\u1711\u1720\u1721\u1722\u1723\u1724\u1725\u1726\u1727\u1728\u1729\u172a\u172b\u172c\u172d\u172e\u172f\u1730\u1731\u1740\u1741\u1742\u1743\u1744\u1745\u1746\u1747\u1748\u1749\u174a\u174b\u174c\u174d\u174e\u174f\u1750\u1751\u1760\u1761\u1762\u1763\u1764\u1765\u1766\u1767\u1768\u1769\u176a\u176b\u176c\u176e\u176f\u1770\u1780\u1781\u1782\u1783\u1784\u1785\u1786\u1787\u1788\u1789\u178a\u178b\u178c\u178d\u178e\u178f\u1790\u1791\u1792\u1793\u1794\u1795\u1796\u1797\u1798\u1799\u179a\u179b\u179c\u179d\u179e\u179f\u17a0\u17a1\u17a2\u17a3\u17a4\u17a5\u17a6\u17a7\u17a8\u17a9\u17aa\u17ab\u17ac\u17ad\u17ae\u17af\u17b0\u17b1\u17b2\u17b3\u17dc\u1820\u1821\u1822\u1823\u1824\u1825\u1826\u1827\u1828\u1829\u182a\u182b\u182c\u182d\u182e\u182f\u1830\u1831\u1832\u1833\u1834\u1835\u1836\u1837\u1838\u1839\u183a\u183b\u183c\u183d\u183e\u183f\u1840\u1841\u1842\u1844\u1845\u1846\u1847\u1848\u1849\u184a\u184b\u184c\u184d\u184e\u184f\u1850\u1851\u1852\u1853\u1854\u1855\u1856\u1857\u1858\u1859\u185a\u185b\u185c\u185d\u185e\u185f\u1860\u1861\u1862\u1863\u1864\u1865\u1866\u1867\u1868\u1869\u186a\u186b\u186c\u186d\u186e\u186f\u1870\u1871\u1872\u1873\u1874\u1875\u1876\u1877\u1880\u1881\u1882\u1883\u1884\u1885\u1886\u1887\u1888\u1889\u188a\u188b\u188c\u188d\u188e\u188f\u1890\u1891\u1892\u
1893\u1894\u1895\u1896\u1897\u1898\u1899\u189a\u189b\u189c\u189d\u189e\u189f\u18a0\u18a1\u18a2\u18a3\u18a4\u18a5\u18a6\u18a7\u18a8\u1900\u1901\u1902\u1903\u1904\u1905\u1906\u1907\u1908\u1909\u190a\u190b\u190c\u190d\u190e\u190f\u1910\u1911\u1912\u1913\u1914\u1915\u1916\u1917\u1918\u1919\u191a\u191b\u191c\u1950\u1951\u1952\u1953\u1954\u1955\u1956\u1957\u1958\u1959\u195a\u195b\u195c\u195d\u195e\u195f\u1960\u1961\u1962\u1963\u1964\u1965\u1966\u1967\u1968\u1969\u196a\u196b\u196c\u196d\u1970\u1971\u1972\u1973\u1974\u1980\u1981\u1982\u1983\u1984\u1985\u1986\u1987\u1988\u1989\u198a\u198b\u198c\u198d\u198e\u198f\u1990\u1991\u1992\u1993\u1994\u1995\u1996\u1997\u1998\u1999\u199a\u199b\u199c\u199d\u199e\u199f\u19a0\u19a1\u19a2\u19a3\u19a4\u19a5\u19a6\u19a7\u19a8\u19a9\u19c1\u19c2\u19c3\u19c4\u19c5\u19c6\u19c7\u1a00\u1a01\u1a02\u1a03\u1a04\u1a05\u1a06\u1a07\u1a08\u1a09\u1a0a\u1a0b\u1a0c\u1a0d\u1a0e\u1a0f\u1a10\u1a11\u1a12\u1a13\u1a14\u1a15\u1a16\u2135\u2136\u2137\u2138\u2d30\u2d31\u2d32\u2d33\u2d34\u2d35\u2d36\u2d37\u2d38\u2d39\u2d3a\u2d3b\u2d3c\u2d3d\u2d3e\u2d3f\u2d40\u2d41\u2d42\u2d43\u2d44\u2d45\u2d46\u2d47\u2d48\u2d49\u2d4a\u2d4b\u2d4c\u2d4d\u2d4e\u2d4f\u2d50\u2d51\u2d52\u2d53\u2d54\u2d55\u2d56\u2d57\u2d58\u2d59\u2d5a\u2d5b\u2d5c\u2d5d\u2d5e\u2d5f\u2d60\u2d61\u2d62\u2d63\u2d64\u2d65\u2d80\u2d81\u2d82\u2d83\u2d84\u2d85\u2d86\u2d87\u2d88\u2d89\u2d8a\u2d8b\u2d8c\u2d8d\u2d8e\u2d8f\u2d90\u2d91\u2d92\u2d93\u2d94\u2d95\u2d96\u2da0\u2da1\u2da2\u2da3\u2da4\u2da5\u2da6\u2da8\u2da9\u2daa\u2dab\u2dac\u2dad\u2dae\u2db0\u2db1\u2db2\u2db3\u2db4\u2db5\u2db6\u2db8\u2db9\u2dba\u2dbb\u2dbc\u2dbd\u2dbe\u2dc0\u2dc1\u2dc2\u2dc3\u2dc4\u2dc5\u2dc6\u2dc8\u2dc9\u2dca\u2dcb\u2dcc\u2dcd\u2dce\u2dd0\u2dd1\u2dd2\u2dd3\u2dd4\u2dd5\u2dd6\u2dd8\u2dd9\u2dda\u2ddb\u2ddc\u2ddd\u2dde\u3006\u303c\u3041\u3042\u3043\u3044\u3045\u3046\u3047\u3048\u3049\u304a\u304b\u304c\u304d\u304e\u304f\u3050\u3051\u3052\u3053\u3054\u3055\u3056\u3057\u3058\u3059\u305a\u305b\u305c\u305d\u305e\u305f\u3060\u3061\u3062\u3063\u3064\u30
65\u3066\u3067\u3068\u3069\u306a\u306b\u306c\u306d\u306e\u306f\u3070\u3071\u3072\u3073\u3074\u3075\u3076\u3077\u3078\u3079\u307a\u307b\u307c\u307d\u307e\u307f\u3080\u3081\u3082\u3083\u3084\u3085\u3086\u3087\u3088\u3089\u308a\u308b\u308c\u308d\u308e\u308f\u3090\u3091\u3092\u3093\u3094\u3095\u3096\u309f\u30a1\u30a2\u30a3\u30a4\u30a5\u30a6\u30a7\u30a8\u30a9\u30aa\u30ab\u30ac\u30ad\u30ae\u30af\u30b0\u30b1\u30b2\u30b3\u30b4\u30b5\u30b6\u30b7\u30b8\u30b9\u30ba\u30bb\u30bc\u30bd\u30be\u30bf\u30c0\u30c1\u30c2\u30c3\u30c4\u30c5\u30c6\u30c7\u30c8\u30c9\u30ca\u30cb\u30cc\u30cd\u30ce\u30cf\u30d0\u30d1\u30d2\u30d3\u30d4\u30d5\u30d6\u30d7\u30d8\u30d9\u30da\u30db\u30dc\u30dd\u30de\u30df\u30e0\u30e1\u30e2\u30e3\u30e4\u30e5\u30e6\u30e7\u30e8\u30e9\u30ea\u30eb\u30ec\u30ed\u30ee\u30ef\u30f0\u30f1\u30f2\u30f3\u30f4\u30f5\u30f6\u30f7\u30f8\u30f9\u30fa\u30ff\u3105\u3106\u3107\u3108\u3109\u310a\u310b\u310c\u310d\u310e\u310f\u3110\u3111\u3112\u3113\u3114\u3115\u3116\u3117\u3118\u3119\u311a\u311b\u311c\u311d\u311e\u311f\u3120\u3121\u3122\u3123\u3124\u3125\u3126\u3127\u3128\u3129\u312a\u312b\u312c\u3131\u3132\u3133\u3134\u3135\u3136\u3137\u3138\u3139\u313a\u313b\u313c\u313d\u313e\u313f\u3140\u3141\u3142\u3143\u3144\u3145\u3146\u3147\u3148\u3149\u314a\u314b\u314c\u314d\u314e\u314f\u3150\u3151\u3152\u3153\u3154\u3155\u3156\u3157\u3158\u3159\u315a\u315b\u315c\u315d\u315e\u315f\u3160\u3161\u3162\u3163\u3164\u3165\u3166\u3167\u3168\u3169\u316a\u316b\u316c\u316d\u316e\u316f\u3170\u3171\u3172\u3173\u3174\u3175\u3176\u3177\u3178\u3179\u317a\u317b\u317c\u317d\u317e\u317f\u3180\u3181\u3182\u3183\u3184\u3185\u3186\u3187\u3188\u3189\u318a\u318b\u318c\u318d\u318e\u31a0\u31a1\u31a2\u31a3\u31a4\u31a5\u31a6\u31a7\u31a8\u31a9\u31aa\u31ab\u31ac\u31ad\u31ae\u31af\u31b0\u31b1\u31b2\u31b3\u31b4\u31b5\u31b6\u31b7\u31f0\u31f1\u31f2\u31f3\u31f4\u31f5\u31f6\u31f7\u31f8\u31f9\u31fa\u31fb\u31fc\u31fd\u31fe\u31ff\u3400\u3401\u3402\u3403\u3404\u3405\u3406\u3407\u3408\u3409\u340a\u340b\u340c\u340d\u340e\u340f\u3410\u3411
\u3412\u3413\u3414\u3415\u3416\u3417\u3418\u3419\u341a\u341b\u341c\u341d\u341e\u341f\u3420\u3421\u3422\u3423\u3424\u3425\u3426\u3427\u3428\u3429\u342a\u342b\u342c\u342d\u342e\u342f\u3430\u3431\u3432\u3433\u3434\u3435\u3436\u3437\u3438\u3439\u343a\u343b\u343c\u343d\u343e\u343f\u3440\u3441\u3442\u3443\u3444\u3445\u3446\u3447\u3448\u3449\u344a\u344b\u344c\u344d\u344e\u344f\u3450\u3451\u3452\u3453\u3454\u3455\u3456\u3457\u3458\u3459\u345a\u345b\u345c\u345d\u345e\u345f\u3460\u3461\u3462\u3463\u3464\u3465\u3466\u3467\u3468\u3469\u346a\u346b\u346c\u346d\u346e\u346f\u3470\u3471\u3472\u3473\u3474\u3475\u3476\u3477\u3478\u3479\u347a\u347b\u347c\u347d\u347e\u347f\u3480\u3481\u3482\u3483\u3484\u3485\u3486\u3487\u3488\u3489\u348a\u348b\u348c\u348d\u348e\u348f\u3490\u3491\u3492\u3493\u3494\u3495\u3496\u3497\u3498\u3499\u349a\u349b\u349c\u349d\u349e\u349f\u34a0\u34a1\u34a2\u34a3\u34a4\u34a5\u34a6\u34a7\u34a8\u34a9\u34aa\u34ab\u34ac\u34ad\u34ae\u34af\u34b0\u34b1\u34b2\u34b3\u34b4\u34b5\u34b6\u34b7\u34b8\u34b9\u34ba\u34bb\u34bc\u34bd\u34be\u34bf\u34c0\u34c1\u34c2\u34c3\u34c4\u34c5\u34c6\u34c7\u34c8\u34c9\u34ca\u34cb\u34cc\u34cd\u34ce\u34cf\u34d0\u34d1\u34d2\u34d3\u34d4\u34d5\u34d6\u34d7\u34d8\u34d9\u34da\u34db\u34dc\u34dd\u34de\u34df\u34e0\u34e1\u34e2\u34e3\u34e4\u34e5\u34e6\u34e7\u34e8\u34e9\u34ea\u34eb\u34ec\u34ed\u34ee\u34ef\u34f0\u34f1\u34f2\u34f3\u34f4\u34f5\u34f6\u34f7\u34f8\u34f9\u34fa\u34fb\u34fc\u34fd\u34fe\u34ff\u3500\u3501\u3502\u3503\u3504\u3505\u3506\u3507\u3508\u3509\u350a\u350b\u350c\u350d\u350e\u350f\u3510\u3511\u3512\u3513\u3514\u3515\u3516\u3517\u3518\u3519\u351a\u351b\u351c\u351d\u351e\u351f\u3520\u3521\u3522\u3523\u3524\u3525\u3526\u3527\u3528\u3529\u352a\u352b\u352c\u352d\u352e\u352f\u3530\u3531\u3532\u3533\u3534\u3535\u3536\u3537\u3538\u3539\u353a\u353b\u353c\u353d\u353e\u353f\u3540\u3541\u3542\u3543\u3544\u3545\u3546\u3547\u3548\u3549\u354a\u354b\u354c\u354d\u354e\u354f\u3550\u3551\u3552\u3553\u3554\u3555\u3556\u3557\u3558\u3559\u355a\u355b\u355c\u355d\u355e\u
355f\u3560\u3561\u3562\u3563\u3564\u3565\u3566\u3567\u3568\u3569\u356a\u356b\u356c\u356d\u356e\u356f\u3570\u3571\u3572\u3573\u3574\u3575\u3576\u3577\u3578\u3579\u357a\u357b\u357c\u357d\u357e\u357f\u3580\u3581\u3582\u3583\u3584\u3585\u3586\u3587\u3588\u3589\u358a\u358b\u358c\u358d\u358e\u358f\u3590\u3591\u3592\u3593\u3594\u3595\u3596\u3597\u3598\u3599\u359a\u359b\u359c\u359d\u359e\u359f\u35a0\u35a1\u35a2\u35a3\u35a4\u35a5\u35a6\u35a7\u35a8\u35a9\u35aa\u35ab\u35ac\u35ad\u35ae\u35af\u35b0\u35b1\u35b2\u35b3\u35b4\u35b5\u35b6\u35b7\u35b8\u35b9\u35ba\u35bb\u35bc\u35bd\u35be\u35bf\u35c0\u35c1\u35c2\u35c3\u35c4\u35c5\u35c6\u35c7\u35c8\u35c9\u35ca\u35cb\u35cc\u35cd\u35ce\u35cf\u35d0\u35d1\u35d2\u35d3\u35d4\u35d5\u35d6\u35d7\u35d8\u35d9\u35da\u35db\u35dc\u35dd\u35de\u35df\u35e0\u35e1\u35e2\u35e3\u35e4\u35e5\u35e6\u35e7\u35e8\u35e9\u35ea\u35eb\u35ec\u35ed\u35ee\u35ef\u35f0\u35f1\u35f2\u35f3\u35f4\u35f5\u35f6\u35f7\u35f8\u35f9\u35fa\u35fb\u35fc\u35fd\u35fe\u35ff\u3600\u3601\u3602\u3603\u3604\u3605\u3606\u3607\u3608\u3609\u360a\u360b\u360c\u360d\u360e\u360f\u3610\u3611\u3612\u3613\u3614\u3615\u3616\u3617\u3618\u3619\u361a\u361b\u361c\u361d\u361e\u361f\u3620\u3621\u3622\u3623\u3624\u3625\u3626\u3627\u3628\u3629\u362a\u362b\u362c\u362d\u362e\u362f\u3630\u3631\u3632\u3633\u3634\u3635\u3636\u3637\u3638\u3639\u363a\u363b\u363c\u363d\u363e\u363f\u3640\u3641\u3642\u3643\u3644\u3645\u3646\u3647\u3648\u3649\u364a\u364b\u364c\u364d\u364e\u364f\u3650\u3651\u3652\u3653\u3654\u3655\u3656\u3657\u3658\u3659\u365a\u365b\u365c\u365d\u365e\u365f\u3660\u3661\u3662\u3663\u3664\u3665\u3666\u3667\u3668\u3669\u366a\u366b\u366c\u366d\u366e\u366f\u3670\u3671\u3672\u3673\u3674\u3675\u3676\u3677\u3678\u3679\u367a\u367b\u367c\u367d\u367e\u367f\u3680\u3681\u3682\u3683\u3684\u3685\u3686\u3687\u3688\u3689\u368a\u368b\u368c\u368d\u368e\u368f\u3690\u3691\u3692\u3693\u3694\u3695\u3696\u3697\u3698\u3699\u369a\u369b\u369c\u369d\u369e\u369f\u36a0\u36a1\u36a2\u36a3\u36a4\u36a5\u36a6\u36a7\u36a8\u36a9\u36aa\u36ab\u36
ac\u36ad\u36ae\u36af\u36b0\u36b1\u36b2\u36b3\u36b4\u36b5\u36b6\u36b7\u36b8\u36b9\u36ba\u36bb\u36bc\u36bd\u36be\u36bf\u36c0\u36c1\u36c2\u36c3\u36c4\u36c5\u36c6\u36c7\u36c8\u36c9\u36ca\u36cb\u36cc\u36cd\u36ce\u36cf\u36d0\u36d1\u36d2\u36d3\u36d4\u36d5\u36d6\u36d7\u36d8\u36d9\u36da\u36db\u36dc\u36dd\u36de\u36df\u36e0\u36e1\u36e2\u36e3\u36e4\u36e5\u36e6\u36e7\u36e8\u36e9\u36ea\u36eb\u36ec\u36ed\u36ee\u36ef\u36f0\u36f1\u36f2\u36f3\u36f4\u36f5\u36f6\u36f7\u36f8\u36f9\u36fa\u36fb\u36fc\u36fd\u36fe\u36ff\u3700\u3701\u3702\u3703\u3704\u3705\u3706\u3707\u3708\u3709\u370a\u370b\u370c\u370d\u370e\u370f\u3710\u3711\u3712\u3713\u3714\u3715\u3716\u3717\u3718\u3719\u371a\u371b\u371c\u371d\u371e\u371f\u3720\u3721\u3722\u3723\u3724\u3725\u3726\u3727\u3728\u3729\u372a\u372b\u372c\u372d\u372e\u372f\u3730\u3731\u3732\u3733\u3734\u3735\u3736\u3737\u3738\u3739\u373a\u373b\u373c\u373d\u373e\u373f\u3740\u3741\u3742\u3743\u3744\u3745\u3746\u3747\u3748\u3749\u374a\u374b\u374c\u374d\u374e\u374f\u3750\u3751\u3752\u3753\u3754\u3755\u3756\u3757\u3758\u3759\u375a\u375b\u375c\u375d\u375e\u375f\u3760\u3761\u3762\u3763\u3764\u3765\u3766\u3767\u3768\u3769\u376a\u376b\u376c\u376d\u376e\u376f\u3770\u3771\u3772\u3773\u3774\u3775\u3776\u3777\u3778\u3779\u377a\u377b\u377c\u377d\u377e\u377f\u3780\u3781\u3782\u3783\u3784\u3785\u3786\u3787\u3788\u3789\u378a\u378b\u378c\u378d\u378e\u378f\u3790\u3791\u3792\u3793\u3794\u3795\u3796\u3797\u3798\u3799\u379a\u379b\u379c\u379d\u379e\u379f\u37a0\u37a1\u37a2\u37a3\u37a4\u37a5\u37a6\u37a7\u37a8\u37a9\u37aa\u37ab\u37ac\u37ad\u37ae\u37af\u37b0\u37b1\u37b2\u37b3\u37b4\u37b5\u37b6\u37b7\u37b8\u37b9\u37ba\u37bb\u37bc\u37bd\u37be\u37bf\u37c0\u37c1\u37c2\u37c3\u37c4\u37c5\u37c6\u37c7\u37c8\u37c9\u37ca\u37cb\u37cc\u37cd\u37ce\u37cf\u37d0\u37d1\u37d2\u37d3\u37d4\u37d5\u37d6\u37d7\u37d8\u37d9\u37da\u37db\u37dc\u37dd\u37de\u37df\u37e0\u37e1\u37e2\u37e3\u37e4\u37e5\u37e6\u37e7\u37e8\u37e9\u37ea\u37eb\u37ec\u37ed\u37ee\u37ef\u37f0\u37f1\u37f2\u37f3\u37f4\u37f5\u37f6\u37f7\u37f8\u37f9
\u37fa\u37fb\u37fc\u37fd\u37fe\u37ff\u3800\u3801\u3802\u3803\u3804\u3805\u3806\u3807\u3808\u3809\u380a\u380b\u380c\u380d\u380e\u380f\u3810\u3811\u3812\u3813\u3814\u3815\u3816\u3817\u3818\u3819\u381a\u381b\u381c\u381d\u381e\u381f\u3820\u3821\u3822\u3823\u3824\u3825\u3826\u3827\u3828\u3829\u382a\u382b\u382c\u382d\u382e\u382f\u3830\u3831\u3832\u3833\u3834\u3835\u3836\u3837\u3838\u3839\u383a\u383b\u383c\u383d\u383e\u383f\u3840\u3841\u3842\u3843\u3844\u3845\u3846\u3847\u3848\u3849\u384a\u384b\u384c\u384d\u384e\u384f\u3850\u3851\u3852\u3853\u3854\u3855\u3856\u3857\u3858\u3859\u385a\u385b\u385c\u385d\u385e\u385f\u3860\u3861\u3862\u3863\u3864\u3865\u3866\u3867\u3868\u3869\u386a\u386b\u386c\u386d\u386e\u386f\u3870\u3871\u3872\u3873\u3874\u3875\u3876\u3877\u3878\u3879\u387a\u387b\u387c\u387d\u387e\u387f\u3880\u3881\u3882\u3883\u3884\u3885\u3886\u3887\u3888\u3889\u388a\u388b\u388c\u388d\u388e\u388f\u3890\u3891\u3892\u3893\u3894\u3895\u3896\u3897\u3898\u3899\u389a\u389b\u389c\u389d\u389e\u389f\u38a0\u38a1\u38a2\u38a3\u38a4\u38a5\u38a6\u38a7\u38a8\u38a9\u38aa\u38ab\u38ac\u38ad\u38ae\u38af\u38b0\u38b1\u38b2\u38b3\u38b4\u38b5\u38b6\u38b7\u38b8\u38b9\u38ba\u38bb\u38bc\u38bd\u38be\u38bf\u38c0\u38c1\u38c2\u38c3\u38c4\u38c5\u38c6\u38c7\u38c8\u38c9\u38ca\u38cb\u38cc\u38cd\u38ce\u38cf\u38d0\u38d1\u38d2\u38d3\u38d4\u38d5\u38d6\u38d7\u38d8\u38d9\u38da\u38db\u38dc\u38dd\u38de\u38df\u38e0\u38e1\u38e2\u38e3\u38e4\u38e5\u38e6\u38e7\u38e8\u38e9\u38ea\u38eb\u38ec\u38ed\u38ee\u38ef\u38f0\u38f1\u38f2\u38f3\u38f4\u38f5\u38f6\u38f7\u38f8\u38f9\u38fa\u38fb\u38fc\u38fd\u38fe\u38ff\u3900\u3901\u3902\u3903\u3904\u3905\u3906\u3907\u3908\u3909\u390a\u390b\u390c\u390d\u390e\u390f\u3910\u3911\u3912\u3913\u3914\u3915\u3916\u3917\u3918\u3919\u391a\u391b\u391c\u391d\u391e\u391f\u3920\u3921\u3922\u3923\u3924\u3925\u3926\u3927\u3928\u3929\u392a\u392b\u392c\u392d\u392e\u392f\u3930\u3931\u3932\u3933\u3934\u3935\u3936\u3937\u3938\u3939\u393a\u393b\u393c\u393d\u393e\u393f\u3940\u3941\u3942\u3943\u3944\u3945\u3946\u
3947\u3948\u3949\u394a\u394b\u394c\u394d\u394e\u394f\u3950\u3951\u3952\u3953\u3954\u3955\u3956\u3957\u3958\u3959\u395a\u395b\u395c\u395d\u395e\u395f\u3960\u3961\u3962\u3963\u3964\u3965\u3966\u3967\u3968\u3969\u396a\u396b\u396c\u396d\u396e\u396f\u3970\u3971\u3972\u3973\u3974\u3975\u3976\u3977\u3978\u3979\u397a\u397b\u397c\u397d\u397e\u397f\u3980\u3981\u3982\u3983\u3984\u3985\u3986\u3987\u3988\u3989\u398a\u398b\u398c\u398d\u398e\u398f\u3990\u3991\u3992\u3993\u3994\u3995\u3996\u3997\u3998\u3999\u399a\u399b\u399c\u399d\u399e\u399f\u39a0\u39a1\u39a2\u39a3\u39a4\u39a5\u39a6\u39a7\u39a8\u39a9\u39aa\u39ab\u39ac\u39ad\u39ae\u39af\u39b0\u39b1\u39b2\u39b3\u39b4\u39b5\u39b6\u39b7\u39b8\u39b9\u39ba\u39bb\u39bc\u39bd\u39be\u39bf\u39c0\u39c1\u39c2\u39c3\u39c4\u39c5\u39c6\u39c7\u39c8\u39c9\u39ca\u39cb\u39cc\u39cd\u39ce\u39cf\u39d0\u39d1\u39d2\u39d3\u39d4\u39d5\u39d6\u39d7\u39d8\u39d9\u39da\u39db\u39dc\u39dd\u39de\u39df\u39e0\u39e1\u39e2\u39e3\u39e4\u39e5\u39e6\u39e7\u39e8\u39e9\u39ea\u39eb\u39ec\u39ed\u39ee\u39ef\u39f0\u39f1\u39f2\u39f3\u39f4\u39f5\u39f6\u39f7\u39f8\u39f9\u39fa\u39fb\u39fc\u39fd\u39fe\u39ff\u3a00\u3a01\u3a02\u3a03\u3a04\u3a05\u3a06\u3a07\u3a08\u3a09\u3a0a\u3a0b\u3a0c\u3a0d\u3a0e\u3a0f\u3a10\u3a11\u3a12\u3a13\u3a14\u3a15\u3a16\u3a17\u3a18\u3a19\u3a1a\u3a1b\u3a1c\u3a1d\u3a1e\u3a1f\u3a20\u3a21\u3a22\u3a23\u3a24\u3a25\u3a26\u3a27\u3a28\u3a29\u3a2a\u3a2b\u3a2c\u3a2d\u3a2e\u3a2f\u3a30\u3a31\u3a32\u3a33\u3a34\u3a35\u3a36\u3a37\u3a38\u3a39\u3a3a\u3a3b\u3a3c\u3a3d\u3a3e\u3a3f\u3a40\u3a41\u3a42\u3a43\u3a44\u3a45\u3a46\u3a47\u3a48\u3a49\u3a4a\u3a4b\u3a4c\u3a4d\u3a4e\u3a4f\u3a50\u3a51\u3a52\u3a53\u3a54\u3a55\u3a56\u3a57\u3a58\u3a59\u3a5a\u3a5b\u3a5c\u3a5d\u3a5e\u3a5f\u3a60\u3a61\u3a62\u3a63\u3a64\u3a65\u3a66\u3a67\u3a68\u3a69\u3a6a\u3a6b\u3a6c\u3a6d\u3a6e\u3a6f\u3a70\u3a71\u3a72\u3a73\u3a74\u3a75\u3a76\u3a77\u3a78\u3a79\u3a7a\u3a7b\u3a7c\u3a7d\u3a7e\u3a7f\u3a80\u3a81\u3a82\u3a83\u3a84\u3a85\u3a86\u3a87\u3a88\u3a89\u3a8a\u3a8b\u3a8c\u3a8d\u3a8e\u3a8f\u3a90\u3a91\u3a92\u3a93\u3a
94\u3a95\u3a96\u3a97\u3a98\u3a99\u3a9a\u3a9b\u3a9c\u3a9d\u3a9e\u3a9f\u3aa0\u3aa1\u3aa2\u3aa3\u3aa4\u3aa5\u3aa6\u3aa7\u3aa8\u3aa9\u3aaa\u3aab\u3aac\u3aad\u3aae\u3aaf\u3ab0\u3ab1\u3ab2\u3ab3\u3ab4\u3ab5\u3ab6\u3ab7\u3ab8\u3ab9\u3aba\u3abb\u3abc\u3abd\u3abe\u3abf\u3ac0\u3ac1\u3ac2\u3ac3\u3ac4\u3ac5\u3ac6\u3ac7\u3ac8\u3ac9\u3aca\u3acb\u3acc\u3acd\u3ace\u3acf\u3ad0\u3ad1\u3ad2\u3ad3\u3ad4\u3ad5\u3ad6\u3ad7\u3ad8\u3ad9\u3ada\u3adb\u3adc\u3add\u3ade\u3adf\u3ae0\u3ae1\u3ae2\u3ae3\u3ae4\u3ae5\u3ae6\u3ae7\u3ae8\u3ae9\u3aea\u3aeb\u3aec\u3aed\u3aee\u3aef\u3af0\u3af1\u3af2\u3af3\u3af4\u3af5\u3af6\u3af7\u3af8\u3af9\u3afa\u3afb\u3afc\u3afd\u3afe\u3aff\u3b00\u3b01\u3b02\u3b03\u3b04\u3b05\u3b06\u3b07\u3b08\u3b09\u3b0a\u3b0b\u3b0c\u3b0d\u3b0e\u3b0f\u3b10\u3b11\u3b12\u3b13\u3b14\u3b15\u3b16\u3b17\u3b18\u3b19\u3b1a\u3b1b\u3b1c\u3b1d\u3b1e\u3b1f\u3b20\u3b21\u3b22\u3b23\u3b24\u3b25\u3b26\u3b27\u3b28\u3b29\u3b2a\u3b2b\u3b2c\u3b2d\u3b2e\u3b2f\u3b30\u3b31\u3b32\u3b33\u3b34\u3b35\u3b36\u3b37\u3b38\u3b39\u3b3a\u3b3b\u3b3c\u3b3d\u3b3e\u3b3f\u3b40\u3b41\u3b42\u3b43\u3b44\u3b45\u3b46\u3b47\u3b48\u3b49\u3b4a\u3b4b\u3b4c\u3b4d\u3b4e\u3b4f\u3b50\u3b51\u3b52\u3b53\u3b54\u3b55\u3b56\u3b57\u3b58\u3b59\u3b5a\u3b5b\u3b5c\u3b5d\u3b5e\u3b5f\u3b60\u3b61\u3b62\u3b63\u3b64\u3b65\u3b66\u3b67\u3b68\u3b69\u3b6a\u3b6b\u3b6c\u3b6d\u3b6e\u3b6f\u3b70\u3b71\u3b72\u3b73\u3b74\u3b75\u3b76\u3b77\u3b78\u3b79\u3b7a\u3b7b\u3b7c\u3b7d\u3b7e\u3b7f\u3b80\u3b81\u3b82\u3b83\u3b84\u3b85\u3b86\u3b87\u3b88\u3b89\u3b8a\u3b8b\u3b8c\u3b8d\u3b8e\u3b8f\u3b90\u3b91\u3b92\u3b93\u3b94\u3b95\u3b96\u3b97\u3b98\u3b99\u3b9a\u3b9b\u3b9c\u3b9d\u3b9e\u3b9f\u3ba0\u3ba1\u3ba2\u3ba3\u3ba4\u3ba5\u3ba6\u3ba7\u3ba8\u3ba9\u3baa\u3bab\u3bac\u3bad\u3bae\u3baf\u3bb0\u3bb1\u3bb2\u3bb3\u3bb4\u3bb5\u3bb6\u3bb7\u3bb8\u3bb9\u3bba\u3bbb\u3bbc\u3bbd\u3bbe\u3bbf\u3bc0\u3bc1\u3bc2\u3bc3\u3bc4\u3bc5\u3bc6\u3bc7\u3bc8\u3bc9\u3bca\u3bcb\u3bcc\u3bcd\u3bce\u3bcf\u3bd0\u3bd1\u3bd2\u3bd3\u3bd4\u3bd5\u3bd6\u3bd7\u3bd8\u3bd9\u3bda\u3bdb\u3bdc\u3bdd\u3bde\u3bdf\u3be0\u3be1
\u3be2\u3be3\u3be4\u3be5\u3be6\u3be7\u3be8\u3be9\u3bea\u3beb\u3bec\u3bed\u3bee\u3bef\u3bf0\u3bf1\u3bf2\u3bf3\u3bf4\u3bf5\u3bf6\u3bf7\u3bf8\u3bf9\u3bfa\u3bfb\u3bfc\u3bfd\u3bfe\u3bff\u3c00\u3c01\u3c02\u3c03\u3c04\u3c05\u3c06\u3c07\u3c08\u3c09\u3c0a\u3c0b\u3c0c\u3c0d\u3c0e\u3c0f\u3c10\u3c11\u3c12\u3c13\u3c14\u3c15\u3c16\u3c17\u3c18\u3c19\u3c1a\u3c1b\u3c1c\u3c1d\u3c1e\u3c1f\u3c20\u3c21\u3c22\u3c23\u3c24\u3c25\u3c26\u3c27\u3c28\u3c29\u3c2a\u3c2b\u3c2c\u3c2d\u3c2e\u3c2f\u3c30\u3c31\u3c32\u3c33\u3c34\u3c35\u3c36\u3c37\u3c38\u3c39\u3c3a\u3c3b\u3c3c\u3c3d\u3c3e\u3c3f\u3c40\u3c41\u3c42\u3c43\u3c44\u3c45\u3c46\u3c47\u3c48\u3c49\u3c4a\u3c4b\u3c4c\u3c4d\u3c4e\u3c4f\u3c50\u3c51\u3c52\u3c53\u3c54\u3c55\u3c56\u3c57\u3c58\u3c59\u3c5a\u3c5b\u3c5c\u3c5d\u3c5e\u3c5f\u3c60\u3c61\u3c62\u3c63\u3c64\u3c65\u3c66\u3c67\u3c68\u3c69\u3c6a\u3c6b\u3c6c\u3c6d\u3c6e\u3c6f\u3c70\u3c71\u3c72\u3c73\u3c74\u3c75\u3c76\u3c77\u3c78\u3c79\u3c7a\u3c7b\u3c7c\u3c7d\u3c7e\u3c7f\u3c80\u3c81\u3c82\u3c83\u3c84\u3c85\u3c86\u3c87\u3c88\u3c89\u3c8a\u3c8b\u3c8c\u3c8d\u3c8e\u3c8f\u3c90\u3c91\u3c92\u3c93\u3c94\u3c95\u3c96\u3c97\u3c98\u3c99\u3c9a\u3c9b\u3c9c\u3c9d\u3c9e\u3c9f\u3ca0\u3ca1\u3ca2\u3ca3\u3ca4\u3ca5\u3ca6\u3ca7\u3ca8\u3ca9\u3caa\u3cab\u3cac\u3cad\u3cae\u3caf\u3cb0\u3cb1\u3cb2\u3cb3\u3cb4\u3cb5\u3cb6\u3cb7\u3cb8\u3cb9\u3cba\u3cbb\u3cbc\u3cbd\u3cbe\u3cbf\u3cc0\u3cc1\u3cc2\u3cc3\u3cc4\u3cc5\u3cc6\u3cc7\u3cc8\u3cc9\u3cca\u3ccb\u3ccc\u3ccd\u3cce\u3ccf\u3cd0\u3cd1\u3cd2\u3cd3\u3cd4\u3cd5\u3cd6\u3cd7\u3cd8\u3cd9\u3cda\u3cdb\u3cdc\u3cdd\u3cde\u3cdf\u3ce0\u3ce1\u3ce2\u3ce3\u3ce4\u3ce5\u3ce6\u3ce7\u3ce8\u3ce9\u3cea\u3ceb\u3cec\u3ced\u3cee\u3cef\u3cf0\u3cf1\u3cf2\u3cf3\u3cf4\u3cf5\u3cf6\u3cf7\u3cf8\u3cf9\u3cfa\u3cfb\u3cfc\u3cfd\u3cfe\u3cff\u3d00\u3d01\u3d02\u3d03\u3d04\u3d05\u3d06\u3d07\u3d08\u3d09\u3d0a\u3d0b\u3d0c\u3d0d\u3d0e\u3d0f\u3d10\u3d11\u3d12\u3d13\u3d14\u3d15\u3d16\u3d17\u3d18\u3d19\u3d1a\u3d1b\u3d1c\u3d1d\u3d1e\u3d1f\u3d20\u3d21\u3d22\u3d23\u3d24\u3d25\u3d26\u3d27\u3d28\u3d29\u3d2a\u3d2b\u3d2c\u3d2d\u3d2e\u
3d2f\u3d30\u3d31\u3d32\u3d33\u3d34\u3d35\u3d36\u3d37\u3d38\u3d39\u3d3a\u3d3b\u3d3c\u3d3d\u3d3e\u3d3f\u3d40\u3d41\u3d42\u3d43\u3d44\u3d45\u3d46\u3d47\u3d48\u3d49\u3d4a\u3d4b\u3d4c\u3d4d\u3d4e\u3d4f\u3d50\u3d51\u3d52\u3d53\u3d54\u3d55\u3d56\u3d57\u3d58\u3d59\u3d5a\u3d5b\u3d5c\u3d5d\u3d5e\u3d5f\u3d60\u3d61\u3d62\u3d63\u3d64\u3d65\u3d66\u3d67\u3d68\u3d69\u3d6a\u3d6b\u3d6c\u3d6d\u3d6e\u3d6f\u3d70\u3d71\u3d72\u3d73\u3d74\u3d75\u3d76\u3d77\u3d78\u3d79\u3d7a\u3d7b\u3d7c\u3d7d\u3d7e\u3d7f\u3d80\u3d81\u3d82\u3d83\u3d84\u3d85\u3d86\u3d87\u3d88\u3d89\u3d8a\u3d8b\u3d8c\u3d8d\u3d8e\u3d8f\u3d90\u3d91\u3d92\u3d93\u3d94\u3d95\u3d96\u3d97\u3d98\u3d99\u3d9a\u3d9b\u3d9c\u3d9d\u3d9e\u3d9f\u3da0\u3da1\u3da2\u3da3\u3da4\u3da5\u3da6\u3da7\u3da8\u3da9\u3daa\u3dab\u3dac\u3dad\u3dae\u3daf\u3db0\u3db1\u3db2\u3db3\u3db4\u3db5\u3db6\u3db7\u3db8\u3db9\u3dba\u3dbb\u3dbc\u3dbd\u3dbe\u3dbf\u3dc0\u3dc1\u3dc2\u3dc3\u3dc4\u3dc5\u3dc6\u3dc7\u3dc8\u3dc9\u3dca\u3dcb\u3dcc\u3dcd\u3dce\u3dcf\u3dd0\u3dd1\u3dd2\u3dd3\u3dd4\u3dd5\u3dd6\u3dd7\u3dd8\u3dd9\u3dda\u3ddb\u3ddc\u3ddd\u3dde\u3ddf\u3de0\u3de1\u3de2\u3de3\u3de4\u3de5\u3de6\u3de7\u3de8\u3de9\u3dea\u3deb\u3dec\u3ded\u3dee\u3def\u3df0\u3df1\u3df2\u3df3\u3df4\u3df5\u3df6\u3df7\u3df8\u3df9\u3dfa\u3dfb\u3dfc\u3dfd\u3dfe\u3dff\u3e00\u3e01\u3e02\u3e03\u3e04\u3e05\u3e06\u3e07\u3e08\u3e09\u3e0a\u3e0b\u3e0c\u3e0d\u3e0e\u3e0f\u3e10\u3e11\u3e12\u3e13\u3e14\u3e15\u3e16\u3e17\u3e18\u3e19\u3e1a\u3e1b\u3e1c\u3e1d\u3e1e\u3e1f\u3e20\u3e21\u3e22\u3e23\u3e24\u3e25\u3e26\u3e27\u3e28\u3e29\u3e2a\u3e2b\u3e2c\u3e2d\u3e2e\u3e2f\u3e30\u3e31\u3e32\u3e33\u3e34\u3e35\u3e36\u3e37\u3e38\u3e39\u3e3a\u3e3b\u3e3c\u3e3d\u3e3e\u3e3f\u3e40\u3e41\u3e42\u3e43\u3e44\u3e45\u3e46\u3e47\u3e48\u3e49\u3e4a\u3e4b\u3e4c\u3e4d\u3e4e\u3e4f\u3e50\u3e51\u3e52\u3e53\u3e54\u3e55\u3e56\u3e57\u3e58\u3e59\u3e5a\u3e5b\u3e5c\u3e5d\u3e5e\u3e5f\u3e60\u3e61\u3e62\u3e63\u3e64\u3e65\u3e66\u3e67\u3e68\u3e69\u3e6a\u3e6b\u3e6c\u3e6d\u3e6e\u3e6f\u3e70\u3e71\u3e72\u3e73\u3e74\u3e75\u3e76\u3e77\u3e78\u3e79\u3e7a\u3e7b\u3e
7c\u3e7d\u3e7e\u3e7f\u3e80\u3e81\u3e82\u3e83\u3e84\u3e85\u3e86\u3e87\u3e88\u3e89\u3e8a\u3e8b\u3e8c\u3e8d\u3e8e\u3e8f\u3e90\u3e91\u3e92\u3e93\u3e94\u3e95\u3e96\u3e97\u3e98\u3e99\u3e9a\u3e9b\u3e9c\u3e9d\u3e9e\u3e9f\u3ea0\u3ea1\u3ea2\u3ea3\u3ea4\u3ea5\u3ea6\u3ea7\u3ea8\u3ea9\u3eaa\u3eab\u3eac\u3ead\u3eae\u3eaf\u3eb0\u3eb1\u3eb2\u3eb3\u3eb4\u3eb5\u3eb6\u3eb7\u3eb8\u3eb9\u3eba\u3ebb\u3ebc\u3ebd\u3ebe\u3ebf\u3ec0\u3ec1\u3ec2\u3ec3\u3ec4\u3ec5\u3ec6\u3ec7\u3ec8\u3ec9\u3eca\u3ecb\u3ecc\u3ecd\u3ece\u3ecf\u3ed0\u3ed1\u3ed2\u3ed3\u3ed4\u3ed5\u3ed6\u3ed7\u3ed8\u3ed9\u3eda\u3edb\u3edc\u3edd\u3ede\u3edf\u3ee0\u3ee1\u3ee2\u3ee3\u3ee4\u3ee5\u3ee6\u3ee7\u3ee8\u3ee9\u3eea\u3eeb\u3eec\u3eed\u3eee\u3eef\u3ef0\u3ef1\u3ef2\u3ef3\u3ef4\u3ef5\u3ef6\u3ef7\u3ef8\u3ef9\u3efa\u3efb\u3efc\u3efd\u3efe\u3eff\u3f00\u3f01\u3f02\u3f03\u3f04\u3f05\u3f06\u3f07\u3f08\u3f09\u3f0a\u3f0b\u3f0c\u3f0d\u3f0e\u3f0f\u3f10\u3f11\u3f12\u3f13\u3f14\u3f15\u3f16\u3f17\u3f18\u3f19\u3f1a\u3f1b\u3f1c\u3f1d\u3f1e\u3f1f\u3f20\u3f21\u3f22\u3f23\u3f24\u3f25\u3f26\u3f27\u3f28\u3f29\u3f2a\u3f2b\u3f2c\u3f2d\u3f2e\u3f2f\u3f30\u3f31\u3f32\u3f33\u3f34\u3f35\u3f36\u3f37\u3f38\u3f39\u3f3a\u3f3b\u3f3c\u3f3d\u3f3e\u3f3f\u3f40\u3f41\u3f42\u3f43\u3f44\u3f45\u3f46\u3f47\u3f48\u3f49\u3f4a\u3f4b\u3f4c\u3f4d\u3f4e\u3f4f\u3f50\u3f51\u3f52\u3f53\u3f54\u3f55\u3f56\u3f57\u3f58\u3f59\u3f5a\u3f5b\u3f5c\u3f5d\u3f5e\u3f5f\u3f60\u3f61\u3f62\u3f63\u3f64\u3f65\u3f66\u3f67\u3f68\u3f69\u3f6a\u3f6b\u3f6c\u3f6d\u3f6e\u3f6f\u3f70\u3f71\u3f72\u3f73\u3f74\u3f75\u3f76\u3f77\u3f78\u3f79\u3f7a\u3f7b\u3f7c\u3f7d\u3f7e\u3f7f\u3f80\u3f81\u3f82\u3f83\u3f84\u3f85\u3f86\u3f87\u3f88\u3f89\u3f8a\u3f8b\u3f8c\u3f8d\u3f8e\u3f8f\u3f90\u3f91\u3f92\u3f93\u3f94\u3f95\u3f96\u3f97\u3f98\u3f99\u3f9a\u3f9b\u3f9c\u3f9d\u3f9e\u3f9f\u3fa0\u3fa1\u3fa2\u3fa3\u3fa4\u3fa5\u3fa6\u3fa7\u3fa8\u3fa9\u3faa\u3fab\u3fac\u3fad\u3fae\u3faf\u3fb0\u3fb1\u3fb2\u3fb3\u3fb4\u3fb5\u3fb6\u3fb7\u3fb8\u3fb9\u3fba\u3fbb\u3fbc\u3fbd\u3fbe\u3fbf\u3fc0\u3fc1\u3fc2\u3fc3\u3fc4\u3fc5\u3fc6\u3fc7\u3fc8\u3fc9
\u3fca\u3fcb\u3fcc\u3fcd\u3fce\u3fcf\u3fd0\u3fd1\u3fd2\u3fd3\u3fd4\u3fd5\u3fd6\u3fd7\u3fd8\u3fd9\u3fda\u3fdb\u3fdc\u3fdd\u3fde\u3fdf\u3fe0\u3fe1\u3fe2\u3fe3\u3fe4\u3fe5\u3fe6\u3fe7\u3fe8\u3fe9\u3fea\u3feb\u3fec\u3fed\u3fee\u3fef\u3ff0\u3ff1\u3ff2\u3ff3\u3ff4\u3ff5\u3ff6\u3ff7\u3ff8\u3ff9\u3ffa\u3ffb\u3ffc\u3ffd\u3ffe\u3fff\u4000\u4001\u4002\u4003\u4004\u4005\u4006\u4007\u4008\u4009\u400a\u400b\u400c\u400d\u400e\u400f\u4010\u4011\u4012\u4013\u4014\u4015\u4016\u4017\u4018\u4019\u401a\u401b\u401c\u401d\u401e\u401f\u4020\u4021\u4022\u4023\u4024\u4025\u4026\u4027\u4028\u4029\u402a\u402b\u402c\u402d\u402e\u402f\u4030\u4031\u4032\u4033\u4034\u4035\u4036\u4037\u4038\u4039\u403a\u403b\u403c\u403d\u403e\u403f\u4040\u4041\u4042\u4043\u4044\u4045\u4046\u4047\u4048\u4049\u404a\u404b\u404c\u404d\u404e\u404f\u4050\u4051\u4052\u4053\u4054\u4055\u4056\u4057\u4058\u4059\u405a\u405b\u405c\u405d\u405e\u405f\u4060\u4061\u4062\u4063\u4064\u4065\u4066\u4067\u4068\u4069\u406a\u406b\u406c\u406d\u406e\u406f\u4070\u4071\u4072\u4073\u4074\u4075\u4076\u4077\u4078\u4079\u407a\u407b\u407c\u407d\u407e\u407f\u4080\u4081\u4082\u4083\u4084\u4085\u4086\u4087\u4088\u4089\u408a\u408b\u408c\u408d\u408e\u408f\u4090\u4091\u4092\u4093\u4094\u4095\u4096\u4097\u4098\u4099\u409a\u409b\u409c\u409d\u409e\u409f\u40a0\u40a1\u40a2\u40a3\u40a4\u40a5\u40a6\u40a7\u40a8\u40a9\u40aa\u40ab\u40ac\u40ad\u40ae\u40af\u40b0\u40b1\u40b2\u40b3\u40b4\u40b5\u40b6\u40b7\u40b8\u40b9\u40ba\u40bb\u40bc\u40bd\u40be\u40bf\u40c0\u40c1\u40c2\u40c3\u40c4\u40c5\u40c6\u40c7\u40c8\u40c9\u40ca\u40cb\u40cc\u40cd\u40ce\u40cf\u40d0\u40d1\u40d2\u40d3\u40d4\u40d5\u40d6\u40d7\u40d8\u40d9\u40da\u40db\u40dc\u40dd\u40de\u40df\u40e0\u40e1\u40e2\u40e3\u40e4\u40e5\u40e6\u40e7\u40e8\u40e9\u40ea\u40eb\u40ec\u40ed\u40ee\u40ef\u40f0\u40f1\u40f2\u40f3\u40f4\u40f5\u40f6\u40f7\u40f8\u40f9\u40fa\u40fb\u40fc\u40fd\u40fe\u40ff\u4100\u4101\u4102\u4103\u4104\u4105\u4106\u4107\u4108\u4109\u410a\u410b\u410c\u410d\u410e\u410f\u4110\u4111\u4112\u4113\u4114\u4115\u4116\u
4117\u4118\u4119\u411a\u411b\u411c\u411d\u411e\u411f\u4120\u4121\u4122\u4123\u4124\u4125\u4126\u4127\u4128\u4129\u412a\u412b\u412c\u412d\u412e\u412f\u4130\u4131\u4132\u4133\u4134\u4135\u4136\u4137\u4138\u4139\u413a\u413b\u413c\u413d\u413e\u413f\u4140\u4141\u4142\u4143\u4144\u4145\u4146\u4147\u4148\u4149\u414a\u414b\u414c\u414d\u414e\u414f\u4150\u4151\u4152\u4153\u4154\u4155\u4156\u4157\u4158\u4159\u415a\u415b\u415c\u415d\u415e\u415f\u4160\u4161\u4162\u4163\u4164\u4165\u4166\u4167\u4168\u4169\u416a\u416b\u416c\u416d\u416e\u416f\u4170\u4171\u4172\u4173\u4174\u4175\u4176\u4177\u4178\u4179\u417a\u417b\u417c\u417d\u417e\u417f\u4180\u4181\u4182\u4183\u4184\u4185\u4186\u4187\u4188\u4189\u418a\u418b\u418c\u418d\u418e\u418f\u4190\u4191\u4192\u4193\u4194\u4195\u4196\u4197\u4198\u4199\u419a\u419b\u419c\u419d\u419e\u419f\u41a0\u41a1\u41a2\u41a3\u41a4\u41a5\u41a6\u41a7\u41a8\u41a9\u41aa\u41ab\u41ac\u41ad\u41ae\u41af\u41b0\u41b1\u41b2\u41b3\u41b4\u41b5\u41b6\u41b7\u41b8\u41b9\u41ba\u41bb\u41bc\u41bd\u41be\u41bf\u41c0\u41c1\u41c2\u41c3\u41c4\u41c5\u41c6\u41c7\u41c8\u41c9\u41ca\u41cb\u41cc\u41cd\u41ce\u41cf\u41d0\u41d1\u41d2\u41d3\u41d4\u41d5\u41d6\u41d7\u41d8\u41d9\u41da\u41db\u41dc\u41dd\u41de\u41df\u41e0\u41e1\u41e2\u41e3\u41e4\u41e5\u41e6\u41e7\u41e8\u41e9\u41ea\u41eb\u41ec\u41ed\u41ee\u41ef\u41f0\u41f1\u41f2\u41f3\u41f4\u41f5\u41f6\u41f7\u41f8\u41f9\u41fa\u41fb\u41fc\u41fd\u41fe\u41ff\u4200\u4201\u4202\u4203\u4204\u4205\u4206\u4207\u4208\u4209\u420a\u420b\u420c\u420d\u420e\u420f\u4210\u4211\u4212\u4213\u4214\u4215\u4216\u4217\u4218\u4219\u421a\u421b\u421c\u421d\u421e\u421f\u4220\u4221\u4222\u4223\u4224\u4225\u4226\u4227\u4228\u4229\u422a\u422b\u422c\u422d\u422e\u422f\u4230\u4231\u4232\u4233\u4234\u4235\u4236\u4237\u4238\u4239\u423a\u423b\u423c\u423d\u423e\u423f\u4240\u4241\u4242\u4243\u4244\u4245\u4246\u4247\u4248\u4249\u424a\u424b\u424c\u424d\u424e\u424f\u4250\u4251\u4252\u4253\u4254\u4255\u4256\u4257\u4258\u4259\u425a\u425b\u425c\u425d\u425e\u425f\u4260\u4261\u4262\u4263\u42
64\u4265\u4266\u4267\u4268\u4269\u426a\u426b\u426c\u426d\u426e\u426f\u4270\u4271\u4272\u4273\u4274\u4275\u4276\u4277\u4278\u4279\u427a\u427b\u427c\u427d\u427e\u427f\u4280\u4281\u4282\u4283\u4284\u4285\u4286\u4287\u4288\u4289\u428a\u428b\u428c\u428d\u428e\u428f\u4290\u4291\u4292\u4293\u4294\u4295\u4296\u4297\u4298\u4299\u429a\u429b\u429c\u429d\u429e\u429f\u42a0\u42a1\u42a2\u42a3\u42a4\u42a5\u42a6\u42a7\u42a8\u42a9\u42aa\u42ab\u42ac\u42ad\u42ae\u42af\u42b0\u42b1\u42b2\u42b3\u42b4\u42b5\u42b6\u42b7\u42b8\u42b9\u42ba\u42bb\u42bc\u42bd\u42be\u42bf\u42c0\u42c1\u42c2\u42c3\u42c4\u42c5\u42c6\u42c7\u42c8\u42c9\u42ca\u42cb\u42cc\u42cd\u42ce\u42cf\u42d0\u42d1\u42d2\u42d3\u42d4\u42d5\u42d6\u42d7\u42d8\u42d9\u42da\u42db\u42dc\u42dd\u42de\u42df\u42e0\u42e1\u42e2\u42e3\u42e4\u42e5\u42e6\u42e7\u42e8\u42e9\u42ea\u42eb\u42ec\u42ed\u42ee\u42ef\u42f0\u42f1\u42f2\u42f3\u42f4\u42f5\u42f6\u42f7\u42f8\u42f9\u42fa\u42fb\u42fc\u42fd\u42fe\u42ff\u4300\u4301\u4302\u4303\u4304\u4305\u4306\u4307\u4308\u4309\u430a\u430b\u430c\u430d\u430e\u430f\u4310\u4311\u4312\u4313\u4314\u4315\u4316\u4317\u4318\u4319\u431a\u431b\u431c\u431d\u431e\u431f\u4320\u4321\u4322\u4323\u4324\u4325\u4326\u4327\u4328\u4329\u432a\u432b\u432c\u432d\u432e\u432f\u4330\u4331\u4332\u4333\u4334\u4335\u4336\u4337\u4338\u4339\u433a\u433b\u433c\u433d\u433e\u433f\u4340\u4341\u4342\u4343\u4344\u4345\u4346\u4347\u4348\u4349\u434a\u434b\u434c\u434d\u434e\u434f\u4350\u4351\u4352\u4353\u4354\u4355\u4356\u4357\u4358\u4359\u435a\u435b\u435c\u435d\u435e\u435f\u4360\u4361\u4362\u4363\u4364\u4365\u4366\u4367\u4368\u4369\u436a\u436b\u436c\u436d\u436e\u436f\u4370\u4371\u4372\u4373\u4374\u4375\u4376\u4377\u4378\u4379\u437a\u437b\u437c\u437d\u437e\u437f\u4380\u4381\u4382\u4383\u4384\u4385\u4386\u4387\u4388\u4389\u438a\u438b\u438c\u438d\u438e\u438f\u4390\u4391\u4392\u4393\u4394\u4395\u4396\u4397\u4398\u4399\u439a\u439b\u439c\u439d\u439e\u439f\u43a0\u43a1\u43a2\u43a3\u43a4\u43a5\u43a6\u43a7\u43a8\u43a9\u43aa\u43ab\u43ac\u43ad\u43ae\u43af\u43b0\u43b1
\u43b2\u43b3\u43b4\u43b5\u43b6\u43b7\u43b8\u43b9\u43ba\u43bb\u43bc\u43bd\u43be\u43bf\u43c0\u43c1\u43c2\u43c3\u43c4\u43c5\u43c6\u43c7\u43c8\u43c9\u43ca\u43cb\u43cc\u43cd\u43ce\u43cf\u43d0\u43d1\u43d2\u43d3\u43d4\u43d5\u43d6\u43d7\u43d8\u43d9\u43da\u43db\u43dc\u43dd\u43de\u43df\u43e0\u43e1\u43e2\u43e3\u43e4\u43e5\u43e6\u43e7\u43e8\u43e9\u43ea\u43eb\u43ec\u43ed\u43ee\u43ef\u43f0\u43f1\u43f2\u43f3\u43f4\u43f5\u43f6\u43f7\u43f8\u43f9\u43fa\u43fb\u43fc\u43fd\u43fe\u43ff\u4400\u4401\u4402\u4403\u4404\u4405\u4406\u4407\u4408\u4409\u440a\u440b\u440c\u440d\u440e\u440f\u4410\u4411\u4412\u4413\u4414\u4415\u4416\u4417\u4418\u4419\u441a\u441b\u441c\u441d\u441e\u441f\u4420\u4421\u4422\u4423\u4424\u4425\u4426\u4427\u4428\u4429\u442a\u442b\u442c\u442d\u442e\u442f\u4430\u4431\u4432\u4433\u4434\u4435\u4436\u4437\u4438\u4439\u443a\u443b\u443c\u443d\u443e\u443f\u4440\u4441\u4442\u4443\u4444\u4445\u4446\u4447\u4448\u4449\u444a\u444b\u444c\u444d\u444e\u444f\u4450\u4451\u4452\u4453\u4454\u4455\u4456\u4457\u4458\u4459\u445a\u445b\u445c\u445d\u445e\u445f\u4460\u4461\u4462\u4463\u4464\u4465\u4466\u4467\u4468\u4469\u446a\u446b\u446c\u446d\u446e\u446f\u4470\u4471\u4472\u4473\u4474\u4475\u4476\u4477\u4478\u4479\u447a\u447b\u447c\u447d\u447e\u447f\u4480\u4481\u4482\u4483\u4484\u4485\u4486\u4487\u4488\u4489\u448a\u448b\u448c\u448d\u448e\u448f\u4490\u4491\u4492\u4493\u4494\u4495\u4496\u4497\u4498\u4499\u449a\u449b\u449c\u449d\u449e\u449f\u44a0\u44a1\u44a2\u44a3\u44a4\u44a5\u44a6\u44a7\u44a8\u44a9\u44aa\u44ab\u44ac\u44ad\u44ae\u44af\u44b0\u44b1\u44b2\u44b3\u44b4\u44b5\u44b6\u44b7\u44b8\u44b9\u44ba\u44bb\u44bc\u44bd\u44be\u44bf\u44c0\u44c1\u44c2\u44c3\u44c4\u44c5\u44c6\u44c7\u44c8\u44c9\u44ca\u44cb\u44cc\u44cd\u44ce\u44cf\u44d0\u44d1\u44d2\u44d3\u44d4\u44d5\u44d6\u44d7\u44d8\u44d9\u44da\u44db\u44dc\u44dd\u44de\u44df\u44e0\u44e1\u44e2\u44e3\u44e4\u44e5\u44e6\u44e7\u44e8\u44e9\u44ea\u44eb\u44ec\u44ed\u44ee\u44ef\u44f0\u44f1\u44f2\u44f3\u44f4\u44f5\u44f6\u44f7\u44f8\u44f9\u44fa\u44fb\u44fc\u44fd\u44fe\u
44ff\u4500\u4501\u4502\u4503\u4504\u4505\u4506\u4507\u4508\u4509\u450a\u450b\u450c\u450d\u450e\u450f\u4510\u4511\u4512\u4513\u4514\u4515\u4516\u4517\u4518\u4519\u451a\u451b\u451c\u451d\u451e\u451f\u4520\u4521\u4522\u4523\u4524\u4525\u4526\u4527\u4528\u4529\u452a\u452b\u452c\u452d\u452e\u452f\u4530\u4531\u4532\u4533\u4534\u4535\u4536\u4537\u4538\u4539\u453a\u453b\u453c\u453d\u453e\u453f\u4540\u4541\u4542\u4543\u4544\u4545\u4546\u4547\u4548\u4549\u454a\u454b\u454c\u454d\u454e\u454f\u4550\u4551\u4552\u4553\u4554\u4555\u4556\u4557\u4558\u4559\u455a\u455b\u455c\u455d\u455e\u455f\u4560\u4561\u4562\u4563\u4564\u4565\u4566\u4567\u4568\u4569\u456a\u456b\u456c\u456d\u456e\u456f\u4570\u4571\u4572\u4573\u4574\u4575\u4576\u4577\u4578\u4579\u457a\u457b\u457c\u457d\u457e\u457f\u4580\u4581\u4582\u4583\u4584\u4585\u4586\u4587\u4588\u4589\u458a\u458b\u458c\u458d\u458e\u458f\u4590\u4591\u4592\u4593\u4594\u4595\u4596\u4597\u4598\u4599\u459a\u459b\u459c\u459d\u459e\u459f\u45a0\u45a1\u45a2\u45a3\u45a4\u45a5\u45a6\u45a7\u45a8\u45a9\u45aa\u45ab\u45ac\u45ad\u45ae\u45af\u45b0\u45b1\u45b2\u45b3\u45b4\u45b5\u45b6\u45b7\u45b8\u45b9\u45ba\u45bb\u45bc\u45bd\u45be\u45bf\u45c0\u45c1\u45c2\u45c3\u45c4\u45c5\u45c6\u45c7\u45c8\u45c9\u45ca\u45cb\u45cc\u45cd\u45ce\u45cf\u45d0\u45d1\u45d2\u45d3\u45d4\u45d5\u45d6\u45d7\u45d8\u45d9\u45da\u45db\u45dc\u45dd\u45de\u45df\u45e0\u45e1\u45e2\u45e3\u45e4\u45e5\u45e6\u45e7\u45e8\u45e9\u45ea\u45eb\u45ec\u45ed\u45ee\u45ef\u45f0\u45f1\u45f2\u45f3\u45f4\u45f5\u45f6\u45f7\u45f8\u45f9\u45fa\u45fb\u45fc\u45fd\u45fe\u45ff\u4600\u4601\u4602\u4603\u4604\u4605\u4606\u4607\u4608\u4609\u460a\u460b\u460c\u460d\u460e\u460f\u4610\u4611\u4612\u4613\u4614\u4615\u4616\u4617\u4618\u4619\u461a\u461b\u461c\u461d\u461e\u461f\u4620\u4621\u4622\u4623\u4624\u4625\u4626\u4627\u4628\u4629\u462a\u462b\u462c\u462d\u462e\u462f\u4630\u4631\u4632\u4633\u4634\u4635\u4636\u4637\u4638\u4639\u463a\u463b\u463c\u463d\u463e\u463f\u4640\u4641\u4642\u4643\u4644\u4645\u4646\u4647\u4648\u4649\u464a\u464b\u46
4c\u464d\u464e\u464f\u4650\u4651\u4652\u4653\u4654\u4655\u4656\u4657\u4658\u4659\u465a\u465b\u465c\u465d\u465e\u465f\u4660\u4661\u4662\u4663\u4664\u4665\u4666\u4667\u4668\u4669\u466a\u466b\u466c\u466d\u466e\u466f\u4670\u4671\u4672\u4673\u4674\u4675\u4676\u4677\u4678\u4679\u467a\u467b\u467c\u467d\u467e\u467f\u4680\u4681\u4682\u4683\u4684\u4685\u4686\u4687\u4688\u4689\u468a\u468b\u468c\u468d\u468e\u468f\u4690\u4691\u4692\u4693\u4694\u4695\u4696\u4697\u4698\u4699\u469a\u469b\u469c\u469d\u469e\u469f\u46a0\u46a1\u46a2\u46a3\u46a4\u46a5\u46a6\u46a7\u46a8\u46a9\u46aa\u46ab\u46ac\u46ad\u46ae\u46af\u46b0\u46b1\u46b2\u46b3\u46b4\u46b5\u46b6\u46b7\u46b8\u46b9\u46ba\u46bb\u46bc\u46bd\u46be\u46bf\u46c0\u46c1\u46c2\u46c3\u46c4\u46c5\u46c6\u46c7\u46c8\u46c9\u46ca\u46cb\u46cc\u46cd\u46ce\u46cf\u46d0\u46d1\u46d2\u46d3\u46d4\u46d5\u46d6\u46d7\u46d8\u46d9\u46da\u46db\u46dc\u46dd\u46de\u46df\u46e0\u46e1\u46e2\u46e3\u46e4\u46e5\u46e6\u46e7\u46e8\u46e9\u46ea\u46eb\u46ec\u46ed\u46ee\u46ef\u46f0\u46f1\u46f2\u46f3\u46f4\u46f5\u46f6\u46f7\u46f8\u46f9\u46fa\u46fb\u46fc\u46fd\u46fe\u46ff\u4700\u4701\u4702\u4703\u4704\u4705\u4706\u4707\u4708\u4709\u470a\u470b\u470c\u470d\u470e\u470f\u4710\u4711\u4712\u4713\u4714\u4715\u4716\u4717\u4718\u4719\u471a\u471b\u471c\u471d\u471e\u471f\u4720\u4721\u4722\u4723\u4724\u4725\u4726\u4727\u4728\u4729\u472a\u472b\u472c\u472d\u472e\u472f\u4730\u4731\u4732\u4733\u4734\u4735\u4736\u4737\u4738\u4739\u473a\u473b\u473c\u473d\u473e\u473f\u4740\u4741\u4742\u4743\u4744\u4745\u4746\u4747\u4748\u4749\u474a\u474b\u474c\u474d\u474e\u474f\u4750\u4751\u4752\u4753\u4754\u4755\u4756\u4757\u4758\u4759\u475a\u475b\u475c\u475d\u475e\u475f\u4760\u4761\u4762\u4763\u4764\u4765\u4766\u4767\u4768\u4769\u476a\u476b\u476c\u476d\u476e\u476f\u4770\u4771\u4772\u4773\u4774\u4775\u4776\u4777\u4778\u4779\u477a\u477b\u477c\u477d\u477e\u477f\u4780\u4781\u4782\u4783\u4784\u4785\u4786\u4787\u4788\u4789\u478a\u478b\u478c\u478d\u478e\u478f\u4790\u4791\u4792\u4793\u4794\u4795\u4796\u4797\u4798\u4799
\u479a\u479b\u479c\u479d\u479e\u479f\u47a0\u47a1\u47a2\u47a3\u47a4\u47a5\u47a6\u47a7\u47a8\u47a9\u47aa\u47ab\u47ac\u47ad\u47ae\u47af\u47b0\u47b1\u47b2\u47b3\u47b4\u47b5\u47b6\u47b7\u47b8\u47b9\u47ba\u47bb\u47bc\u47bd\u47be\u47bf\u47c0\u47c1\u47c2\u47c3\u47c4\u47c5\u47c6\u47c7\u47c8\u47c9\u47ca\u47cb\u47cc\u47cd\u47ce\u47cf\u47d0\u47d1\u47d2\u47d3\u47d4\u47d5\u47d6\u47d7\u47d8\u47d9\u47da\u47db\u47dc\u47dd\u47de\u47df\u47e0\u47e1\u47e2\u47e3\u47e4\u47e5\u47e6\u47e7\u47e8\u47e9\u47ea\u47eb\u47ec\u47ed\u47ee\u47ef\u47f0\u47f1\u47f2\u47f3\u47f4\u47f5\u47f6\u47f7\u47f8\u47f9\u47fa\u47fb\u47fc\u47fd\u47fe\u47ff\u4800\u4801\u4802\u4803\u4804\u4805\u4806\u4807\u4808\u4809\u480a\u480b\u480c\u480d\u480e\u480f\u4810\u4811\u4812\u4813\u4814\u4815\u4816\u4817\u4818\u4819\u481a\u481b\u481c\u481d\u481e\u481f\u4820\u4821\u4822\u4823\u4824\u4825\u4826\u4827\u4828\u4829\u482a\u482b\u482c\u482d\u482e\u482f\u4830\u4831\u4832\u4833\u4834\u4835\u4836\u4837\u4838\u4839\u483a\u483b\u483c\u483d\u483e\u483f\u4840\u4841\u4842\u4843\u4844\u4845\u4846\u4847\u4848\u4849\u484a\u484b\u484c\u484d\u484e\u484f\u4850\u4851\u4852\u4853\u4854\u4855\u4856\u4857\u4858\u4859\u485a\u485b\u485c\u485d\u485e\u485f\u4860\u4861\u4862\u4863\u4864\u4865\u4866\u4867\u4868\u4869\u486a\u486b\u486c\u486d\u486e\u486f\u4870\u4871\u4872\u4873\u4874\u4875\u4876\u4877\u4878\u4879\u487a\u487b\u487c\u487d\u487e\u487f\u4880\u4881\u4882\u4883\u4884\u4885\u4886\u4887\u4888\u4889\u488a\u488b\u488c\u488d\u488e\u488f\u4890\u4891\u4892\u4893\u4894\u4895\u4896\u4897\u4898\u4899\u489a\u489b\u489c\u489d\u489e\u489f\u48a0\u48a1\u48a2\u48a3\u48a4\u48a5\u48a6\u48a7\u48a8\u48a9\u48aa\u48ab\u48ac\u48ad\u48ae\u48af\u48b0\u48b1\u48b2\u48b3\u48b4\u48b5\u48b6\u48b7\u48b8\u48b9\u48ba\u48bb\u48bc\u48bd\u48be\u48bf\u48c0\u48c1\u48c2\u48c3\u48c4\u48c5\u48c6\u48c7\u48c8\u48c9\u48ca\u48cb\u48cc\u48cd\u48ce\u48cf\u48d0\u48d1\u48d2\u48d3\u48d4\u48d5\u48d6\u48d7\u48d8\u48d9\u48da\u48db\u48dc\u48dd\u48de\u48df\u48e0\u48e1\u48e2\u48e3\u48e4\u48e5\u48e6\u
48e7\u48e8\u48e9\u48ea\u48eb\u48ec\u48ed\u48ee\u48ef\u48f0\u48f1\u48f2\u48f3\u48f4\u48f5\u48f6\u48f7\u48f8\u48f9\u48fa\u48fb\u48fc\u48fd\u48fe\u48ff\u4900\u4901\u4902\u4903\u4904\u4905\u4906\u4907\u4908\u4909\u490a\u490b\u490c\u490d\u490e\u490f\u4910\u4911\u4912\u4913\u4914\u4915\u4916\u4917\u4918\u4919\u491a\u491b\u491c\u491d\u491e\u491f\u4920\u4921\u4922\u4923\u4924\u4925\u4926\u4927\u4928\u4929\u492a\u492b\u492c\u492d\u492e\u492f\u4930\u4931\u4932\u4933\u4934\u4935\u4936\u4937\u4938\u4939\u493a\u493b\u493c\u493d\u493e\u493f\u4940\u4941\u4942\u4943\u4944\u4945\u4946\u4947\u4948\u4949\u494a\u494b\u494c\u494d\u494e\u494f\u4950\u4951\u4952\u4953\u4954\u4955\u4956\u4957\u4958\u4959\u495a\u495b\u495c\u495d\u495e\u495f\u4960\u4961\u4962\u4963\u4964\u4965\u4966\u4967\u4968\u4969\u496a\u496b\u496c\u496d\u496e\u496f\u4970\u4971\u4972\u4973\u4974\u4975\u4976\u4977\u4978\u4979\u497a\u497b\u497c\u497d\u497e\u497f\u4980\u4981\u4982\u4983\u4984\u4985\u4986\u4987\u4988\u4989\u498a\u498b\u498c\u498d\u498e\u498f\u4990\u4991\u4992\u4993\u4994\u4995\u4996\u4997\u4998\u4999\u499a\u499b\u499c\u499d\u499e\u499f\u49a0\u49a1\u49a2\u49a3\u49a4\u49a5\u49a6\u49a7\u49a8\u49a9\u49aa\u49ab\u49ac\u49ad\u49ae\u49af\u49b0\u49b1\u49b2\u49b3\u49b4\u49b5\u49b6\u49b7\u49b8\u49b9\u49ba\u49bb\u49bc\u49bd\u49be\u49bf\u49c0\u49c1\u49c2\u49c3\u49c4\u49c5\u49c6\u49c7\u49c8\u49c9\u49ca\u49cb\u49cc\u49cd\u49ce\u49cf\u49d0\u49d1\u49d2\u49d3\u49d4\u49d5\u49d6\u49d7\u49d8\u49d9\u49da\u49db\u49dc\u49dd\u49de\u49df\u49e0\u49e1\u49e2\u49e3\u49e4\u49e5\u49e6\u49e7\u49e8\u49e9\u49ea\u49eb\u49ec\u49ed\u49ee\u49ef\u49f0\u49f1\u49f2\u49f3\u49f4\u49f5\u49f6\u49f7\u49f8\u49f9\u49fa\u49fb\u49fc\u49fd\u49fe\u49ff\u4a00\u4a01\u4a02\u4a03\u4a04\u4a05\u4a06\u4a07\u4a08\u4a09\u4a0a\u4a0b\u4a0c\u4a0d\u4a0e\u4a0f\u4a10\u4a11\u4a12\u4a13\u4a14\u4a15\u4a16\u4a17\u4a18\u4a19\u4a1a\u4a1b\u4a1c\u4a1d\u4a1e\u4a1f\u4a20\u4a21\u4a22\u4a23\u4a24\u4a25\u4a26\u4a27\u4a28\u4a29\u4a2a\u4a2b\u4a2c\u4a2d\u4a2e\u4a2f\u4a30\u4a31\u4a32\u4a33\u4a
34\u4a35\u4a36\u4a37\u4a38\u4a39\u4a3a\u4a3b\u4a3c\u4a3d\u4a3e\u4a3f\u4a40\u4a41\u4a42\u4a43\u4a44\u4a45\u4a46\u4a47\u4a48\u4a49\u4a4a\u4a4b\u4a4c\u4a4d\u4a4e\u4a4f\u4a50\u4a51\u4a52\u4a53\u4a54\u4a55\u4a56\u4a57\u4a58\u4a59\u4a5a\u4a5b\u4a5c\u4a5d\u4a5e\u4a5f\u4a60\u4a61\u4a62\u4a63\u4a64\u4a65\u4a66\u4a67\u4a68\u4a69\u4a6a\u4a6b\u4a6c\u4a6d\u4a6e\u4a6f\u4a70\u4a71\u4a72\u4a73\u4a74\u4a75\u4a76\u4a77\u4a78\u4a79\u4a7a\u4a7b\u4a7c\u4a7d\u4a7e\u4a7f\u4a80\u4a81\u4a82\u4a83\u4a84\u4a85\u4a86\u4a87\u4a88\u4a89\u4a8a\u4a8b\u4a8c\u4a8d\u4a8e\u4a8f\u4a90\u4a91\u4a92\u4a93\u4a94\u4a95\u4a96\u4a97\u4a98\u4a99\u4a9a\u4a9b\u4a9c\u4a9d\u4a9e\u4a9f\u4aa0\u4aa1\u4aa2\u4aa3\u4aa4\u4aa5\u4aa6\u4aa7\u4aa8\u4aa9\u4aaa\u4aab\u4aac\u4aad\u4aae\u4aaf\u4ab0\u4ab1\u4ab2\u4ab3\u4ab4\u4ab5\u4ab6\u4ab7\u4ab8\u4ab9\u4aba\u4abb\u4abc\u4abd\u4abe\u4abf\u4ac0\u4ac1\u4ac2\u4ac3\u4ac4\u4ac5\u4ac6\u4ac7\u4ac8\u4ac9\u4aca\u4acb\u4acc\u4acd\u4ace\u4acf\u4ad0\u4ad1\u4ad2\u4ad3\u4ad4\u4ad5\u4ad6\u4ad7\u4ad8\u4ad9\u4ada\u4adb\u4adc\u4add\u4ade\u4adf\u4ae0\u4ae1\u4ae2\u4ae3\u4ae4\u4ae5\u4ae6\u4ae7\u4ae8\u4ae9\u4aea\u4aeb\u4aec\u4aed\u4aee\u4aef\u4af0\u4af1\u4af2\u4af3\u4af4\u4af5\u4af6\u4af7\u4af8\u4af9\u4afa\u4afb\u4afc\u4afd\u4afe\u4aff\u4b00\u4b01\u4b02\u4b03\u4b04\u4b05\u4b06\u4b07\u4b08\u4b09\u4b0a\u4b0b\u4b0c\u4b0d\u4b0e\u4b0f\u4b10\u4b11\u4b12\u4b13\u4b14\u4b15\u4b16\u4b17\u4b18\u4b19\u4b1a\u4b1b\u4b1c\u4b1d\u4b1e\u4b1f\u4b20\u4b21\u4b22\u4b23\u4b24\u4b25\u4b26\u4b27\u4b28\u4b29\u4b2a\u4b2b\u4b2c\u4b2d\u4b2e\u4b2f\u4b30\u4b31\u4b32\u4b33\u4b34\u4b35\u4b36\u4b37\u4b38\u4b39\u4b3a\u4b3b\u4b3c\u4b3d\u4b3e\u4b3f\u4b40\u4b41\u4b42\u4b43\u4b44\u4b45\u4b46\u4b47\u4b48\u4b49\u4b4a\u4b4b\u4b4c\u4b4d\u4b4e\u4b4f\u4b50\u4b51\u4b52\u4b53\u4b54\u4b55\u4b56\u4b57\u4b58\u4b59\u4b5a\u4b5b\u4b5c\u4b5d\u4b5e\u4b5f\u4b60\u4b61\u4b62\u4b63\u4b64\u4b65\u4b66\u4b67\u4b68\u4b69\u4b6a\u4b6b\u4b6c\u4b6d\u4b6e\u4b6f\u4b70\u4b71\u4b72\u4b73\u4b74\u4b75\u4b76\u4b77\u4b78\u4b79\u4b7a\u4b7b\u4b7c\u4b7d\u4b7e\u4b7f\u4b80\u4b81
\u4b82\u4b83\u4b84\u4b85\u4b86\u4b87\u4b88\u4b89\u4b8a\u4b8b\u4b8c\u4b8d\u4b8e\u4b8f\u4b90\u4b91\u4b92\u4b93\u4b94\u4b95\u4b96\u4b97\u4b98\u4b99\u4b9a\u4b9b\u4b9c\u4b9d\u4b9e\u4b9f\u4ba0\u4ba1\u4ba2\u4ba3\u4ba4\u4ba5\u4ba6\u4ba7\u4ba8\u4ba9\u4baa\u4bab\u4bac\u4bad\u4bae\u4baf\u4bb0\u4bb1\u4bb2\u4bb3\u4bb4\u4bb5\u4bb6\u4bb7\u4bb8\u4bb9\u4bba\u4bbb\u4bbc\u4bbd\u4bbe\u4bbf\u4bc0\u4bc1\u4bc2\u4bc3\u4bc4\u4bc5\u4bc6\u4bc7\u4bc8\u4bc9\u4bca\u4bcb\u4bcc\u4bcd\u4bce\u4bcf\u4bd0\u4bd1\u4bd2\u4bd3\u4bd4\u4bd5\u4bd6\u4bd7\u4bd8\u4bd9\u4bda\u4bdb\u4bdc\u4bdd\u4bde\u4bdf\u4be0\u4be1\u4be2\u4be3\u4be4\u4be5\u4be6\u4be7\u4be8\u4be9\u4bea\u4beb\u4bec\u4bed\u4bee\u4bef\u4bf0\u4bf1\u4bf2\u4bf3\u4bf4\u4bf5\u4bf6\u4bf7\u4bf8\u4bf9\u4bfa\u4bfb\u4bfc\u4bfd\u4bfe\u4bff\u4c00\u4c01\u4c02\u4c03\u4c04\u4c05\u4c06\u4c07\u4c08\u4c09\u4c0a\u4c0b\u4c0c\u4c0d\u4c0e\u4c0f\u4c10\u4c11\u4c12\u4c13\u4c14\u4c15\u4c16\u4c17\u4c18\u4c19\u4c1a\u4c1b\u4c1c\u4c1d\u4c1e\u4c1f\u4c20\u4c21\u4c22\u4c23\u4c24\u4c25\u4c26\u4c27\u4c28\u4c29\u4c2a\u4c2b\u4c2c\u4c2d\u4c2e\u4c2f\u4c30\u4c31\u4c32\u4c33\u4c34\u4c35\u4c36\u4c37\u4c38\u4c39\u4c3a\u4c3b\u4c3c\u4c3d\u4c3e\u4c3f\u4c40\u4c41\u4c42\u4c43\u4c44\u4c45\u4c46\u4c47\u4c48\u4c49\u4c4a\u4c4b\u4c4c\u4c4d\u4c4e\u4c4f\u4c50\u4c51\u4c52\u4c53\u4c54\u4c55\u4c56\u4c57\u4c58\u4c59\u4c5a\u4c5b\u4c5c\u4c5d\u4c5e\u4c5f\u4c60\u4c61\u4c62\u4c63\u4c64\u4c65\u4c66\u4c67\u4c68\u4c69\u4c6a\u4c6b\u4c6c\u4c6d\u4c6e\u4c6f\u4c70\u4c71\u4c72\u4c73\u4c74\u4c75\u4c76\u4c77\u4c78\u4c79\u4c7a\u4c7b\u4c7c\u4c7d\u4c7e\u4c7f\u4c80\u4c81\u4c82\u4c83\u4c84\u4c85\u4c86\u4c87\u4c88\u4c89\u4c8a\u4c8b\u4c8c\u4c8d\u4c8e\u4c8f\u4c90\u4c91\u4c92\u4c93\u4c94\u4c95\u4c96\u4c97\u4c98\u4c99\u4c9a\u4c9b\u4c9c\u4c9d\u4c9e\u4c9f\u4ca0\u4ca1\u4ca2\u4ca3\u4ca4\u4ca5\u4ca6\u4ca7\u4ca8\u4ca9\u4caa\u4cab\u4cac\u4cad\u4cae\u4caf\u4cb0\u4cb1\u4cb2\u4cb3\u4cb4\u4cb5\u4cb6\u4cb7\u4cb8\u4cb9\u4cba\u4cbb\u4cbc\u4cbd\u4cbe\u4cbf\u4cc0\u4cc1\u4cc2\u4cc3\u4cc4\u4cc5\u4cc6\u4cc7\u4cc8\u4cc9\u4cca\u4ccb\u4ccc\u4ccd\u4cce\u
4ccf\u4cd0\u4cd1\u4cd2\u4cd3\u4cd4\u4cd5\u4cd6\u4cd7\u4cd8\u4cd9\u4cda\u4cdb\u4cdc\u4cdd\u4cde\u4cdf\u4ce0\u4ce1\u4ce2\u4ce3\u4ce4\u4ce5\u4ce6\u4ce7\u4ce8\u4ce9\u4cea\u4ceb\u4cec\u4ced\u4cee\u4cef\u4cf0\u4cf1\u4cf2\u4cf3\u4cf4\u4cf5\u4cf6\u4cf7\u4cf8\u4cf9\u4cfa\u4cfb\u4cfc\u4cfd\u4cfe\u4cff\u4d00\u4d01\u4d02\u4d03\u4d04\u4d05\u4d06\u4d07\u4d08\u4d09\u4d0a\u4d0b\u4d0c\u4d0d\u4d0e\u4d0f\u4d10\u4d11\u4d12\u4d13\u4d14\u4d15\u4d16\u4d17\u4d18\u4d19\u4d1a\u4d1b\u4d1c\u4d1d\u4d1e\u4d1f\u4d20\u4d21\u4d22\u4d23\u4d24\u4d25\u4d26\u4d27\u4d28\u4d29\u4d2a\u4d2b\u4d2c\u4d2d\u4d2e\u4d2f\u4d30\u4d31\u4d32\u4d33\u4d34\u4d35\u4d36\u4d37\u4d38\u4d39\u4d3a\u4d3b\u4d3c\u4d3d\u4d3e\u4d3f\u4d40\u4d41\u4d42\u4d43\u4d44\u4d45\u4d46\u4d47\u4d48\u4d49\u4d4a\u4d4b\u4d4c\u4d4d\u4d4e\u4d4f\u4d50\u4d51\u4d52\u4d53\u4d54\u4d55\u4d56\u4d57\u4d58\u4d59\u4d5a\u4d5b\u4d5c\u4d5d\u4d5e\u4d5f\u4d60\u4d61\u4d62\u4d63\u4d64\u4d65\u4d66\u4d67\u4d68\u4d69\u4d6a\u4d6b\u4d6c\u4d6d\u4d6e\u4d6f\u4d70\u4d71\u4d72\u4d73\u4d74\u4d75\u4d76\u4d77\u4d78\u4d79\u4d7a\u4d7b\u4d7c\u4d7d\u4d7e\u4d7f\u4d80\u4d81\u4d82\u4d83\u4d84\u4d85\u4d86\u4d87\u4d88\u4d89\u4d8a\u4d8b\u4d8c\u4d8d\u4d8e\u4d8f\u4d90\u4d91\u4d92\u4d93\u4d94\u4d95\u4d96\u4d97\u4d98\u4d99\u4d9a\u4d9b\u4d9c\u4d9d\u4d9e\u4d9f\u4da0\u4da1\u4da2\u4da3\u4da4\u4da5\u4da6\u4da7\u4da8\u4da9\u4daa\u4dab\u4dac\u4dad\u4dae\u4daf\u4db0\u4db1\u4db2\u4db3\u4db4\u4db5\u4e00\u4e01\u4e02\u4e03\u4e04\u4e05\u4e06\u4e07\u4e08\u4e09\u4e0a\u4e0b\u4e0c\u4e0d\u4e0e\u4e0f\u4e10\u4e11\u4e12\u4e13\u4e14\u4e15\u4e16\u4e17\u4e18\u4e19\u4e1a\u4e1b\u4e1c\u4e1d\u4e1e\u4e1f\u4e20\u4e21\u4e22\u4e23\u4e24\u4e25\u4e26\u4e27\u4e28\u4e29\u4e2a\u4e2b\u4e2c\u4e2d\u4e2e\u4e2f\u4e30\u4e31\u4e32\u4e33\u4e34\u4e35\u4e36\u4e37\u4e38\u4e39\u4e3a\u4e3b\u4e3c\u4e3d\u4e3e\u4e3f\u4e40\u4e41\u4e42\u4e43\u4e44\u4e45\u4e46\u4e47\u4e48\u4e49\u4e4a\u4e4b\u4e4c\u4e4d\u4e4e\u4e4f\u4e50\u4e51\u4e52\u4e53\u4e54\u4e55\u4e56\u4e57\u4e58\u4e59\u4e5a\u4e5b\u4e5c\u4e5d\u4e5e\u4e5f\u4e60\u4e61\u4e62\u4e63\u4e64\u4e65\u4e
66\u4e67\u4e68\u4e69\u4e6a\u4e6b\u4e6c\u4e6d\u4e6e\u4e6f\u4e70\u4e71\u4e72\u4e73\u4e74\u4e75\u4e76\u4e77\u4e78\u4e79\u4e7a\u4e7b\u4e7c\u4e7d\u4e7e\u4e7f\u4e80\u4e81\u4e82\u4e83\u4e84\u4e85\u4e86\u4e87\u4e88\u4e89\u4e8a\u4e8b\u4e8c\u4e8d\u4e8e\u4e8f\u4e90\u4e91\u4e92\u4e93\u4e94\u4e95\u4e96\u4e97\u4e98\u4e99\u4e9a\u4e9b\u4e9c\u4e9d\u4e9e\u4e9f\u4ea0\u4ea1\u4ea2\u4ea3\u4ea4\u4ea5\u4ea6\u4ea7\u4ea8\u4ea9\u4eaa\u4eab\u4eac\u4ead\u4eae\u4eaf\u4eb0\u4eb1\u4eb2\u4eb3\u4eb4\u4eb5\u4eb6\u4eb7\u4eb8\u4eb9\u4eba\u4ebb\u4ebc\u4ebd\u4ebe\u4ebf\u4ec0\u4ec1\u4ec2\u4ec3\u4ec4\u4ec5\u4ec6\u4ec7\u4ec8\u4ec9\u4eca\u4ecb\u4ecc\u4ecd\u4ece\u4ecf\u4ed0\u4ed1\u4ed2\u4ed3\u4ed4\u4ed5\u4ed6\u4ed7\u4ed8\u4ed9\u4eda\u4edb\u4edc\u4edd\u4ede\u4edf\u4ee0\u4ee1\u4ee2\u4ee3\u4ee4\u4ee5\u4ee6\u4ee7\u4ee8\u4ee9\u4eea\u4eeb\u4eec\u4eed\u4eee\u4eef\u4ef0\u4ef1\u4ef2\u4ef3\u4ef4\u4ef5\u4ef6\u4ef7\u4ef8\u4ef9\u4efa\u4efb\u4efc\u4efd\u4efe\u4eff\u4f00\u4f01\u4f02\u4f03\u4f04\u4f05\u4f06\u4f07\u4f08\u4f09\u4f0a\u4f0b\u4f0c\u4f0d\u4f0e\u4f0f\u4f10\u4f11\u4f12\u4f13\u4f14\u4f15\u4f16\u4f17\u4f18\u4f19\u4f1a\u4f1b\u4f1c\u4f1d\u4f1e\u4f1f\u4f20\u4f21\u4f22\u4f23\u4f24\u4f25\u4f26\u4f27\u4f28\u4f29\u4f2a\u4f2b\u4f2c\u4f2d\u4f2e\u4f2f\u4f30\u4f31\u4f32\u4f33\u4f34\u4f35\u4f36\u4f37\u4f38\u4f39\u4f3a\u4f3b\u4f3c\u4f3d\u4f3e\u4f3f\u4f40\u4f41\u4f42\u4f43\u4f44\u4f45\u4f46\u4f47\u4f48\u4f49\u4f4a\u4f4b\u4f4c\u4f4d\u4f4e\u4f4f\u4f50\u4f51\u4f52\u4f53\u4f54\u4f55\u4f56\u4f57\u4f58\u4f59\u4f5a\u4f5b\u4f5c\u4f5d\u4f5e\u4f5f\u4f60\u4f61\u4f62\u4f63\u4f64\u4f65\u4f66\u4f67\u4f68\u4f69\u4f6a\u4f6b\u4f6c\u4f6d\u4f6e\u4f6f\u4f70\u4f71\u4f72\u4f73\u4f74\u4f75\u4f76\u4f77\u4f78\u4f79\u4f7a\u4f7b\u4f7c\u4f7d\u4f7e\u4f7f\u4f80\u4f81\u4f82\u4f83\u4f84\u4f85\u4f86\u4f87\u4f88\u4f89\u4f8a\u4f8b\u4f8c\u4f8d\u4f8e\u4f8f\u4f90\u4f91\u4f92\u4f93\u4f94\u4f95\u4f96\u4f97\u4f98\u4f99\u4f9a\u4f9b\u4f9c\u4f9d\u4f9e\u4f9f\u4fa0\u4fa1\u4fa2\u4fa3\u4fa4\u4fa5\u4fa6\u4fa7\u4fa8\u4fa9\u4faa\u4fab\u4fac\u4fad\u4fae\u4faf\u4fb0\u4fb1\u4fb2\u4fb3
\u4fb4\u4fb5\u4fb6\u4fb7\u4fb8\u4fb9\u4fba\u4fbb\u4fbc\u4fbd\u4fbe\u4fbf\u4fc0\u4fc1\u4fc2\u4fc3\u4fc4\u4fc5\u4fc6\u4fc7\u4fc8\u4fc9\u4fca\u4fcb\u4fcc\u4fcd\u4fce\u4fcf\u4fd0\u4fd1\u4fd2\u4fd3\u4fd4\u4fd5\u4fd6\u4fd7\u4fd8\u4fd9\u4fda\u4fdb\u4fdc\u4fdd\u4fde\u4fdf\u4fe0\u4fe1\u4fe2\u4fe3\u4fe4\u4fe5\u4fe6\u4fe7\u4fe8\u4fe9\u4fea\u4feb\u4fec\u4fed\u4fee\u4fef\u4ff0\u4ff1\u4ff2\u4ff3\u4ff4\u4ff5\u4ff6\u4ff7\u4ff8\u4ff9\u4ffa\u4ffb\u4ffc\u4ffd\u4ffe\u4fff\u5000\u5001\u5002\u5003\u5004\u5005\u5006\u5007\u5008\u5009\u500a\u500b\u500c\u500d\u500e\u500f\u5010\u5011\u5012\u5013\u5014\u5015\u5016\u5017\u5018\u5019\u501a\u501b\u501c\u501d\u501e\u501f\u5020\u5021\u5022\u5023\u5024\u5025\u5026\u5027\u5028\u5029\u502a\u502b\u502c\u502d\u502e\u502f\u5030\u5031\u5032\u5033\u5034\u5035\u5036\u5037\u5038\u5039\u503a\u503b\u503c\u503d\u503e\u503f\u5040\u5041\u5042\u5043\u5044\u5045\u5046\u5047\u5048\u5049\u504a\u504b\u504c\u504d\u504e\u504f\u5050\u5051\u5052\u5053\u5054\u5055\u5056\u5057\u5058\u5059\u505a\u505b\u505c\u505d\u505e\u505f\u5060\u5061\u5062\u5063\u5064\u5065\u5066\u5067\u5068\u5069\u506a\u506b\u506c\u506d\u506e\u506f\u5070\u5071\u5072\u5073\u5074\u5075\u5076\u5077\u5078\u5079\u507a\u507b\u507c\u507d\u507e\u507f\u5080\u5081\u5082\u5083\u5084\u5085\u5086\u5087\u5088\u5089\u508a\u508b\u508c\u508d\u508e\u508f\u5090\u5091\u5092\u5093\u5094\u5095\u5096\u5097\u5098\u5099\u509a\u509b\u509c\u509d\u509e\u509f\u50a0\u50a1\u50a2\u50a3\u50a4\u50a5\u50a6\u50a7\u50a8\u50a9\u50aa\u50ab\u50ac\u50ad\u50ae\u50af\u50b0\u50b1\u50b2\u50b3\u50b4\u50b5\u50b6\u50b7\u50b8\u50b9\u50ba\u50bb\u50bc\u50bd\u50be\u50bf\u50c0\u50c1\u50c2\u50c3\u50c4\u50c5\u50c6\u50c7\u50c8\u50c9\u50ca\u50cb\u50cc\u50cd\u50ce\u50cf\u50d0\u50d1\u50d2\u50d3\u50d4\u50d5\u50d6\u50d7\u50d8\u50d9\u50da\u50db\u50dc\u50dd\u50de\u50df\u50e0\u50e1\u50e2\u50e3\u50e4\u50e5\u50e6\u50e7\u50e8\u50e9\u50ea\u50eb\u50ec\u50ed\u50ee\u50ef\u50f0\u50f1\u50f2\u50f3\u50f4\u50f5\u50f6\u50f7\u50f8\u50f9\u50fa\u50fb\u50fc\u50fd\u50fe\u50ff\u5100\u
5101\u5102\u5103\u5104\u5105\u5106\u5107\u5108\u5109\u510a\u510b\u510c\u510d\u510e\u510f\u5110\u5111\u5112\u5113\u5114\u5115\u5116\u5117\u5118\u5119\u511a\u511b\u511c\u511d\u511e\u511f\u5120\u5121\u5122\u5123\u5124\u5125\u5126\u5127\u5128\u5129\u512a\u512b\u512c\u512d\u512e\u512f\u5130\u5131\u5132\u5133\u5134\u5135\u5136\u5137\u5138\u5139\u513a\u513b\u513c\u513d\u513e\u513f\u5140\u5141\u5142\u5143\u5144\u5145\u5146\u5147\u5148\u5149\u514a\u514b\u514c\u514d\u514e\u514f\u5150\u5151\u5152\u5153\u5154\u5155\u5156\u5157\u5158\u5159\u515a\u515b\u515c\u515d\u515e\u515f\u5160\u5161\u5162\u5163\u5164\u5165\u5166\u5167\u5168\u5169\u516a\u516b\u516c\u516d\u516e\u516f\u5170\u5171\u5172\u5173\u5174\u5175\u5176\u5177\u5178\u5179\u517a\u517b\u517c\u517d\u517e\u517f\u5180\u5181\u5182\u5183\u5184\u5185\u5186\u5187\u5188\u5189\u518a\u518b\u518c\u518d\u518e\u518f\u5190\u5191\u5192\u5193\u5194\u5195\u5196\u5197\u5198\u5199\u519a\u519b\u519c\u519d\u519e\u519f\u51a0\u51a1\u51a2\u51a3\u51a4\u51a5\u51a6\u51a7\u51a8\u51a9\u51aa\u51ab\u51ac\u51ad\u51ae\u51af\u51b0\u51b1\u51b2\u51b3\u51b4\u51b5\u51b6\u51b7\u51b8\u51b9\u51ba\u51bb\u51bc\u51bd\u51be\u51bf\u51c0\u51c1\u51c2\u51c3\u51c4\u51c5\u51c6\u51c7\u51c8\u51c9\u51ca\u51cb\u51cc\u51cd\u51ce\u51cf\u51d0\u51d1\u51d2\u51d3\u51d4\u51d5\u51d6\u51d7\u51d8\u51d9\u51da\u51db\u51dc\u51dd\u51de\u51df\u51e0\u51e1\u51e2\u51e3\u51e4\u51e5\u51e6\u51e7\u51e8\u51e9\u51ea\u51eb\u51ec\u51ed\u51ee\u51ef\u51f0\u51f1\u51f2\u51f3\u51f4\u51f5\u51f6\u51f7\u51f8\u51f9\u51fa\u51fb\u51fc\u51fd\u51fe\u51ff\u5200\u5201\u5202\u5203\u5204\u5205\u5206\u5207\u5208\u5209\u520a\u520b\u520c\u520d\u520e\u520f\u5210\u5211\u5212\u5213\u5214\u5215\u5216\u5217\u5218\u5219\u521a\u521b\u521c\u521d\u521e\u521f\u5220\u5221\u5222\u5223\u5224\u5225\u5226\u5227\u5228\u5229\u522a\u522b\u522c\u522d\u522e\u522f\u5230\u5231\u5232\u5233\u5234\u5235\u5236\u5237\u5238\u5239\u523a\u523b\u523c\u523d\u523e\u523f\u5240\u5241\u5242\u5243\u5244\u5245\u5246\u5247\u5248\u5249\u524a\u524b\u524c\u524d\u52
4e\u524f\u5250\u5251\u5252\u5253\u5254\u5255\u5256\u5257\u5258\u5259\u525a\u525b\u525c\u525d\u525e\u525f\u5260\u5261\u5262\u5263\u5264\u5265\u5266\u5267\u5268\u5269\u526a\u526b\u526c\u526d\u526e\u526f\u5270\u5271\u5272\u5273\u5274\u5275\u5276\u5277\u5278\u5279\u527a\u527b\u527c\u527d\u527e\u527f\u5280\u5281\u5282\u5283\u5284\u5285\u5286\u5287\u5288\u5289\u528a\u528b\u528c\u528d\u528e\u528f\u5290\u5291\u5292\u5293\u5294\u5295\u5296\u5297\u5298\u5299\u529a\u529b\u529c\u529d\u529e\u529f\u52a0\u52a1\u52a2\u52a3\u52a4\u52a5\u52a6\u52a7\u52a8\u52a9\u52aa\u52ab\u52ac\u52ad\u52ae\u52af\u52b0\u52b1\u52b2\u52b3\u52b4\u52b5\u52b6\u52b7\u52b8\u52b9\u52ba\u52bb\u52bc\u52bd\u52be\u52bf\u52c0\u52c1\u52c2\u52c3\u52c4\u52c5\u52c6\u52c7\u52c8\u52c9\u52ca\u52cb\u52cc\u52cd\u52ce\u52cf\u52d0\u52d1\u52d2\u52d3\u52d4\u52d5\u52d6\u52d7\u52d8\u52d9\u52da\u52db\u52dc\u52dd\u52de\u52df\u52e0\u52e1\u52e2\u52e3\u52e4\u52e5\u52e6\u52e7\u52e8\u52e9\u52ea\u52eb\u52ec\u52ed\u52ee\u52ef\u52f0\u52f1\u52f2\u52f3\u52f4\u52f5\u52f6\u52f7\u52f8\u52f9\u52fa\u52fb\u52fc\u52fd\u52fe\u52ff\u5300\u5301\u5302\u5303\u5304\u5305\u5306\u5307\u5308\u5309\u530a\u530b\u530c\u530d\u530e\u530f\u5310\u5311\u5312\u5313\u5314\u5315\u5316\u5317\u5318\u5319\u531a\u531b\u531c\u531d\u531e\u531f\u5320\u5321\u5322\u5323\u5324\u5325\u5326\u5327\u5328\u5329\u532a\u532b\u532c\u532d\u532e\u532f\u5330\u5331\u5332\u5333\u5334\u5335\u5336\u5337\u5338\u5339\u533a\u533b\u533c\u533d\u533e\u533f\u5340\u5341\u5342\u5343\u5344\u5345\u5346\u5347\u5348\u5349\u534a\u534b\u534c\u534d\u534e\u534f\u5350\u5351\u5352\u5353\u5354\u5355\u5356\u5357\u5358\u5359\u535a\u535b\u535c\u535d\u535e\u535f\u5360\u5361\u5362\u5363\u5364\u5365\u5366\u5367\u5368\u5369\u536a\u536b\u536c\u536d\u536e\u536f\u5370\u5371\u5372\u5373\u5374\u5375\u5376\u5377\u5378\u5379\u537a\u537b\u537c\u537d\u537e\u537f\u5380\u5381\u5382\u5383\u5384\u5385\u5386\u5387\u5388\u5389\u538a\u538b\u538c\u538d\u538e\u538f\u5390\u5391\u5392\u5393\u5394\u5395\u5396\u5397\u5398\u5399\u539a\u539b
\u539c\u539d\u539e\u539f\u53a0\u53a1\u53a2\u53a3\u53a4\u53a5\u53a6\u53a7\u53a8\u53a9\u53aa\u53ab\u53ac\u53ad\u53ae\u53af\u53b0\u53b1\u53b2\u53b3\u53b4\u53b5\u53b6\u53b7\u53b8\u53b9\u53ba\u53bb\u53bc\u53bd\u53be\u53bf\u53c0\u53c1\u53c2\u53c3\u53c4\u53c5\u53c6\u53c7\u53c8\u53c9\u53ca\u53cb\u53cc\u53cd\u53ce\u53cf\u53d0\u53d1\u53d2\u53d3\u53d4\u53d5\u53d6\u53d7\u53d8\u53d9\u53da\u53db\u53dc\u53dd\u53de\u53df\u53e0\u53e1\u53e2\u53e3\u53e4\u53e5\u53e6\u53e7\u53e8\u53e9\u53ea\u53eb\u53ec\u53ed\u53ee\u53ef\u53f0\u53f1\u53f2\u53f3\u53f4\u53f5\u53f6\u53f7\u53f8\u53f9\u53fa\u53fb\u53fc\u53fd\u53fe\u53ff\u5400\u5401\u5402\u5403\u5404\u5405\u5406\u5407\u5408\u5409\u540a\u540b\u540c\u540d\u540e\u540f\u5410\u5411\u5412\u5413\u5414\u5415\u5416\u5417\u5418\u5419\u541a\u541b\u541c\u541d\u541e\u541f\u5420\u5421\u5422\u5423\u5424\u5425\u5426\u5427\u5428\u5429\u542a\u542b\u542c\u542d\u542e\u542f\u5430\u5431\u5432\u5433\u5434\u5435\u5436\u5437\u5438\u5439\u543a\u543b\u543c\u543d\u543e\u543f\u5440\u5441\u5442\u5443\u5444\u5445\u5446\u5447\u5448\u5449\u544a\u544b\u544c\u544d\u544e\u544f\u5450\u5451\u5452\u5453\u5454\u5455\u5456\u5457\u5458\u5459\u545a\u545b\u545c\u545d\u545e\u545f\u5460\u5461\u5462\u5463\u5464\u5465\u5466\u5467\u5468\u5469\u546a\u546b\u546c\u546d\u546e\u546f\u5470\u5471\u5472\u5473\u5474\u5475\u5476\u5477\u5478\u5479\u547a\u547b\u547c\u547d\u547e\u547f\u5480\u5481\u5482\u5483\u5484\u5485\u5486\u5487\u5488\u5489\u548a\u548b\u548c\u548d\u548e\u548f\u5490\u5491\u5492\u5493\u5494\u5495\u5496\u5497\u5498\u5499\u549a\u549b\u549c\u549d\u549e\u549f\u54a0\u54a1\u54a2\u54a3\u54a4\u54a5\u54a6\u54a7\u54a8\u54a9\u54aa\u54ab\u54ac\u54ad\u54ae\u54af\u54b0\u54b1\u54b2\u54b3\u54b4\u54b5\u54b6\u54b7\u54b8\u54b9\u54ba\u54bb\u54bc\u54bd\u54be\u54bf\u54c0\u54c1\u54c2\u54c3\u54c4\u54c5\u54c6\u54c7\u54c8\u54c9\u54ca\u54cb\u54cc\u54cd\u54ce\u54cf\u54d0\u54d1\u54d2\u54d3\u54d4\u54d5\u54d6\u54d7\u54d8\u54d9\u54da\u54db\u54dc\u54dd\u54de\u54df\u54e0\u54e1\u54e2\u54e3\u54e4\u54e5\u54e6\u54e7\u54e8\u
54e9\u54ea\u54eb\u54ec\u54ed\u54ee\u54ef\u54f0\u54f1\u54f2\u54f3\u54f4\u54f5\u54f6\u54f7\u54f8\u54f9\u54fa\u54fb\u54fc\u54fd\u54fe\u54ff\u5500\u5501\u5502\u5503\u5504\u5505\u5506\u5507\u5508\u5509\u550a\u550b\u550c\u550d\u550e\u550f\u5510\u5511\u5512\u5513\u5514\u5515\u5516\u5517\u5518\u5519\u551a\u551b\u551c\u551d\u551e\u551f\u5520\u5521\u5522\u5523\u5524\u5525\u5526\u5527\u5528\u5529\u552a\u552b\u552c\u552d\u552e\u552f\u5530\u5531\u5532\u5533\u5534\u5535\u5536\u5537\u5538\u5539\u553a\u553b\u553c\u553d\u553e\u553f\u5540\u5541\u5542\u5543\u5544\u5545\u5546\u5547\u5548\u5549\u554a\u554b\u554c\u554d\u554e\u554f\u5550\u5551\u5552\u5553\u5554\u5555\u5556\u5557\u5558\u5559\u555a\u555b\u555c\u555d\u555e\u555f\u5560\u5561\u5562\u5563\u5564\u5565\u5566\u5567\u5568\u5569\u556a\u556b\u556c\u556d\u556e\u556f\u5570\u5571\u5572\u5573\u5574\u5575\u5576\u5577\u5578\u5579\u557a\u557b\u557c\u557d\u557e\u557f\u5580\u5581\u5582\u5583\u5584\u5585\u5586\u5587\u5588\u5589\u558a\u558b\u558c\u558d\u558e\u558f\u5590\u5591\u5592\u5593\u5594\u5595\u5596\u5597\u5598\u5599\u559a\u559b\u559c\u559d\u559e\u559f\u55a0\u55a1\u55a2\u55a3\u55a4\u55a5\u55a6\u55a7\u55a8\u55a9\u55aa\u55ab\u55ac\u55ad\u55ae\u55af\u55b0\u55b1\u55b2\u55b3\u55b4\u55b5\u55b6\u55b7\u55b8\u55b9\u55ba\u55bb\u55bc\u55bd\u55be\u55bf\u55c0\u55c1\u55c2\u55c3\u55c4\u55c5\u55c6\u55c7\u55c8\u55c9\u55ca\u55cb\u55cc\u55cd\u55ce\u55cf\u55d0\u55d1\u55d2\u55d3\u55d4\u55d5\u55d6\u55d7\u55d8\u55d9\u55da\u55db\u55dc\u55dd\u55de\u55df\u55e0\u55e1\u55e2\u55e3\u55e4\u55e5\u55e6\u55e7\u55e8\u55e9\u55ea\u55eb\u55ec\u55ed\u55ee\u55ef\u55f0\u55f1\u55f2\u55f3\u55f4\u55f5\u55f6\u55f7\u55f8\u55f9\u55fa\u55fb\u55fc\u55fd\u55fe\u55ff\u5600\u5601\u5602\u5603\u5604\u5605\u5606\u5607\u5608\u5609\u560a\u560b\u560c\u560d\u560e\u560f\u5610\u5611\u5612\u5613\u5614\u5615\u5616\u5617\u5618\u5619\u561a\u561b\u561c\u561d\u561e\u561f\u5620\u5621\u5622\u5623\u5624\u5625\u5626\u5627\u5628\u5629\u562a\u562b\u562c\u562d\u562e\u562f\u5630\u5631\u5632\u5633\u5634\u5635\u56
36\u5637\u5638\u5639\u563a\u563b\u563c\u563d\u563e\u563f\u5640\u5641\u5642\u5643\u5644\u5645\u5646\u5647\u5648\u5649\u564a\u564b\u564c\u564d\u564e\u564f\u5650\u5651\u5652\u5653\u5654\u5655\u5656\u5657\u5658\u5659\u565a\u565b\u565c\u565d\u565e\u565f\u5660\u5661\u5662\u5663\u5664\u5665\u5666\u5667\u5668\u5669\u566a\u566b\u566c\u566d\u566e\u566f\u5670\u5671\u5672\u5673\u5674\u5675\u5676\u5677\u5678\u5679\u567a\u567b\u567c\u567d\u567e\u567f\u5680\u5681\u5682\u5683\u5684\u5685\u5686\u5687\u5688\u5689\u568a\u568b\u568c\u568d\u568e\u568f\u5690\u5691\u5692\u5693\u5694\u5695\u5696\u5697\u5698\u5699\u569a\u569b\u569c\u569d\u569e\u569f\u56a0\u56a1\u56a2\u56a3\u56a4\u56a5\u56a6\u56a7\u56a8\u56a9\u56aa\u56ab\u56ac\u56ad\u56ae\u56af\u56b0\u56b1\u56b2\u56b3\u56b4\u56b5\u56b6\u56b7\u56b8\u56b9\u56ba\u56bb\u56bc\u56bd\u56be\u56bf\u56c0\u56c1\u56c2\u56c3\u56c4\u56c5\u56c6\u56c7\u56c8\u56c9\u56ca\u56cb\u56cc\u56cd\u56ce\u56cf\u56d0\u56d1\u56d2\u56d3\u56d4\u56d5\u56d6\u56d7\u56d8\u56d9\u56da\u56db\u56dc\u56dd\u56de\u56df\u56e0\u56e1\u56e2\u56e3\u56e4\u56e5\u56e6\u56e7\u56e8\u56e9\u56ea\u56eb\u56ec\u56ed\u56ee\u56ef\u56f0\u56f1\u56f2\u56f3\u56f4\u56f5\u56f6\u56f7\u56f8\u56f9\u56fa\u56fb\u56fc\u56fd\u56fe\u56ff\u5700\u5701\u5702\u5703\u5704\u5705\u5706\u5707\u5708\u5709\u570a\u570b\u570c\u570d\u570e\u570f\u5710\u5711\u5712\u5713\u5714\u5715\u5716\u5717\u5718\u5719\u571a\u571b\u571c\u571d\u571e\u571f\u5720\u5721\u5722\u5723\u5724\u5725\u5726\u5727\u5728\u5729\u572a\u572b\u572c\u572d\u572e\u572f\u5730\u5731\u5732\u5733\u5734\u5735\u5736\u5737\u5738\u5739\u573a\u573b\u573c\u573d\u573e\u573f\u5740\u5741\u5742\u5743\u5744\u5745\u5746\u5747\u5748\u5749\u574a\u574b\u574c\u574d\u574e\u574f\u5750\u5751\u5752\u5753\u5754\u5755\u5756\u5757\u5758\u5759\u575a\u575b\u575c\u575d\u575e\u575f\u5760\u5761\u5762\u5763\u5764\u5765\u5766\u5767\u5768\u5769\u576a\u576b\u576c\u576d\u576e\u576f\u5770\u5771\u5772\u5773\u5774\u5775\u5776\u5777\u5778\u5779\u577a\u577b\u577c\u577d\u577e\u577f\u5780\u5781\u5782\u5783
\u5784\u5785\u5786\u5787\u5788\u5789\u578a\u578b\u578c\u578d\u578e\u578f\u5790\u5791\u5792\u5793\u5794\u5795\u5796\u5797\u5798\u5799\u579a\u579b\u579c\u579d\u579e\u579f\u57a0\u57a1\u57a2\u57a3\u57a4\u57a5\u57a6\u57a7\u57a8\u57a9\u57aa\u57ab\u57ac\u57ad\u57ae\u57af\u57b0\u57b1\u57b2\u57b3\u57b4\u57b5\u57b6\u57b7\u57b8\u57b9\u57ba\u57bb\u57bc\u57bd\u57be\u57bf\u57c0\u57c1\u57c2\u57c3\u57c4\u57c5\u57c6\u57c7\u57c8\u57c9\u57ca\u57cb\u57cc\u57cd\u57ce\u57cf\u57d0\u57d1\u57d2\u57d3\u57d4\u57d5\u57d6\u57d7\u57d8\u57d9\u57da\u57db\u57dc\u57dd\u57de\u57df\u57e0\u57e1\u57e2\u57e3\u57e4\u57e5\u57e6\u57e7\u57e8\u57e9\u57ea\u57eb\u57ec\u57ed\u57ee\u57ef\u57f0\u57f1\u57f2\u57f3\u57f4\u57f5\u57f6\u57f7\u57f8\u57f9\u57fa\u57fb\u57fc\u57fd\u57fe\u57ff\u5800\u5801\u5802\u5803\u5804\u5805\u5806\u5807\u5808\u5809\u580a\u580b\u580c\u580d\u580e\u580f\u5810\u5811\u5812\u5813\u5814\u5815\u5816\u5817\u5818\u5819\u581a\u581b\u581c\u581d\u581e\u581f\u5820\u5821\u5822\u5823\u5824\u5825\u5826\u5827\u5828\u5829\u582a\u582b\u582c\u582d\u582e\u582f\u5830\u5831\u5832\u5833\u5834\u5835\u5836\u5837\u5838\u5839\u583a\u583b\u583c\u583d\u583e\u583f\u5840\u5841\u5842\u5843\u5844\u5845\u5846\u5847\u5848\u5849\u584a\u584b\u584c\u584d\u584e\u584f\u5850\u5851\u5852\u5853\u5854\u5855\u5856\u5857\u5858\u5859\u585a\u585b\u585c\u585d\u585e\u585f\u5860\u5861\u5862\u5863\u5864\u5865\u5866\u5867\u5868\u5869\u586a\u586b\u586c\u586d\u586e\u586f\u5870\u5871\u5872\u5873\u5874\u5875\u5876\u5877\u5878\u5879\u587a\u587b\u587c\u587d\u587e\u587f\u5880\u5881\u5882\u5883\u5884\u5885\u5886\u5887\u5888\u5889\u588a\u588b\u588c\u588d\u588e\u588f\u5890\u5891\u5892\u5893\u5894\u5895\u5896\u5897\u5898\u5899\u589a\u589b\u589c\u589d\u589e\u589f\u58a0\u58a1\u58a2\u58a3\u58a4\u58a5\u58a6\u58a7\u58a8\u58a9\u58aa\u58ab\u58ac\u58ad\u58ae\u58af\u58b0\u58b1\u58b2\u58b3\u58b4\u58b5\u58b6\u58b7\u58b8\u58b9\u58ba\u58bb\u58bc\u58bd\u58be\u58bf\u58c0\u58c1\u58c2\u58c3\u58c4\u58c5\u58c6\u58c7\u58c8\u58c9\u58ca\u58cb\u58cc\u58cd\u58ce\u58cf\u58d0\u
58d1\u58d2\u58d3\u58d4\u58d5\u58d6\u58d7\u58d8\u58d9\u58da\u58db\u58dc\u58dd\u58de\u58df\u58e0\u58e1\u58e2\u58e3\u58e4\u58e5\u58e6\u58e7\u58e8\u58e9\u58ea\u58eb\u58ec\u58ed\u58ee\u58ef\u58f0\u58f1\u58f2\u58f3\u58f4\u58f5\u58f6\u58f7\u58f8\u58f9\u58fa\u58fb\u58fc\u58fd\u58fe\u58ff\u5900\u5901\u5902\u5903\u5904\u5905\u5906\u5907\u5908\u5909\u590a\u590b\u590c\u590d\u590e\u590f\u5910\u5911\u5912\u5913\u5914\u5915\u5916\u5917\u5918\u5919\u591a\u591b\u591c\u591d\u591e\u591f\u5920\u5921\u5922\u5923\u5924\u5925\u5926\u5927\u5928\u5929\u592a\u592b\u592c\u592d\u592e\u592f\u5930\u5931\u5932\u5933\u5934\u5935\u5936\u5937\u5938\u5939\u593a\u593b\u593c\u593d\u593e\u593f\u5940\u5941\u5942\u5943\u5944\u5945\u5946\u5947\u5948\u5949\u594a\u594b\u594c\u594d\u594e\u594f\u5950\u5951\u5952\u5953\u5954\u5955\u5956\u5957\u5958\u5959\u595a\u595b\u595c\u595d\u595e\u595f\u5960\u5961\u5962\u5963\u5964\u5965\u5966\u5967\u5968\u5969\u596a\u596b\u596c\u596d\u596e\u596f\u5970\u5971\u5972\u5973\u5974\u5975\u5976\u5977\u5978\u5979\u597a\u597b\u597c\u597d\u597e\u597f\u5980\u5981\u5982\u5983\u5984\u5985\u5986\u5987\u5988\u5989\u598a\u598b\u598c\u598d\u598e\u598f\u5990\u5991\u5992\u5993\u5994\u5995\u5996\u5997\u5998\u5999\u599a\u599b\u599c\u599d\u599e\u599f\u59a0\u59a1\u59a2\u59a3\u59a4\u59a5\u59a6\u59a7\u59a8\u59a9\u59aa\u59ab\u59ac\u59ad\u59ae\u59af\u59b0\u59b1\u59b2\u59b3\u59b4\u59b5\u59b6\u59b7\u59b8\u59b9\u59ba\u59bb\u59bc\u59bd\u59be\u59bf\u59c0\u59c1\u59c2\u59c3\u59c4\u59c5\u59c6\u59c7\u59c8\u59c9\u59ca\u59cb\u59cc\u59cd\u59ce\u59cf\u59d0\u59d1\u59d2\u59d3\u59d4\u59d5\u59d6\u59d7\u59d8\u59d9\u59da\u59db\u59dc\u59dd\u59de\u59df\u59e0\u59e1\u59e2\u59e3\u59e4\u59e5\u59e6\u59e7\u59e8\u59e9\u59ea\u59eb\u59ec\u59ed\u59ee\u59ef\u59f0\u59f1\u59f2\u59f3\u59f4\u59f5\u59f6\u59f7\u59f8\u59f9\u59fa\u59fb\u59fc\u59fd\u59fe\u59ff\u5a00\u5a01\u5a02\u5a03\u5a04\u5a05\u5a06\u5a07\u5a08\u5a09\u5a0a\u5a0b\u5a0c\u5a0d\u5a0e\u5a0f\u5a10\u5a11\u5a12\u5a13\u5a14\u5a15\u5a16\u5a17\u5a18\u5a19\u5a1a\u5a1b\u5a1c\u5a1d\u5a
1e\u5a1f\u5a20\u5a21\u5a22\u5a23\u5a24\u5a25\u5a26\u5a27\u5a28\u5a29\u5a2a\u5a2b\u5a2c\u5a2d\u5a2e\u5a2f\u5a30\u5a31\u5a32\u5a33\u5a34\u5a35\u5a36\u5a37\u5a38\u5a39\u5a3a\u5a3b\u5a3c\u5a3d\u5a3e\u5a3f\u5a40\u5a41\u5a42\u5a43\u5a44\u5a45\u5a46\u5a47\u5a48\u5a49\u5a4a\u5a4b\u5a4c\u5a4d\u5a4e\u5a4f\u5a50\u5a51\u5a52\u5a53\u5a54\u5a55\u5a56\u5a57\u5a58\u5a59\u5a5a\u5a5b\u5a5c\u5a5d\u5a5e\u5a5f\u5a60\u5a61\u5a62\u5a63\u5a64\u5a65\u5a66\u5a67\u5a68\u5a69\u5a6a\u5a6b\u5a6c\u5a6d\u5a6e\u5a6f\u5a70\u5a71\u5a72\u5a73\u5a74\u5a75\u5a76\u5a77\u5a78\u5a79\u5a7a\u5a7b\u5a7c\u5a7d\u5a7e\u5a7f\u5a80\u5a81\u5a82\u5a83\u5a84\u5a85\u5a86\u5a87\u5a88\u5a89\u5a8a\u5a8b\u5a8c\u5a8d\u5a8e\u5a8f\u5a90\u5a91\u5a92\u5a93\u5a94\u5a95\u5a96\u5a97\u5a98\u5a99\u5a9a\u5a9b\u5a9c\u5a9d\u5a9e\u5a9f\u5aa0\u5aa1\u5aa2\u5aa3\u5aa4\u5aa5\u5aa6\u5aa7\u5aa8\u5aa9\u5aaa\u5aab\u5aac\u5aad\u5aae\u5aaf\u5ab0\u5ab1\u5ab2\u5ab3\u5ab4\u5ab5\u5ab6\u5ab7\u5ab8\u5ab9\u5aba\u5abb\u5abc\u5abd\u5abe\u5abf\u5ac0\u5ac1\u5ac2\u5ac3\u5ac4\u5ac5\u5ac6\u5ac7\u5ac8\u5ac9\u5aca\u5acb\u5acc\u5acd\u5ace\u5acf\u5ad0\u5ad1\u5ad2\u5ad3\u5ad4\u5ad5\u5ad6\u5ad7\u5ad8\u5ad9\u5ada\u5adb\u5adc\u5add\u5ade\u5adf\u5ae0\u5ae1\u5ae2\u5ae3\u5ae4\u5ae5\u5ae6\u5ae7\u5ae8\u5ae9\u5aea\u5aeb\u5aec\u5aed\u5aee\u5aef\u5af0\u5af1\u5af2\u5af3\u5af4\u5af5\u5af6\u5af7\u5af8\u5af9\u5afa\u5afb\u5afc\u5afd\u5afe\u5aff\u5b00\u5b01\u5b02\u5b03\u5b04\u5b05\u5b06\u5b07\u5b08\u5b09\u5b0a\u5b0b\u5b0c\u5b0d\u5b0e\u5b0f\u5b10\u5b11\u5b12\u5b13\u5b14\u5b15\u5b16\u5b17\u5b18\u5b19\u5b1a\u5b1b\u5b1c\u5b1d\u5b1e\u5b1f\u5b20\u5b21\u5b22\u5b23\u5b24\u5b25\u5b26\u5b27\u5b28\u5b29\u5b2a\u5b2b\u5b2c\u5b2d\u5b2e\u5b2f\u5b30\u5b31\u5b32\u5b33\u5b34\u5b35\u5b36\u5b37\u5b38\u5b39\u5b3a\u5b3b\u5b3c\u5b3d\u5b3e\u5b3f\u5b40\u5b41\u5b42\u5b43\u5b44\u5b45\u5b46\u5b47\u5b48\u5b49\u5b4a\u5b4b\u5b4c\u5b4d\u5b4e\u5b4f\u5b50\u5b51\u5b52\u5b53\u5b54\u5b55\u5b56\u5b57\u5b58\u5b59\u5b5a\u5b5b\u5b5c\u5b5d\u5b5e\u5b5f\u5b60\u5b61\u5b62\u5b63\u5b64\u5b65\u5b66\u5b67\u5b68\u5b69\u5b6a\u5b6b
\u5b6c\u5b6d\u5b6e\u5b6f\u5b70\u5b71\u5b72\u5b73\u5b74\u5b75\u5b76\u5b77\u5b78\u5b79\u5b7a\u5b7b\u5b7c\u5b7d\u5b7e\u5b7f\u5b80\u5b81\u5b82\u5b83\u5b84\u5b85\u5b86\u5b87\u5b88\u5b89\u5b8a\u5b8b\u5b8c\u5b8d\u5b8e\u5b8f\u5b90\u5b91\u5b92\u5b93\u5b94\u5b95\u5b96\u5b97\u5b98\u5b99\u5b9a\u5b9b\u5b9c\u5b9d\u5b9e\u5b9f\u5ba0\u5ba1\u5ba2\u5ba3\u5ba4\u5ba5\u5ba6\u5ba7\u5ba8\u5ba9\u5baa\u5bab\u5bac\u5bad\u5bae\u5baf\u5bb0\u5bb1\u5bb2\u5bb3\u5bb4\u5bb5\u5bb6\u5bb7\u5bb8\u5bb9\u5bba\u5bbb\u5bbc\u5bbd\u5bbe\u5bbf\u5bc0\u5bc1\u5bc2\u5bc3\u5bc4\u5bc5\u5bc6\u5bc7\u5bc8\u5bc9\u5bca\u5bcb\u5bcc\u5bcd\u5bce\u5bcf\u5bd0\u5bd1\u5bd2\u5bd3\u5bd4\u5bd5\u5bd6\u5bd7\u5bd8\u5bd9\u5bda\u5bdb\u5bdc\u5bdd\u5bde\u5bdf\u5be0\u5be1\u5be2\u5be3\u5be4\u5be5\u5be6\u5be7\u5be8\u5be9\u5bea\u5beb\u5bec\u5bed\u5bee\u5bef\u5bf0\u5bf1\u5bf2\u5bf3\u5bf4\u5bf5\u5bf6\u5bf7\u5bf8\u5bf9\u5bfa\u5bfb\u5bfc\u5bfd\u5bfe\u5bff\u5c00\u5c01\u5c02\u5c03\u5c04\u5c05\u5c06\u5c07\u5c08\u5c09\u5c0a\u5c0b\u5c0c\u5c0d\u5c0e\u5c0f\u5c10\u5c11\u5c12\u5c13\u5c14\u5c15\u5c16\u5c17\u5c18\u5c19\u5c1a\u5c1b\u5c1c\u5c1d\u5c1e\u5c1f\u5c20\u5c21\u5c22\u5c23\u5c24\u5c25\u5c26\u5c27\u5c28\u5c29\u5c2a\u5c2b\u5c2c\u5c2d\u5c2e\u5c2f\u5c30\u5c31\u5c32\u5c33\u5c34\u5c35\u5c36\u5c37\u5c38\u5c39\u5c3a\u5c3b\u5c3c\u5c3d\u5c3e\u5c3f\u5c40\u5c41\u5c42\u5c43\u5c44\u5c45\u5c46\u5c47\u5c48\u5c49\u5c4a\u5c4b\u5c4c\u5c4d\u5c4e\u5c4f\u5c50\u5c51\u5c52\u5c53\u5c54\u5c55\u5c56\u5c57\u5c58\u5c59\u5c5a\u5c5b\u5c5c\u5c5d\u5c5e\u5c5f\u5c60\u5c61\u5c62\u5c63\u5c64\u5c65\u5c66\u5c67\u5c68\u5c69\u5c6a\u5c6b\u5c6c\u5c6d\u5c6e\u5c6f\u5c70\u5c71\u5c72\u5c73\u5c74\u5c75\u5c76\u5c77\u5c78\u5c79\u5c7a\u5c7b\u5c7c\u5c7d\u5c7e\u5c7f\u5c80\u5c81\u5c82\u5c83\u5c84\u5c85\u5c86\u5c87\u5c88\u5c89\u5c8a\u5c8b\u5c8c\u5c8d\u5c8e\u5c8f\u5c90\u5c91\u5c92\u5c93\u5c94\u5c95\u5c96\u5c97\u5c98\u5c99\u5c9a\u5c9b\u5c9c\u5c9d\u5c9e\u5c9f\u5ca0\u5ca1\u5ca2\u5ca3\u5ca4\u5ca5\u5ca6\u5ca7\u5ca8\u5ca9\u5caa\u5cab\u5cac\u5cad\u5cae\u5caf\u5cb0\u5cb1\u5cb2\u5cb3\u5cb4\u5cb5\u5cb6\u5cb7\u5cb8\u
5cb9\u5cba\u5cbb\u5cbc\u5cbd\u5cbe\u5cbf\u5cc0\u5cc1\u5cc2\u5cc3\u5cc4\u5cc5\u5cc6\u5cc7\u5cc8\u5cc9\u5cca\u5ccb\u5ccc\u5ccd\u5cce\u5ccf\u5cd0\u5cd1\u5cd2\u5cd3\u5cd4\u5cd5\u5cd6\u5cd7\u5cd8\u5cd9\u5cda\u5cdb\u5cdc\u5cdd\u5cde\u5cdf\u5ce0\u5ce1\u5ce2\u5ce3\u5ce4\u5ce5\u5ce6\u5ce7\u5ce8\u5ce9\u5cea\u5ceb\u5cec\u5ced\u5cee\u5cef\u5cf0\u5cf1\u5cf2\u5cf3\u5cf4\u5cf5\u5cf6\u5cf7\u5cf8\u5cf9\u5cfa\u5cfb\u5cfc\u5cfd\u5cfe\u5cff\u5d00\u5d01\u5d02\u5d03\u5d04\u5d05\u5d06\u5d07\u5d08\u5d09\u5d0a\u5d0b\u5d0c\u5d0d\u5d0e\u5d0f\u5d10\u5d11\u5d12\u5d13\u5d14\u5d15\u5d16\u5d17\u5d18\u5d19\u5d1a\u5d1b\u5d1c\u5d1d\u5d1e\u5d1f\u5d20\u5d21\u5d22\u5d23\u5d24\u5d25\u5d26\u5d27\u5d28\u5d29\u5d2a\u5d2b\u5d2c\u5d2d\u5d2e\u5d2f\u5d30\u5d31\u5d32\u5d33\u5d34\u5d35\u5d36\u5d37\u5d38\u5d39\u5d3a\u5d3b\u5d3c\u5d3d\u5d3e\u5d3f\u5d40\u5d41\u5d42\u5d43\u5d44\u5d45\u5d46\u5d47\u5d48\u5d49\u5d4a\u5d4b\u5d4c\u5d4d\u5d4e\u5d4f\u5d50\u5d51\u5d52\u5d53\u5d54\u5d55\u5d56\u5d57\u5d58\u5d59\u5d5a\u5d5b\u5d5c\u5d5d\u5d5e\u5d5f\u5d60\u5d61\u5d62\u5d63\u5d64\u5d65\u5d66\u5d67\u5d68\u5d69\u5d6a\u5d6b\u5d6c\u5d6d\u5d6e\u5d6f\u5d70\u5d71\u5d72\u5d73\u5d74\u5d75\u5d76\u5d77\u5d78\u5d79\u5d7a\u5d7b\u5d7c\u5d7d\u5d7e\u5d7f\u5d80\u5d81\u5d82\u5d83\u5d84\u5d85\u5d86\u5d87\u5d88\u5d89\u5d8a\u5d8b\u5d8c\u5d8d\u5d8e\u5d8f\u5d90\u5d91\u5d92\u5d93\u5d94\u5d95\u5d96\u5d97\u5d98\u5d99\u5d9a\u5d9b\u5d9c\u5d9d\u5d9e\u5d9f\u5da0\u5da1\u5da2\u5da3\u5da4\u5da5\u5da6\u5da7\u5da8\u5da9\u5daa\u5dab\u5dac\u5dad\u5dae\u5daf\u5db0\u5db1\u5db2\u5db3\u5db4\u5db5\u5db6\u5db7\u5db8\u5db9\u5dba\u5dbb\u5dbc\u5dbd\u5dbe\u5dbf\u5dc0\u5dc1\u5dc2\u5dc3\u5dc4\u5dc5\u5dc6\u5dc7\u5dc8\u5dc9\u5dca\u5dcb\u5dcc\u5dcd\u5dce\u5dcf\u5dd0\u5dd1\u5dd2\u5dd3\u5dd4\u5dd5\u5dd6\u5dd7\u5dd8\u5dd9\u5dda\u5ddb\u5ddc\u5ddd\u5dde\u5ddf\u5de0\u5de1\u5de2\u5de3\u5de4\u5de5\u5de6\u5de7\u5de8\u5de9\u5dea\u5deb\u5dec\u5ded\u5dee\u5def\u5df0\u5df1\u5df2\u5df3\u5df4\u5df5\u5df6\u5df7\u5df8\u5df9\u5dfa\u5dfb\u5dfc\u5dfd\u5dfe\u5dff\u5e00\u5e01\u5e02\u5e03\u5e04\u5e05\u5e
06\u5e07\u5e08\u5e09\u5e0a\u5e0b\u5e0c\u5e0d\u5e0e\u5e0f\u5e10\u5e11\u5e12\u5e13\u5e14\u5e15\u5e16\u5e17\u5e18\u5e19\u5e1a\u5e1b\u5e1c\u5e1d\u5e1e\u5e1f\u5e20\u5e21\u5e22\u5e23\u5e24\u5e25\u5e26\u5e27\u5e28\u5e29\u5e2a\u5e2b\u5e2c\u5e2d\u5e2e\u5e2f\u5e30\u5e31\u5e32\u5e33\u5e34\u5e35\u5e36\u5e37\u5e38\u5e39\u5e3a\u5e3b\u5e3c\u5e3d\u5e3e\u5e3f\u5e40\u5e41\u5e42\u5e43\u5e44\u5e45\u5e46\u5e47\u5e48\u5e49\u5e4a\u5e4b\u5e4c\u5e4d\u5e4e\u5e4f\u5e50\u5e51\u5e52\u5e53\u5e54\u5e55\u5e56\u5e57\u5e58\u5e59\u5e5a\u5e5b\u5e5c\u5e5d\u5e5e\u5e5f\u5e60\u5e61\u5e62\u5e63\u5e64\u5e65\u5e66\u5e67\u5e68\u5e69\u5e6a\u5e6b\u5e6c\u5e6d\u5e6e\u5e6f\u5e70\u5e71\u5e72\u5e73\u5e74\u5e75\u5e76\u5e77\u5e78\u5e79\u5e7a\u5e7b\u5e7c\u5e7d\u5e7e\u5e7f\u5e80\u5e81\u5e82\u5e83\u5e84\u5e85\u5e86\u5e87\u5e88\u5e89\u5e8a\u5e8b\u5e8c\u5e8d\u5e8e\u5e8f\u5e90\u5e91\u5e92\u5e93\u5e94\u5e95\u5e96\u5e97\u5e98\u5e99\u5e9a\u5e9b\u5e9c\u5e9d\u5e9e\u5e9f\u5ea0\u5ea1\u5ea2\u5ea3\u5ea4\u5ea5\u5ea6\u5ea7\u5ea8\u5ea9\u5eaa\u5eab\u5eac\u5ead\u5eae\u5eaf\u5eb0\u5eb1\u5eb2\u5eb3\u5eb4\u5eb5\u5eb6\u5eb7\u5eb8\u5eb9\u5eba\u5ebb\u5ebc\u5ebd\u5ebe\u5ebf\u5ec0\u5ec1\u5ec2\u5ec3\u5ec4\u5ec5\u5ec6\u5ec7\u5ec8\u5ec9\u5eca\u5ecb\u5ecc\u5ecd\u5ece\u5ecf\u5ed0\u5ed1\u5ed2\u5ed3\u5ed4\u5ed5\u5ed6\u5ed7\u5ed8\u5ed9\u5eda\u5edb\u5edc\u5edd\u5ede\u5edf\u5ee0\u5ee1\u5ee2\u5ee3\u5ee4\u5ee5\u5ee6\u5ee7\u5ee8\u5ee9\u5eea\u5eeb\u5eec\u5eed\u5eee\u5eef\u5ef0\u5ef1\u5ef2\u5ef3\u5ef4\u5ef5\u5ef6\u5ef7\u5ef8\u5ef9\u5efa\u5efb\u5efc\u5efd\u5efe\u5eff\u5f00\u5f01\u5f02\u5f03\u5f04\u5f05\u5f06\u5f07\u5f08\u5f09\u5f0a\u5f0b\u5f0c\u5f0d\u5f0e\u5f0f\u5f10\u5f11\u5f12\u5f13\u5f14\u5f15\u5f16\u5f17\u5f18\u5f19\u5f1a\u5f1b\u5f1c\u5f1d\u5f1e\u5f1f\u5f20\u5f21\u5f22\u5f23\u5f24\u5f25\u5f26\u5f27\u5f28\u5f29\u5f2a\u5f2b\u5f2c\u5f2d\u5f2e\u5f2f\u5f30\u5f31\u5f32\u5f33\u5f34\u5f35\u5f36\u5f37\u5f38\u5f39\u5f3a\u5f3b\u5f3c\u5f3d\u5f3e\u5f3f\u5f40\u5f41\u5f42\u5f43\u5f44\u5f45\u5f46\u5f47\u5f48\u5f49\u5f4a\u5f4b\u5f4c\u5f4d\u5f4e\u5f4f\u5f50\u5f51\u5f52\u5f53
\u5f54\u5f55\u5f56\u5f57\u5f58\u5f59\u5f5a\u5f5b\u5f5c\u5f5d\u5f5e\u5f5f\u5f60\u5f61\u5f62\u5f63\u5f64\u5f65\u5f66\u5f67\u5f68\u5f69\u5f6a\u5f6b\u5f6c\u5f6d\u5f6e\u5f6f\u5f70\u5f71\u5f72\u5f73\u5f74\u5f75\u5f76\u5f77\u5f78\u5f79\u5f7a\u5f7b\u5f7c\u5f7d\u5f7e\u5f7f\u5f80\u5f81\u5f82\u5f83\u5f84\u5f85\u5f86\u5f87\u5f88\u5f89\u5f8a\u5f8b\u5f8c\u5f8d\u5f8e\u5f8f\u5f90\u5f91\u5f92\u5f93\u5f94\u5f95\u5f96\u5f97\u5f98\u5f99\u5f9a\u5f9b\u5f9c\u5f9d\u5f9e\u5f9f\u5fa0\u5fa1\u5fa2\u5fa3\u5fa4\u5fa5\u5fa6\u5fa7\u5fa8\u5fa9\u5faa\u5fab\u5fac\u5fad\u5fae\u5faf\u5fb0\u5fb1\u5fb2\u5fb3\u5fb4\u5fb5\u5fb6\u5fb7\u5fb8\u5fb9\u5fba\u5fbb\u5fbc\u5fbd\u5fbe\u5fbf\u5fc0\u5fc1\u5fc2\u5fc3\u5fc4\u5fc5\u5fc6\u5fc7\u5fc8\u5fc9\u5fca\u5fcb\u5fcc\u5fcd\u5fce\u5fcf\u5fd0\u5fd1\u5fd2\u5fd3\u5fd4\u5fd5\u5fd6\u5fd7\u5fd8\u5fd9\u5fda\u5fdb\u5fdc\u5fdd\u5fde\u5fdf\u5fe0\u5fe1\u5fe2\u5fe3\u5fe4\u5fe5\u5fe6\u5fe7\u5fe8\u5fe9\u5fea\u5feb\u5fec\u5fed\u5fee\u5fef\u5ff0\u5ff1\u5ff2\u5ff3\u5ff4\u5ff5\u5ff6\u5ff7\u5ff8\u5ff9\u5ffa\u5ffb\u5ffc\u5ffd\u5ffe\u5fff\u6000\u6001\u6002\u6003\u6004\u6005\u6006\u6007\u6008\u6009\u600a\u600b\u600c\u600d\u600e\u600f\u6010\u6011\u6012\u6013\u6014\u6015\u6016\u6017\u6018\u6019\u601a\u601b\u601c\u601d\u601e\u601f\u6020\u6021\u6022\u6023\u6024\u6025\u6026\u6027\u6028\u6029\u602a\u602b\u602c\u602d\u602e\u602f\u6030\u6031\u6032\u6033\u6034\u6035\u6036\u6037\u6038\u6039\u603a\u603b\u603c\u603d\u603e\u603f\u6040\u6041\u6042\u6043\u6044\u6045\u6046\u6047\u6048\u6049\u604a\u604b\u604c\u604d\u604e\u604f\u6050\u6051\u6052\u6053\u6054\u6055\u6056\u6057\u6058\u6059\u605a\u605b\u605c\u605d\u605e\u605f\u6060\u6061\u6062\u6063\u6064\u6065\u6066\u6067\u6068\u6069\u606a\u606b\u606c\u606d\u606e\u606f\u6070\u6071\u6072\u6073\u6074\u6075\u6076\u6077\u6078\u6079\u607a\u607b\u607c\u607d\u607e\u607f\u6080\u6081\u6082\u6083\u6084\u6085\u6086\u6087\u6088\u6089\u608a\u608b\u608c\u608d\u608e\u608f\u6090\u6091\u6092\u6093\u6094\u6095\u6096\u6097\u6098\u6099\u609a\u609b\u609c\u609d\u609e\u609f\u60a0\u
60a1\u60a2\u60a3\u60a4\u60a5\u60a6\u60a7\u60a8\u60a9\u60aa\u60ab\u60ac\u60ad\u60ae\u60af\u60b0\u60b1\u60b2\u60b3\u60b4\u60b5\u60b6\u60b7\u60b8\u60b9\u60ba\u60bb\u60bc\u60bd\u60be\u60bf\u60c0\u60c1\u60c2\u60c3\u60c4\u60c5\u60c6\u60c7\u60c8\u60c9\u60ca\u60cb\u60cc\u60cd\u60ce\u60cf\u60d0\u60d1\u60d2\u60d3\u60d4\u60d5\u60d6\u60d7\u60d8\u60d9\u60da\u60db\u60dc\u60dd\u60de\u60df\u60e0\u60e1\u60e2\u60e3\u60e4\u60e5\u60e6\u60e7\u60e8\u60e9\u60ea\u60eb\u60ec\u60ed\u60ee\u60ef\u60f0\u60f1\u60f2\u60f3\u60f4\u60f5\u60f6\u60f7\u60f8\u60f9\u60fa\u60fb\u60fc\u60fd\u60fe\u60ff\u6100\u6101\u6102\u6103\u6104\u6105\u6106\u6107\u6108\u6109\u610a\u610b\u610c\u610d\u610e\u610f\u6110\u6111\u6112\u6113\u6114\u6115\u6116\u6117\u6118\u6119\u611a\u611b\u611c\u611d\u611e\u611f\u6120\u6121\u6122\u6123\u6124\u6125\u6126\u6127\u6128\u6129\u612a\u612b\u612c\u612d\u612e\u612f\u6130\u6131\u6132\u6133\u6134\u6135\u6136\u6137\u6138\u6139\u613a\u613b\u613c\u613d\u613e\u613f\u6140\u6141\u6142\u6143\u6144\u6145\u6146\u6147\u6148\u6149\u614a\u614b\u614c\u614d\u614e\u614f\u6150\u6151\u6152\u6153\u6154\u6155\u6156\u6157\u6158\u6159\u615a\u615b\u615c\u615d\u615e\u615f\u6160\u6161\u6162\u6163\u6164\u6165\u6166\u6167\u6168\u6169\u616a\u616b\u616c\u616d\u616e\u616f\u6170\u6171\u6172\u6173\u6174\u6175\u6176\u6177\u6178\u6179\u617a\u617b\u617c\u617d\u617e\u617f\u6180\u6181\u6182\u6183\u6184\u6185\u6186\u6187\u6188\u6189\u618a\u618b\u618c\u618d\u618e\u618f\u6190\u6191\u6192\u6193\u6194\u6195\u6196\u6197\u6198\u6199\u619a\u619b\u619c\u619d\u619e\u619f\u61a0\u61a1\u61a2\u61a3\u61a4\u61a5\u61a6\u61a7\u61a8\u61a9\u61aa\u61ab\u61ac\u61ad\u61ae\u61af\u61b0\u61b1\u61b2\u61b3\u61b4\u61b5\u61b6\u61b7\u61b8\u61b9\u61ba\u61bb\u61bc\u61bd\u61be\u61bf\u61c0\u61c1\u61c2\u61c3\u61c4\u61c5\u61c6\u61c7\u61c8\u61c9\u61ca\u61cb\u61cc\u61cd\u61ce\u61cf\u61d0\u61d1\u61d2\u61d3\u61d4\u61d5\u61d6\u61d7\u61d8\u61d9\u61da\u61db\u61dc\u61dd\u61de\u61df\u61e0\u61e1\u61e2\u61e3\u61e4\u61e5\u61e6\u61e7\u61e8\u61e9\u61ea\u61eb\u61ec\u61ed\u61
ee\u61ef\u61f0\u61f1\u61f2\u61f3\u61f4\u61f5\u61f6\u61f7\u61f8\u61f9\u61fa\u61fb\u61fc\u61fd\u61fe\u61ff\u6200\u6201\u6202\u6203\u6204\u6205\u6206\u6207\u6208\u6209\u620a\u620b\u620c\u620d\u620e\u620f\u6210\u6211\u6212\u6213\u6214\u6215\u6216\u6217\u6218\u6219\u621a\u621b\u621c\u621d\u621e\u621f\u6220\u6221\u6222\u6223\u6224\u6225\u6226\u6227\u6228\u6229\u622a\u622b\u622c\u622d\u622e\u622f\u6230\u6231\u6232\u6233\u6234\u6235\u6236\u6237\u6238\u6239\u623a\u623b\u623c\u623d\u623e\u623f\u6240\u6241\u6242\u6243\u6244\u6245\u6246\u6247\u6248\u6249\u624a\u624b\u624c\u624d\u624e\u624f\u6250\u6251\u6252\u6253\u6254\u6255\u6256\u6257\u6258\u6259\u625a\u625b\u625c\u625d\u625e\u625f\u6260\u6261\u6262\u6263\u6264\u6265\u6266\u6267\u6268\u6269\u626a\u626b\u626c\u626d\u626e\u626f\u6270\u6271\u6272\u6273\u6274\u6275\u6276\u6277\u6278\u6279\u627a\u627b\u627c\u627d\u627e\u627f\u6280\u6281\u6282\u6283\u6284\u6285\u6286\u6287\u6288\u6289\u628a\u628b\u628c\u628d\u628e\u628f\u6290\u6291\u6292\u6293\u6294\u6295\u6296\u6297\u6298\u6299\u629a\u629b\u629c\u629d\u629e\u629f\u62a0\u62a1\u62a2\u62a3\u62a4\u62a5\u62a6\u62a7\u62a8\u62a9\u62aa\u62ab\u62ac\u62ad\u62ae\u62af\u62b0\u62b1\u62b2\u62b3\u62b4\u62b5\u62b6\u62b7\u62b8\u62b9\u62ba\u62bb\u62bc\u62bd\u62be\u62bf\u62c0\u62c1\u62c2\u62c3\u62c4\u62c5\u62c6\u62c7\u62c8\u62c9\u62ca\u62cb\u62cc\u62cd\u62ce\u62cf\u62d0\u62d1\u62d2\u62d3\u62d4\u62d5\u62d6\u62d7\u62d8\u62d9\u62da\u62db\u62dc\u62dd\u62de\u62df\u62e0\u62e1\u62e2\u62e3\u62e4\u62e5\u62e6\u62e7\u62e8\u62e9\u62ea\u62eb\u62ec\u62ed\u62ee\u62ef\u62f0\u62f1\u62f2\u62f3\u62f4\u62f5\u62f6\u62f7\u62f8\u62f9\u62fa\u62fb\u62fc\u62fd\u62fe\u62ff\u6300\u6301\u6302\u6303\u6304\u6305\u6306\u6307\u6308\u6309\u630a\u630b\u630c\u630d\u630e\u630f\u6310\u6311\u6312\u6313\u6314\u6315\u6316\u6317\u6318\u6319\u631a\u631b\u631c\u631d\u631e\u631f\u6320\u6321\u6322\u6323\u6324\u6325\u6326\u6327\u6328\u6329\u632a\u632b\u632c\u632d\u632e\u632f\u6330\u6331\u6332\u6333\u6334\u6335\u6336\u6337\u6338\u6339\u633a\u633b
\u633c\u633d\u633e\u633f\u6340\u6341\u6342\u6343\u6344\u6345\u6346\u6347\u6348\u6349\u634a\u634b\u634c\u634d\u634e\u634f\u6350\u6351\u6352\u6353\u6354\u6355\u6356\u6357\u6358\u6359\u635a\u635b\u635c\u635d\u635e\u635f\u6360\u6361\u6362\u6363\u6364\u6365\u6366\u6367\u6368\u6369\u636a\u636b\u636c\u636d\u636e\u636f\u6370\u6371\u6372\u6373\u6374\u6375\u6376\u6377\u6378\u6379\u637a\u637b\u637c\u637d\u637e\u637f\u6380\u6381\u6382\u6383\u6384\u6385\u6386\u6387\u6388\u6389\u638a\u638b\u638c\u638d\u638e\u638f\u6390\u6391\u6392\u6393\u6394\u6395\u6396\u6397\u6398\u6399\u639a\u639b\u639c\u639d\u639e\u639f\u63a0\u63a1\u63a2\u63a3\u63a4\u63a5\u63a6\u63a7\u63a8\u63a9\u63aa\u63ab\u63ac\u63ad\u63ae\u63af\u63b0\u63b1\u63b2\u63b3\u63b4\u63b5\u63b6\u63b7\u63b8\u63b9\u63ba\u63bb\u63bc\u63bd\u63be\u63bf\u63c0\u63c1\u63c2\u63c3\u63c4\u63c5\u63c6\u63c7\u63c8\u63c9\u63ca\u63cb\u63cc\u63cd\u63ce\u63cf\u63d0\u63d1\u63d2\u63d3\u63d4\u63d5\u63d6\u63d7\u63d8\u63d9\u63da\u63db\u63dc\u63dd\u63de\u63df\u63e0\u63e1\u63e2\u63e3\u63e4\u63e5\u63e6\u63e7\u63e8\u63e9\u63ea\u63eb\u63ec\u63ed\u63ee\u63ef\u63f0\u63f1\u63f2\u63f3\u63f4\u63f5\u63f6\u63f7\u63f8\u63f9\u63fa\u63fb\u63fc\u63fd\u63fe\u63ff\u6400\u6401\u6402\u6403\u6404\u6405\u6406\u6407\u6408\u6409\u640a\u640b\u640c\u640d\u640e\u640f\u6410\u6411\u6412\u6413\u6414\u6415\u6416\u6417\u6418\u6419\u641a\u641b\u641c\u641d\u641e\u641f\u6420\u6421\u6422\u6423\u6424\u6425\u6426\u6427\u6428\u6429\u642a\u642b\u642c\u642d\u642e\u642f\u6430\u6431\u6432\u6433\u6434\u6435\u6436\u6437\u6438\u6439\u643a\u643b\u643c\u643d\u643e\u643f\u6440\u6441\u6442\u6443\u6444\u6445\u6446\u6447\u6448\u6449\u644a\u644b\u644c\u644d\u644e\u644f\u6450\u6451\u6452\u6453\u6454\u6455\u6456\u6457\u6458\u6459\u645a\u645b\u645c\u645d\u645e\u645f\u6460\u6461\u6462\u6463\u6464\u6465\u6466\u6467\u6468\u6469\u646a\u646b\u646c\u646d\u646e\u646f\u6470\u6471\u6472\u6473\u6474\u6475\u6476\u6477\u6478\u6479\u647a\u647b\u647c\u647d\u647e\u647f\u6480\u6481\u6482\u6483\u6484\u6485\u6486\u6487\u6488\u
6489\u648a\u648b\u648c\u648d\u648e\u648f\u6490\u6491\u6492\u6493\u6494\u6495\u6496\u6497\u6498\u6499\u649a\u649b\u649c\u649d\u649e\u649f\u64a0\u64a1\u64a2\u64a3\u64a4\u64a5\u64a6\u64a7\u64a8\u64a9\u64aa\u64ab\u64ac\u64ad\u64ae\u64af\u64b0\u64b1\u64b2\u64b3\u64b4\u64b5\u64b6\u64b7\u64b8\u64b9\u64ba\u64bb\u64bc\u64bd\u64be\u64bf\u64c0\u64c1\u64c2\u64c3\u64c4\u64c5\u64c6\u64c7\u64c8\u64c9\u64ca\u64cb\u64cc\u64cd\u64ce\u64cf\u64d0\u64d1\u64d2\u64d3\u64d4\u64d5\u64d6\u64d7\u64d8\u64d9\u64da\u64db\u64dc\u64dd\u64de\u64df\u64e0\u64e1\u64e2\u64e3\u64e4\u64e5\u64e6\u64e7\u64e8\u64e9\u64ea\u64eb\u64ec\u64ed\u64ee\u64ef\u64f0\u64f1\u64f2\u64f3\u64f4\u64f5\u64f6\u64f7\u64f8\u64f9\u64fa\u64fb\u64fc\u64fd\u64fe\u64ff\u6500\u6501\u6502\u6503\u6504\u6505\u6506\u6507\u6508\u6509\u650a\u650b\u650c\u650d\u650e\u650f\u6510\u6511\u6512\u6513\u6514\u6515\u6516\u6517\u6518\u6519\u651a\u651b\u651c\u651d\u651e\u651f\u6520\u6521\u6522\u6523\u6524\u6525\u6526\u6527\u6528\u6529\u652a\u652b\u652c\u652d\u652e\u652f\u6530\u6531\u6532\u6533\u6534\u6535\u6536\u6537\u6538\u6539\u653a\u653b\u653c\u653d\u653e\u653f\u6540\u6541\u6542\u6543\u6544\u6545\u6546\u6547\u6548\u6549\u654a\u654b\u654c\u654d\u654e\u654f\u6550\u6551\u6552\u6553\u6554\u6555\u6556\u6557\u6558\u6559\u655a\u655b\u655c\u655d\u655e\u655f\u6560\u6561\u6562\u6563\u6564\u6565\u6566\u6567\u6568\u6569\u656a\u656b\u656c\u656d\u656e\u656f\u6570\u6571\u6572\u6573\u6574\u6575\u6576\u6577\u6578\u6579\u657a\u657b\u657c\u657d\u657e\u657f\u6580\u6581\u6582\u6583\u6584\u6585\u6586\u6587\u6588\u6589\u658a\u658b\u658c\u658d\u658e\u658f\u6590\u6591\u6592\u6593\u6594\u6595\u6596\u6597\u6598\u6599\u659a\u659b\u659c\u659d\u659e\u659f\u65a0\u65a1\u65a2\u65a3\u65a4\u65a5\u65a6\u65a7\u65a8\u65a9\u65aa\u65ab\u65ac\u65ad\u65ae\u65af\u65b0\u65b1\u65b2\u65b3\u65b4\u65b5\u65b6\u65b7\u65b8\u65b9\u65ba\u65bb\u65bc\u65bd\u65be\u65bf\u65c0\u65c1\u65c2\u65c3\u65c4\u65c5\u65c6\u65c7\u65c8\u65c9\u65ca\u65cb\u65cc\u65cd\u65ce\u65cf\u65d0\u65d1\u65d2\u65d3\u65d4\u65d5\u65
d6\u65d7\u65d8\u65d9\u65da\u65db\u65dc\u65dd\u65de\u65df\u65e0\u65e1\u65e2\u65e3\u65e4\u65e5\u65e6\u65e7\u65e8\u65e9\u65ea\u65eb\u65ec\u65ed\u65ee\u65ef\u65f0\u65f1\u65f2\u65f3\u65f4\u65f5\u65f6\u65f7\u65f8\u65f9\u65fa\u65fb\u65fc\u65fd\u65fe\u65ff\u6600\u6601\u6602\u6603\u6604\u6605\u6606\u6607\u6608\u6609\u660a\u660b\u660c\u660d\u660e\u660f\u6610\u6611\u6612\u6613\u6614\u6615\u6616\u6617\u6618\u6619\u661a\u661b\u661c\u661d\u661e\u661f\u6620\u6621\u6622\u6623\u6624\u6625\u6626\u6627\u6628\u6629\u662a\u662b\u662c\u662d\u662e\u662f\u6630\u6631\u6632\u6633\u6634\u6635\u6636\u6637\u6638\u6639\u663a\u663b\u663c\u663d\u663e\u663f\u6640\u6641\u6642\u6643\u6644\u6645\u6646\u6647\u6648\u6649\u664a\u664b\u664c\u664d\u664e\u664f\u6650\u6651\u6652\u6653\u6654\u6655\u6656\u6657\u6658\u6659\u665a\u665b\u665c\u665d\u665e\u665f\u6660\u6661\u6662\u6663\u6664\u6665\u6666\u6667\u6668\u6669\u666a\u666b\u666c\u666d\u666e\u666f\u6670\u6671\u6672\u6673\u6674\u6675\u6676\u6677\u6678\u6679\u667a\u667b\u667c\u667d\u667e\u667f\u6680\u6681\u6682\u6683\u6684\u6685\u6686\u6687\u6688\u6689\u668a\u668b\u668c\u668d\u668e\u668f\u6690\u6691\u6692\u6693\u6694\u6695\u6696\u6697\u6698\u6699\u669a\u669b\u669c\u669d\u669e\u669f\u66a0\u66a1\u66a2\u66a3\u66a4\u66a5\u66a6\u66a7\u66a8\u66a9\u66aa\u66ab\u66ac\u66ad\u66ae\u66af\u66b0\u66b1\u66b2\u66b3\u66b4\u66b5\u66b6\u66b7\u66b8\u66b9\u66ba\u66bb\u66bc\u66bd\u66be\u66bf\u66c0\u66c1\u66c2\u66c3\u66c4\u66c5\u66c6\u66c7\u66c8\u66c9\u66ca\u66cb\u66cc\u66cd\u66ce\u66cf\u66d0\u66d1\u66d2\u66d3\u66d4\u66d5\u66d6\u66d7\u66d8\u66d9\u66da\u66db\u66dc\u66dd\u66de\u66df\u66e0\u66e1\u66e2\u66e3\u66e4\u66e5\u66e6\u66e7\u66e8\u66e9\u66ea\u66eb\u66ec\u66ed\u66ee\u66ef\u66f0\u66f1\u66f2\u66f3\u66f4\u66f5\u66f6\u66f7\u66f8\u66f9\u66fa\u66fb\u66fc\u66fd\u66fe\u66ff\u6700\u6701\u6702\u6703\u6704\u6705\u6706\u6707\u6708\u6709\u670a\u670b\u670c\u670d\u670e\u670f\u6710\u6711\u6712\u6713\u6714\u6715\u6716\u6717\u6718\u6719\u671a\u671b\u671c\u671d\u671e\u671f\u6720\u6721\u6722\u6723
\u6724\u6725\u6726\u6727\u6728\u6729\u672a\u672b\u672c\u672d\u672e\u672f\u6730\u6731\u6732\u6733\u6734\u6735\u6736\u6737\u6738\u6739\u673a\u673b\u673c\u673d\u673e\u673f\u6740\u6741\u6742\u6743\u6744\u6745\u6746\u6747\u6748\u6749\u674a\u674b\u674c\u674d\u674e\u674f\u6750\u6751\u6752\u6753\u6754\u6755\u6756\u6757\u6758\u6759\u675a\u675b\u675c\u675d\u675e\u675f\u6760\u6761\u6762\u6763\u6764\u6765\u6766\u6767\u6768\u6769\u676a\u676b\u676c\u676d\u676e\u676f\u6770\u6771\u6772\u6773\u6774\u6775\u6776\u6777\u6778\u6779\u677a\u677b\u677c\u677d\u677e\u677f\u6780\u6781\u6782\u6783\u6784\u6785\u6786\u6787\u6788\u6789\u678a\u678b\u678c\u678d\u678e\u678f\u6790\u6791\u6792\u6793\u6794\u6795\u6796\u6797\u6798\u6799\u679a\u679b\u679c\u679d\u679e\u679f\u67a0\u67a1\u67a2\u67a3\u67a4\u67a5\u67a6\u67a7\u67a8\u67a9\u67aa\u67ab\u67ac\u67ad\u67ae\u67af\u67b0\u67b1\u67b2\u67b3\u67b4\u67b5\u67b6\u67b7\u67b8\u67b9\u67ba\u67bb\u67bc\u67bd\u67be\u67bf\u67c0\u67c1\u67c2\u67c3\u67c4\u67c5\u67c6\u67c7\u67c8\u67c9\u67ca\u67cb\u67cc\u67cd\u67ce\u67cf\u67d0\u67d1\u67d2\u67d3\u67d4\u67d5\u67d6\u67d7\u67d8\u67d9\u67da\u67db\u67dc\u67dd\u67de\u67df\u67e0\u67e1\u67e2\u67e3\u67e4\u67e5\u67e6\u67e7\u67e8\u67e9\u67ea\u67eb\u67ec\u67ed\u67ee\u67ef\u67f0\u67f1\u67f2\u67f3\u67f4\u67f5\u67f6\u67f7\u67f8\u67f9\u67fa\u67fb\u67fc\u67fd\u67fe\u67ff\u6800\u6801\u6802\u6803\u6804\u6805\u6806\u6807\u6808\u6809\u680a\u680b\u680c\u680d\u680e\u680f\u6810\u6811\u6812\u6813\u6814\u6815\u6816\u6817\u6818\u6819\u681a\u681b\u681c\u681d\u681e\u681f\u6820\u6821\u6822\u6823\u6824\u6825\u6826\u6827\u6828\u6829\u682a\u682b\u682c\u682d\u682e\u682f\u6830\u6831\u6832\u6833\u6834\u6835\u6836\u6837\u6838\u6839\u683a\u683b\u683c\u683d\u683e\u683f\u6840\u6841\u6842\u6843\u6844\u6845\u6846\u6847\u6848\u6849\u684a\u684b\u684c\u684d\u684e\u684f\u6850\u6851\u6852\u6853\u6854\u6855\u6856\u6857\u6858\u6859\u685a\u685b\u685c\u685d\u685e\u685f\u6860\u6861\u6862\u6863\u6864\u6865\u6866\u6867\u6868\u6869\u686a\u686b\u686c\u686d\u686e\u686f\u6870\u
6871\u6872\u6873\u6874\u6875\u6876\u6877\u6878\u6879\u687a\u687b\u687c\u687d\u687e\u687f\u6880\u6881\u6882\u6883\u6884\u6885\u6886\u6887\u6888\u6889\u688a\u688b\u688c\u688d\u688e\u688f\u6890\u6891\u6892\u6893\u6894\u6895\u6896\u6897\u6898\u6899\u689a\u689b\u689c\u689d\u689e\u689f\u68a0\u68a1\u68a2\u68a3\u68a4\u68a5\u68a6\u68a7\u68a8\u68a9\u68aa\u68ab\u68ac\u68ad\u68ae\u68af\u68b0\u68b1\u68b2\u68b3\u68b4\u68b5\u68b6\u68b7\u68b8\u68b9\u68ba\u68bb\u68bc\u68bd\u68be\u68bf\u68c0\u68c1\u68c2\u68c3\u68c4\u68c5\u68c6\u68c7\u68c8\u68c9\u68ca\u68cb\u68cc\u68cd\u68ce\u68cf\u68d0\u68d1\u68d2\u68d3\u68d4\u68d5\u68d6\u68d7\u68d8\u68d9\u68da\u68db\u68dc\u68dd\u68de\u68df\u68e0\u68e1\u68e2\u68e3\u68e4\u68e5\u68e6\u68e7\u68e8\u68e9\u68ea\u68eb\u68ec\u68ed\u68ee\u68ef\u68f0\u68f1\u68f2\u68f3\u68f4\u68f5\u68f6\u68f7\u68f8\u68f9\u68fa\u68fb\u68fc\u68fd\u68fe\u68ff\u6900\u6901\u6902\u6903\u6904\u6905\u6906\u6907\u6908\u6909\u690a\u690b\u690c\u690d\u690e\u690f\u6910\u6911\u6912\u6913\u6914\u6915\u6916\u6917\u6918\u6919\u691a\u691b\u691c\u691d\u691e\u691f\u6920\u6921\u6922\u6923\u6924\u6925\u6926\u6927\u6928\u6929\u692a\u692b\u692c\u692d\u692e\u692f\u6930\u6931\u6932\u6933\u6934\u6935\u6936\u6937\u6938\u6939\u693a\u693b\u693c\u693d\u693e\u693f\u6940\u6941\u6942\u6943\u6944\u6945\u6946\u6947\u6948\u6949\u694a\u694b\u694c\u694d\u694e\u694f\u6950\u6951\u6952\u6953\u6954\u6955\u6956\u6957\u6958\u6959\u695a\u695b\u695c\u695d\u695e\u695f\u6960\u6961\u6962\u6963\u6964\u6965\u6966\u6967\u6968\u6969\u696a\u696b\u696c\u696d\u696e\u696f\u6970\u6971\u6972\u6973\u6974\u6975\u6976\u6977\u6978\u6979\u697a\u697b\u697c\u697d\u697e\u697f\u6980\u6981\u6982\u6983\u6984\u6985\u6986\u6987\u6988\u6989\u698a\u698b\u698c\u698d\u698e\u698f\u6990\u6991\u6992\u6993\u6994\u6995\u6996\u6997\u6998\u6999\u699a\u699b\u699c\u699d\u699e\u699f\u69a0\u69a1\u69a2\u69a3\u69a4\u69a5\u69a6\u69a7\u69a8\u69a9\u69aa\u69ab\u69ac\u69ad\u69ae\u69af\u69b0\u69b1\u69b2\u69b3\u69b4\u69b5\u69b6\u69b7\u69b8\u69b9\u69ba\u69bb\u69bc\u69bd\u69
be\u69bf\u69c0\u69c1\u69c2\u69c3\u69c4\u69c5\u69c6\u69c7\u69c8\u69c9\u69ca\u69cb\u69cc\u69cd\u69ce\u69cf\u69d0\u69d1\u69d2\u69d3\u69d4\u69d5\u69d6\u69d7\u69d8\u69d9\u69da\u69db\u69dc\u69dd\u69de\u69df\u69e0\u69e1\u69e2\u69e3\u69e4\u69e5\u69e6\u69e7\u69e8\u69e9\u69ea\u69eb\u69ec\u69ed\u69ee\u69ef\u69f0\u69f1\u69f2\u69f3\u69f4\u69f5\u69f6\u69f7\u69f8\u69f9\u69fa\u69fb\u69fc\u69fd\u69fe\u69ff\u6a00\u6a01\u6a02\u6a03\u6a04\u6a05\u6a06\u6a07\u6a08\u6a09\u6a0a\u6a0b\u6a0c\u6a0d\u6a0e\u6a0f\u6a10\u6a11\u6a12\u6a13\u6a14\u6a15\u6a16\u6a17\u6a18\u6a19\u6a1a\u6a1b\u6a1c\u6a1d\u6a1e\u6a1f\u6a20\u6a21\u6a22\u6a23\u6a24\u6a25\u6a26\u6a27\u6a28\u6a29\u6a2a\u6a2b\u6a2c\u6a2d\u6a2e\u6a2f\u6a30\u6a31\u6a32\u6a33\u6a34\u6a35\u6a36\u6a37\u6a38\u6a39\u6a3a\u6a3b\u6a3c\u6a3d\u6a3e\u6a3f\u6a40\u6a41\u6a42\u6a43\u6a44\u6a45\u6a46\u6a47\u6a48\u6a49\u6a4a\u6a4b\u6a4c\u6a4d\u6a4e\u6a4f\u6a50\u6a51\u6a52\u6a53\u6a54\u6a55\u6a56\u6a57\u6a58\u6a59\u6a5a\u6a5b\u6a5c\u6a5d\u6a5e\u6a5f\u6a60\u6a61\u6a62\u6a63\u6a64\u6a65\u6a66\u6a67\u6a68\u6a69\u6a6a\u6a6b\u6a6c\u6a6d\u6a6e\u6a6f\u6a70\u6a71\u6a72\u6a73\u6a74\u6a75\u6a76\u6a77\u6a78\u6a79\u6a7a\u6a7b\u6a7c\u6a7d\u6a7e\u6a7f\u6a80\u6a81\u6a82\u6a83\u6a84\u6a85\u6a86\u6a87\u6a88\u6a89\u6a8a\u6a8b\u6a8c\u6a8d\u6a8e\u6a8f\u6a90\u6a91\u6a92\u6a93\u6a94\u6a95\u6a96\u6a97\u6a98\u6a99\u6a9a\u6a9b\u6a9c\u6a9d\u6a9e\u6a9f\u6aa0\u6aa1\u6aa2\u6aa3\u6aa4\u6aa5\u6aa6\u6aa7\u6aa8\u6aa9\u6aaa\u6aab\u6aac\u6aad\u6aae\u6aaf\u6ab0\u6ab1\u6ab2\u6ab3\u6ab4\u6ab5\u6ab6\u6ab7\u6ab8\u6ab9\u6aba\u6abb\u6abc\u6abd\u6abe\u6abf\u6ac0\u6ac1\u6ac2\u6ac3\u6ac4\u6ac5\u6ac6\u6ac7\u6ac8\u6ac9\u6aca\u6acb\u6acc\u6acd\u6ace\u6acf\u6ad0\u6ad1\u6ad2\u6ad3\u6ad4\u6ad5\u6ad6\u6ad7\u6ad8\u6ad9\u6ada\u6adb\u6adc\u6add\u6ade\u6adf\u6ae0\u6ae1\u6ae2\u6ae3\u6ae4\u6ae5\u6ae6\u6ae7\u6ae8\u6ae9\u6aea\u6aeb\u6aec\u6aed\u6aee\u6aef\u6af0\u6af1\u6af2\u6af3\u6af4\u6af5\u6af6\u6af7\u6af8\u6af9\u6afa\u6afb\u6afc\u6afd\u6afe\u6aff\u6b00\u6b01\u6b02\u6b03\u6b04\u6b05\u6b06\u6b07\u6b08\u6b09\u6b0a\u6b0b
\u6b0c\u6b0d\u6b0e\u6b0f\u6b10\u6b11\u6b12\u6b13\u6b14\u6b15\u6b16\u6b17\u6b18\u6b19\u6b1a\u6b1b\u6b1c\u6b1d\u6b1e\u6b1f\u6b20\u6b21\u6b22\u6b23\u6b24\u6b25\u6b26\u6b27\u6b28\u6b29\u6b2a\u6b2b\u6b2c\u6b2d\u6b2e\u6b2f\u6b30\u6b31\u6b32\u6b33\u6b34\u6b35\u6b36\u6b37\u6b38\u6b39\u6b3a\u6b3b\u6b3c\u6b3d\u6b3e\u6b3f\u6b40\u6b41\u6b42\u6b43\u6b44\u6b45\u6b46\u6b47\u6b48\u6b49\u6b4a\u6b4b\u6b4c\u6b4d\u6b4e\u6b4f\u6b50\u6b51\u6b52\u6b53\u6b54\u6b55\u6b56\u6b57\u6b58\u6b59\u6b5a\u6b5b\u6b5c\u6b5d\u6b5e\u6b5f\u6b60\u6b61\u6b62\u6b63\u6b64\u6b65\u6b66\u6b67\u6b68\u6b69\u6b6a\u6b6b\u6b6c\u6b6d\u6b6e\u6b6f\u6b70\u6b71\u6b72\u6b73\u6b74\u6b75\u6b76\u6b77\u6b78\u6b79\u6b7a\u6b7b\u6b7c\u6b7d\u6b7e\u6b7f\u6b80\u6b81\u6b82\u6b83\u6b84\u6b85\u6b86\u6b87\u6b88\u6b89\u6b8a\u6b8b\u6b8c\u6b8d\u6b8e\u6b8f\u6b90\u6b91\u6b92\u6b93\u6b94\u6b95\u6b96\u6b97\u6b98\u6b99\u6b9a\u6b9b\u6b9c\u6b9d\u6b9e\u6b9f\u6ba0\u6ba1\u6ba2\u6ba3\u6ba4\u6ba5\u6ba6\u6ba7\u6ba8\u6ba9\u6baa\u6bab\u6bac\u6bad\u6bae\u6baf\u6bb0\u6bb1\u6bb2\u6bb3\u6bb4\u6bb5\u6bb6\u6bb7\u6bb8\u6bb9\u6bba\u6bbb\u6bbc\u6bbd\u6bbe\u6bbf\u6bc0\u6bc1\u6bc2\u6bc3\u6bc4\u6bc5\u6bc6\u6bc7\u6bc8\u6bc9\u6bca\u6bcb\u6bcc\u6bcd\u6bce\u6bcf\u6bd0\u6bd1\u6bd2\u6bd3\u6bd4\u6bd5\u6bd6\u6bd7\u6bd8\u6bd9\u6bda\u6bdb\u6bdc\u6bdd\u6bde\u6bdf\u6be0\u6be1\u6be2\u6be3\u6be4\u6be5\u6be6\u6be7\u6be8\u6be9\u6bea\u6beb\u6bec\u6bed\u6bee\u6bef\u6bf0\u6bf1\u6bf2\u6bf3\u6bf4\u6bf5\u6bf6\u6bf7\u6bf8\u6bf9\u6bfa\u6bfb\u6bfc\u6bfd\u6bfe\u6bff\u6c00\u6c01\u6c02\u6c03\u6c04\u6c05\u6c06\u6c07\u6c08\u6c09\u6c0a\u6c0b\u6c0c\u6c0d\u6c0e\u6c0f\u6c10\u6c11\u6c12\u6c13\u6c14\u6c15\u6c16\u6c17\u6c18\u6c19\u6c1a\u6c1b\u6c1c\u6c1d\u6c1e\u6c1f\u6c20\u6c21\u6c22\u6c23\u6c24\u6c25\u6c26\u6c27\u6c28\u6c29\u6c2a\u6c2b\u6c2c\u6c2d\u6c2e\u6c2f\u6c30\u6c31\u6c32\u6c33\u6c34\u6c35\u6c36\u6c37\u6c38\u6c39\u6c3a\u6c3b\u6c3c\u6c3d\u6c3e\u6c3f\u6c40\u6c41\u6c42\u6c43\u6c44\u6c45\u6c46\u6c47\u6c48\u6c49\u6c4a\u6c4b\u6c4c\u6c4d\u6c4e\u6c4f\u6c50\u6c51\u6c52\u6c53\u6c54\u6c55\u6c56\u6c57\u6c58\u
6c59\u6c5a\u6c5b\u6c5c\u6c5d\u6c5e\u6c5f\u6c60\u6c61\u6c62\u6c63\u6c64\u6c65\u6c66\u6c67\u6c68\u6c69\u6c6a\u6c6b\u6c6c\u6c6d\u6c6e\u6c6f\u6c70\u6c71\u6c72\u6c73\u6c74\u6c75\u6c76\u6c77\u6c78\u6c79\u6c7a\u6c7b\u6c7c\u6c7d\u6c7e\u6c7f\u6c80\u6c81\u6c82\u6c83\u6c84\u6c85\u6c86\u6c87\u6c88\u6c89\u6c8a\u6c8b\u6c8c\u6c8d\u6c8e\u6c8f\u6c90\u6c91\u6c92\u6c93\u6c94\u6c95\u6c96\u6c97\u6c98\u6c99\u6c9a\u6c9b\u6c9c\u6c9d\u6c9e\u6c9f\u6ca0\u6ca1\u6ca2\u6ca3\u6ca4\u6ca5\u6ca6\u6ca7\u6ca8\u6ca9\u6caa\u6cab\u6cac\u6cad\u6cae\u6caf\u6cb0\u6cb1\u6cb2\u6cb3\u6cb4\u6cb5\u6cb6\u6cb7\u6cb8\u6cb9\u6cba\u6cbb\u6cbc\u6cbd\u6cbe\u6cbf\u6cc0\u6cc1\u6cc2\u6cc3\u6cc4\u6cc5\u6cc6\u6cc7\u6cc8\u6cc9\u6cca\u6ccb\u6ccc\u6ccd\u6cce\u6ccf\u6cd0\u6cd1\u6cd2\u6cd3\u6cd4\u6cd5\u6cd6\u6cd7\u6cd8\u6cd9\u6cda\u6cdb\u6cdc\u6cdd\u6cde\u6cdf\u6ce0\u6ce1\u6ce2\u6ce3\u6ce4\u6ce5\u6ce6\u6ce7\u6ce8\u6ce9\u6cea\u6ceb\u6cec\u6ced\u6cee\u6cef\u6cf0\u6cf1\u6cf2\u6cf3\u6cf4\u6cf5\u6cf6\u6cf7\u6cf8\u6cf9\u6cfa\u6cfb\u6cfc\u6cfd\u6cfe\u6cff\u6d00\u6d01\u6d02\u6d03\u6d04\u6d05\u6d06\u6d07\u6d08\u6d09\u6d0a\u6d0b\u6d0c\u6d0d\u6d0e\u6d0f\u6d10\u6d11\u6d12\u6d13\u6d14\u6d15\u6d16\u6d17\u6d18\u6d19\u6d1a\u6d1b\u6d1c\u6d1d\u6d1e\u6d1f\u6d20\u6d21\u6d22\u6d23\u6d24\u6d25\u6d26\u6d27\u6d28\u6d29\u6d2a\u6d2b\u6d2c\u6d2d\u6d2e\u6d2f\u6d30\u6d31\u6d32\u6d33\u6d34\u6d35\u6d36\u6d37\u6d38\u6d39\u6d3a\u6d3b\u6d3c\u6d3d\u6d3e\u6d3f\u6d40\u6d41\u6d42\u6d43\u6d44\u6d45\u6d46\u6d47\u6d48\u6d49\u6d4a\u6d4b\u6d4c\u6d4d\u6d4e\u6d4f\u6d50\u6d51\u6d52\u6d53\u6d54\u6d55\u6d56\u6d57\u6d58\u6d59\u6d5a\u6d5b\u6d5c\u6d5d\u6d5e\u6d5f\u6d60\u6d61\u6d62\u6d63\u6d64\u6d65\u6d66\u6d67\u6d68\u6d69\u6d6a\u6d6b\u6d6c\u6d6d\u6d6e\u6d6f\u6d70\u6d71\u6d72\u6d73\u6d74\u6d75\u6d76\u6d77\u6d78\u6d79\u6d7a\u6d7b\u6d7c\u6d7d\u6d7e\u6d7f\u6d80\u6d81\u6d82\u6d83\u6d84\u6d85\u6d86\u6d87\u6d88\u6d89\u6d8a\u6d8b\u6d8c\u6d8d\u6d8e\u6d8f\u6d90\u6d91\u6d92\u6d93\u6d94\u6d95\u6d96\u6d97\u6d98\u6d99\u6d9a\u6d9b\u6d9c\u6d9d\u6d9e\u6d9f\u6da0\u6da1\u6da2\u6da3\u6da4\u6da5\u6d
a6\u6da7\u6da8\u6da9\u6daa\u6dab\u6dac\u6dad\u6dae\u6daf\u6db0\u6db1\u6db2\u6db3\u6db4\u6db5\u6db6\u6db7\u6db8\u6db9\u6dba\u6dbb\u6dbc\u6dbd\u6dbe\u6dbf\u6dc0\u6dc1\u6dc2\u6dc3\u6dc4\u6dc5\u6dc6\u6dc7\u6dc8\u6dc9\u6dca\u6dcb\u6dcc\u6dcd\u6dce\u6dcf\u6dd0\u6dd1\u6dd2\u6dd3\u6dd4\u6dd5\u6dd6\u6dd7\u6dd8\u6dd9\u6dda\u6ddb\u6ddc\u6ddd\u6dde\u6ddf\u6de0\u6de1\u6de2\u6de3\u6de4\u6de5\u6de6\u6de7\u6de8\u6de9\u6dea\u6deb\u6dec\u6ded\u6dee\u6def\u6df0\u6df1\u6df2\u6df3\u6df4\u6df5\u6df6\u6df7\u6df8\u6df9\u6dfa\u6dfb\u6dfc\u6dfd\u6dfe\u6dff\u6e00\u6e01\u6e02\u6e03\u6e04\u6e05\u6e06\u6e07\u6e08\u6e09\u6e0a\u6e0b\u6e0c\u6e0d\u6e0e\u6e0f\u6e10\u6e11\u6e12\u6e13\u6e14\u6e15\u6e16\u6e17\u6e18\u6e19\u6e1a\u6e1b\u6e1c\u6e1d\u6e1e\u6e1f\u6e20\u6e21\u6e22\u6e23\u6e24\u6e25\u6e26\u6e27\u6e28\u6e29\u6e2a\u6e2b\u6e2c\u6e2d\u6e2e\u6e2f\u6e30\u6e31\u6e32\u6e33\u6e34\u6e35\u6e36\u6e37\u6e38\u6e39\u6e3a\u6e3b\u6e3c\u6e3d\u6e3e\u6e3f\u6e40\u6e41\u6e42\u6e43\u6e44\u6e45\u6e46\u6e47\u6e48\u6e49\u6e4a\u6e4b\u6e4c\u6e4d\u6e4e\u6e4f\u6e50\u6e51\u6e52\u6e53\u6e54\u6e55\u6e56\u6e57\u6e58\u6e59\u6e5a\u6e5b\u6e5c\u6e5d\u6e5e\u6e5f\u6e60\u6e61\u6e62\u6e63\u6e64\u6e65\u6e66\u6e67\u6e68\u6e69\u6e6a\u6e6b\u6e6c\u6e6d\u6e6e\u6e6f\u6e70\u6e71\u6e72\u6e73\u6e74\u6e75\u6e76\u6e77\u6e78\u6e79\u6e7a\u6e7b\u6e7c\u6e7d\u6e7e\u6e7f\u6e80\u6e81\u6e82\u6e83\u6e84\u6e85\u6e86\u6e87\u6e88\u6e89\u6e8a\u6e8b\u6e8c\u6e8d\u6e8e\u6e8f\u6e90\u6e91\u6e92\u6e93\u6e94\u6e95\u6e96\u6e97\u6e98\u6e99\u6e9a\u6e9b\u6e9c\u6e9d\u6e9e\u6e9f\u6ea0\u6ea1\u6ea2\u6ea3\u6ea4\u6ea5\u6ea6\u6ea7\u6ea8\u6ea9\u6eaa\u6eab\u6eac\u6ead\u6eae\u6eaf\u6eb0\u6eb1\u6eb2\u6eb3\u6eb4\u6eb5\u6eb6\u6eb7\u6eb8\u6eb9\u6eba\u6ebb\u6ebc\u6ebd\u6ebe\u6ebf\u6ec0\u6ec1\u6ec2\u6ec3\u6ec4\u6ec5\u6ec6\u6ec7\u6ec8\u6ec9\u6eca\u6ecb\u6ecc\u6ecd\u6ece\u6ecf\u6ed0\u6ed1\u6ed2\u6ed3\u6ed4\u6ed5\u6ed6\u6ed7\u6ed8\u6ed9\u6eda\u6edb\u6edc\u6edd\u6ede\u6edf\u6ee0\u6ee1\u6ee2\u6ee3\u6ee4\u6ee5\u6ee6\u6ee7\u6ee8\u6ee9\u6eea\u6eeb\u6eec\u6eed\u6eee\u6eef\u6ef0\u6ef1\u6ef2\u6ef3
\u6ef4\u6ef5\u6ef6\u6ef7\u6ef8\u6ef9\u6efa\u6efb\u6efc\u6efd\u6efe\u6eff\u6f00\u6f01\u6f02\u6f03\u6f04\u6f05\u6f06\u6f07\u6f08\u6f09\u6f0a\u6f0b\u6f0c\u6f0d\u6f0e\u6f0f\u6f10\u6f11\u6f12\u6f13\u6f14\u6f15\u6f16\u6f17\u6f18\u6f19\u6f1a\u6f1b\u6f1c\u6f1d\u6f1e\u6f1f\u6f20\u6f21\u6f22\u6f23\u6f24\u6f25\u6f26\u6f27\u6f28\u6f29\u6f2a\u6f2b\u6f2c\u6f2d\u6f2e\u6f2f\u6f30\u6f31\u6f32\u6f33\u6f34\u6f35\u6f36\u6f37\u6f38\u6f39\u6f3a\u6f3b\u6f3c\u6f3d\u6f3e\u6f3f\u6f40\u6f41\u6f42\u6f43\u6f44\u6f45\u6f46\u6f47\u6f48\u6f49\u6f4a\u6f4b\u6f4c\u6f4d\u6f4e\u6f4f\u6f50\u6f51\u6f52\u6f53\u6f54\u6f55\u6f56\u6f57\u6f58\u6f59\u6f5a\u6f5b\u6f5c\u6f5d\u6f5e\u6f5f\u6f60\u6f61\u6f62\u6f63\u6f64\u6f65\u6f66\u6f67\u6f68\u6f69\u6f6a\u6f6b\u6f6c\u6f6d\u6f6e\u6f6f\u6f70\u6f71\u6f72\u6f73\u6f74\u6f75\u6f76\u6f77\u6f78\u6f79\u6f7a\u6f7b\u6f7c\u6f7d\u6f7e\u6f7f\u6f80\u6f81\u6f82\u6f83\u6f84\u6f85\u6f86\u6f87\u6f88\u6f89\u6f8a\u6f8b\u6f8c\u6f8d\u6f8e\u6f8f\u6f90\u6f91\u6f92\u6f93\u6f94\u6f95\u6f96\u6f97\u6f98\u6f99\u6f9a\u6f9b\u6f9c\u6f9d\u6f9e\u6f9f\u6fa0\u6fa1\u6fa2\u6fa3\u6fa4\u6fa5\u6fa6\u6fa7\u6fa8\u6fa9\u6faa\u6fab\u6fac\u6fad\u6fae\u6faf\u6fb0\u6fb1\u6fb2\u6fb3\u6fb4\u6fb5\u6fb6\u6fb7\u6fb8\u6fb9\u6fba\u6fbb\u6fbc\u6fbd\u6fbe\u6fbf\u6fc0\u6fc1\u6fc2\u6fc3\u6fc4\u6fc5\u6fc6\u6fc7\u6fc8\u6fc9\u6fca\u6fcb\u6fcc\u6fcd\u6fce\u6fcf\u6fd0\u6fd1\u6fd2\u6fd3\u6fd4\u6fd5\u6fd6\u6fd7\u6fd8\u6fd9\u6fda\u6fdb\u6fdc\u6fdd\u6fde\u6fdf\u6fe0\u6fe1\u6fe2\u6fe3\u6fe4\u6fe5\u6fe6\u6fe7\u6fe8\u6fe9\u6fea\u6feb\u6fec\u6fed\u6fee\u6fef\u6ff0\u6ff1\u6ff2\u6ff3\u6ff4\u6ff5\u6ff6\u6ff7\u6ff8\u6ff9\u6ffa\u6ffb\u6ffc\u6ffd\u6ffe\u6fff\u7000\u7001\u7002\u7003\u7004\u7005\u7006\u7007\u7008\u7009\u700a\u700b\u700c\u700d\u700e\u700f\u7010\u7011\u7012\u7013\u7014\u7015\u7016\u7017\u7018\u7019\u701a\u701b\u701c\u701d\u701e\u701f\u7020\u7021\u7022\u7023\u7024\u7025\u7026\u7027\u7028\u7029\u702a\u702b\u702c\u702d\u702e\u702f\u7030\u7031\u7032\u7033\u7034\u7035\u7036\u7037\u7038\u7039\u703a\u703b\u703c\u703d\u703e\u703f\u7040\u
7041\u7042\u7043\u7044\u7045\u7046\u7047\u7048\u7049\u704a\u704b\u704c\u704d\u704e\u704f\u7050\u7051\u7052\u7053\u7054\u7055\u7056\u7057\u7058\u7059\u705a\u705b\u705c\u705d\u705e\u705f\u7060\u7061\u7062\u7063\u7064\u7065\u7066\u7067\u7068\u7069\u706a\u706b\u706c\u706d\u706e\u706f\u7070\u7071\u7072\u7073\u7074\u7075\u7076\u7077\u7078\u7079\u707a\u707b\u707c\u707d\u707e\u707f\u7080\u7081\u7082\u7083\u7084\u7085\u7086\u7087\u7088\u7089\u708a\u708b\u708c\u708d\u708e\u708f\u7090\u7091\u7092\u7093\u7094\u7095\u7096\u7097\u7098\u7099\u709a\u709b\u709c\u709d\u709e\u709f\u70a0\u70a1\u70a2\u70a3\u70a4\u70a5\u70a6\u70a7\u70a8\u70a9\u70aa\u70ab\u70ac\u70ad\u70ae\u70af\u70b0\u70b1\u70b2\u70b3\u70b4\u70b5\u70b6\u70b7\u70b8\u70b9\u70ba\u70bb\u70bc\u70bd\u70be\u70bf\u70c0\u70c1\u70c2\u70c3\u70c4\u70c5\u70c6\u70c7\u70c8\u70c9\u70ca\u70cb\u70cc\u70cd\u70ce\u70cf\u70d0\u70d1\u70d2\u70d3\u70d4\u70d5\u70d6\u70d7\u70d8\u70d9\u70da\u70db\u70dc\u70dd\u70de\u70df\u70e0\u70e1\u70e2\u70e3\u70e4\u70e5\u70e6\u70e7\u70e8\u70e9\u70ea\u70eb\u70ec\u70ed\u70ee\u70ef\u70f0\u70f1\u70f2\u70f3\u70f4\u70f5\u70f6\u70f7\u70f8\u70f9\u70fa\u70fb\u70fc\u70fd\u70fe\u70ff\u7100\u7101\u7102\u7103\u7104\u7105\u7106\u7107\u7108\u7109\u710a\u710b\u710c\u710d\u710e\u710f\u7110\u7111\u7112\u7113\u7114\u7115\u7116\u7117\u7118\u7119\u711a\u711b\u711c\u711d\u711e\u711f\u7120\u7121\u7122\u7123\u7124\u7125\u7126\u7127\u7128\u7129\u712a\u712b\u712c\u712d\u712e\u712f\u7130\u7131\u7132\u7133\u7134\u7135\u7136\u7137\u7138\u7139\u713a\u713b\u713c\u713d\u713e\u713f\u7140\u7141\u7142\u7143\u7144\u7145\u7146\u7147\u7148\u7149\u714a\u714b\u714c\u714d\u714e\u714f\u7150\u7151\u7152\u7153\u7154\u7155\u7156\u7157\u7158\u7159\u715a\u715b\u715c\u715d\u715e\u715f\u7160\u7161\u7162\u7163\u7164\u7165\u7166\u7167\u7168\u7169\u716a\u716b\u716c\u716d\u716e\u716f\u7170\u7171\u7172\u7173\u7174\u7175\u7176\u7177\u7178\u7179\u717a\u717b\u717c\u717d\u717e\u717f\u7180\u7181\u7182\u7183\u7184\u7185\u7186\u7187\u7188\u7189\u718a\u718b\u718c\u718d\u71
8e\u718f\u7190\u7191\u7192\u7193\u7194\u7195\u7196\u7197\u7198\u7199\u719a\u719b\u719c\u719d\u719e\u719f\u71a0\u71a1\u71a2\u71a3\u71a4\u71a5\u71a6\u71a7\u71a8\u71a9\u71aa\u71ab\u71ac\u71ad\u71ae\u71af\u71b0\u71b1\u71b2\u71b3\u71b4\u71b5\u71b6\u71b7\u71b8\u71b9\u71ba\u71bb\u71bc\u71bd\u71be\u71bf\u71c0\u71c1\u71c2\u71c3\u71c4\u71c5\u71c6\u71c7\u71c8\u71c9\u71ca\u71cb\u71cc\u71cd\u71ce\u71cf\u71d0\u71d1\u71d2\u71d3\u71d4\u71d5\u71d6\u71d7\u71d8\u71d9\u71da\u71db\u71dc\u71dd\u71de\u71df\u71e0\u71e1\u71e2\u71e3\u71e4\u71e5\u71e6\u71e7\u71e8\u71e9\u71ea\u71eb\u71ec\u71ed\u71ee\u71ef\u71f0\u71f1\u71f2\u71f3\u71f4\u71f5\u71f6\u71f7\u71f8\u71f9\u71fa\u71fb\u71fc\u71fd\u71fe\u71ff\u7200\u7201\u7202\u7203\u7204\u7205\u7206\u7207\u7208\u7209\u720a\u720b\u720c\u720d\u720e\u720f\u7210\u7211\u7212\u7213\u7214\u7215\u7216\u7217\u7218\u7219\u721a\u721b\u721c\u721d\u721e\u721f\u7220\u7221\u7222\u7223\u7224\u7225\u7226\u7227\u7228\u7229\u722a\u722b\u722c\u722d\u722e\u722f\u7230\u7231\u7232\u7233\u7234\u7235\u7236\u7237\u7238\u7239\u723a\u723b\u723c\u723d\u723e\u723f\u7240\u7241\u7242\u7243\u7244\u7245\u7246\u7247\u7248\u7249\u724a\u724b\u724c\u724d\u724e\u724f\u7250\u7251\u7252\u7253\u7254\u7255\u7256\u7257\u7258\u7259\u725a\u725b\u725c\u725d\u725e\u725f\u7260\u7261\u7262\u7263\u7264\u7265\u7266\u7267\u7268\u7269\u726a\u726b\u726c\u726d\u726e\u726f\u7270\u7271\u7272\u7273\u7274\u7275\u7276\u7277\u7278\u7279\u727a\u727b\u727c\u727d\u727e\u727f\u7280\u7281\u7282\u7283\u7284\u7285\u7286\u7287\u7288\u7289\u728a\u728b\u728c\u728d\u728e\u728f\u7290\u7291\u7292\u7293\u7294\u7295\u7296\u7297\u7298\u7299\u729a\u729b\u729c\u729d\u729e\u729f\u72a0\u72a1\u72a2\u72a3\u72a4\u72a5\u72a6\u72a7\u72a8\u72a9\u72aa\u72ab\u72ac\u72ad\u72ae\u72af\u72b0\u72b1\u72b2\u72b3\u72b4\u72b5\u72b6\u72b7\u72b8\u72b9\u72ba\u72bb\u72bc\u72bd\u72be\u72bf\u72c0\u72c1\u72c2\u72c3\u72c4\u72c5\u72c6\u72c7\u72c8\u72c9\u72ca\u72cb\u72cc\u72cd\u72ce\u72cf\u72d0\u72d1\u72d2\u72d3\u72d4\u72d5\u72d6\u72d7\u72d8\u72d9\u72da\u72db
\u72dc\u72dd\u72de\u72df\u72e0\u72e1\u72e2\u72e3\u72e4\u72e5\u72e6\u72e7\u72e8\u72e9\u72ea\u72eb\u72ec\u72ed\u72ee\u72ef\u72f0\u72f1\u72f2\u72f3\u72f4\u72f5\u72f6\u72f7\u72f8\u72f9\u72fa\u72fb\u72fc\u72fd\u72fe\u72ff\u7300\u7301\u7302\u7303\u7304\u7305\u7306\u7307\u7308\u7309\u730a\u730b\u730c\u730d\u730e\u730f\u7310\u7311\u7312\u7313\u7314\u7315\u7316\u7317\u7318\u7319\u731a\u731b\u731c\u731d\u731e\u731f\u7320\u7321\u7322\u7323\u7324\u7325\u7326\u7327\u7328\u7329\u732a\u732b\u732c\u732d\u732e\u732f\u7330\u7331\u7332\u7333\u7334\u7335\u7336\u7337\u7338\u7339\u733a\u733b\u733c\u733d\u733e\u733f\u7340\u7341\u7342\u7343\u7344\u7345\u7346\u7347\u7348\u7349\u734a\u734b\u734c\u734d\u734e\u734f\u7350\u7351\u7352\u7353\u7354\u7355\u7356\u7357\u7358\u7359\u735a\u735b\u735c\u735d\u735e\u735f\u7360\u7361\u7362\u7363\u7364\u7365\u7366\u7367\u7368\u7369\u736a\u736b\u736c\u736d\u736e\u736f\u7370\u7371\u7372\u7373\u7374\u7375\u7376\u7377\u7378\u7379\u737a\u737b\u737c\u737d\u737e\u737f\u7380\u7381\u7382\u7383\u7384\u7385\u7386\u7387\u7388\u7389\u738a\u738b\u738c\u738d\u738e\u738f\u7390\u7391\u7392\u7393\u7394\u7395\u7396\u7397\u7398\u7399\u739a\u739b\u739c\u739d\u739e\u739f\u73a0\u73a1\u73a2\u73a3\u73a4\u73a5\u73a6\u73a7\u73a8\u73a9\u73aa\u73ab\u73ac\u73ad\u73ae\u73af\u73b0\u73b1\u73b2\u73b3\u73b4\u73b5\u73b6\u73b7\u73b8\u73b9\u73ba\u73bb\u73bc\u73bd\u73be\u73bf\u73c0\u73c1\u73c2\u73c3\u73c4\u73c5\u73c6\u73c7\u73c8\u73c9\u73ca\u73cb\u73cc\u73cd\u73ce\u73cf\u73d0\u73d1\u73d2\u73d3\u73d4\u73d5\u73d6\u73d7\u73d8\u73d9\u73da\u73db\u73dc\u73dd\u73de\u73df\u73e0\u73e1\u73e2\u73e3\u73e4\u73e5\u73e6\u73e7\u73e8\u73e9\u73ea\u73eb\u73ec\u73ed\u73ee\u73ef\u73f0\u73f1\u73f2\u73f3\u73f4\u73f5\u73f6\u73f7\u73f8\u73f9\u73fa\u73fb\u73fc\u73fd\u73fe\u73ff\u7400\u7401\u7402\u7403\u7404\u7405\u7406\u7407\u7408\u7409\u740a\u740b\u740c\u740d\u740e\u740f\u7410\u7411\u7412\u7413\u7414\u7415\u7416\u7417\u7418\u7419\u741a\u741b\u741c\u741d\u741e\u741f\u7420\u7421\u7422\u7423\u7424\u7425\u7426\u7427\u7428\u
7429\u742a\u742b\u742c\u742d\u742e\u742f\u7430\u7431\u7432\u7433\u7434\u7435\u7436\u7437\u7438\u7439\u743a\u743b\u743c\u743d\u743e\u743f\u7440\u7441\u7442\u7443\u7444\u7445\u7446\u7447\u7448\u7449\u744a\u744b\u744c\u744d\u744e\u744f\u7450\u7451\u7452\u7453\u7454\u7455\u7456\u7457\u7458\u7459\u745a\u745b\u745c\u745d\u745e\u745f\u7460\u7461\u7462\u7463\u7464\u7465\u7466\u7467\u7468\u7469\u746a\u746b\u746c\u746d\u746e\u746f\u7470\u7471\u7472\u7473\u7474\u7475\u7476\u7477\u7478\u7479\u747a\u747b\u747c\u747d\u747e\u747f\u7480\u7481\u7482\u7483\u7484\u7485\u7486\u7487\u7488\u7489\u748a\u748b\u748c\u748d\u748e\u748f\u7490\u7491\u7492\u7493\u7494\u7495\u7496\u7497\u7498\u7499\u749a\u749b\u749c\u749d\u749e\u749f\u74a0\u74a1\u74a2\u74a3\u74a4\u74a5\u74a6\u74a7\u74a8\u74a9\u74aa\u74ab\u74ac\u74ad\u74ae\u74af\u74b0\u74b1\u74b2\u74b3\u74b4\u74b5\u74b6\u74b7\u74b8\u74b9\u74ba\u74bb\u74bc\u74bd\u74be\u74bf\u74c0\u74c1\u74c2\u74c3\u74c4\u74c5\u74c6\u74c7\u74c8\u74c9\u74ca\u74cb\u74cc\u74cd\u74ce\u74cf\u74d0\u74d1\u74d2\u74d3\u74d4\u74d5\u74d6\u74d7\u74d8\u74d9\u74da\u74db\u74dc\u74dd\u74de\u74df\u74e0\u74e1\u74e2\u74e3\u74e4\u74e5\u74e6\u74e7\u74e8\u74e9\u74ea\u74eb\u74ec\u74ed\u74ee\u74ef\u74f0\u74f1\u74f2\u74f3\u74f4\u74f5\u74f6\u74f7\u74f8\u74f9\u74fa\u74fb\u74fc\u74fd\u74fe\u74ff\u7500\u7501\u7502\u7503\u7504\u7505\u7506\u7507\u7508\u7509\u750a\u750b\u750c\u750d\u750e\u750f\u7510\u7511\u7512\u7513\u7514\u7515\u7516\u7517\u7518\u7519\u751a\u751b\u751c\u751d\u751e\u751f\u7520\u7521\u7522\u7523\u7524\u7525\u7526\u7527\u7528\u7529\u752a\u752b\u752c\u752d\u752e\u752f\u7530\u7531\u7532\u7533\u7534\u7535\u7536\u7537\u7538\u7539\u753a\u753b\u753c\u753d\u753e\u753f\u7540\u7541\u7542\u7543\u7544\u7545\u7546\u7547\u7548\u7549\u754a\u754b\u754c\u754d\u754e\u754f\u7550\u7551\u7552\u7553\u7554\u7555\u7556\u7557\u7558\u7559\u755a\u755b\u755c\u755d\u755e\u755f\u7560\u7561\u7562\u7563\u7564\u7565\u7566\u7567\u7568\u7569\u756a\u756b\u756c\u756d\u756e\u756f\u7570\u7571\u7572\u7573\u7574\u7575\u75
76\u7577\u7578\u7579\u757a\u757b\u757c\u757d\u757e\u757f\u7580\u7581\u7582\u7583\u7584\u7585\u7586\u7587\u7588\u7589\u758a\u758b\u758c\u758d\u758e\u758f\u7590\u7591\u7592\u7593\u7594\u7595\u7596\u7597\u7598\u7599\u759a\u759b\u759c\u759d\u759e\u759f\u75a0\u75a1\u75a2\u75a3\u75a4\u75a5\u75a6\u75a7\u75a8\u75a9\u75aa\u75ab\u75ac\u75ad\u75ae\u75af\u75b0\u75b1\u75b2\u75b3\u75b4\u75b5\u75b6\u75b7\u75b8\u75b9\u75ba\u75bb\u75bc\u75bd\u75be\u75bf\u75c0\u75c1\u75c2\u75c3\u75c4\u75c5\u75c6\u75c7\u75c8\u75c9\u75ca\u75cb\u75cc\u75cd\u75ce\u75cf\u75d0\u75d1\u75d2\u75d3\u75d4\u75d5\u75d6\u75d7\u75d8\u75d9\u75da\u75db\u75dc\u75dd\u75de\u75df\u75e0\u75e1\u75e2\u75e3\u75e4\u75e5\u75e6\u75e7\u75e8\u75e9\u75ea\u75eb\u75ec\u75ed\u75ee\u75ef\u75f0\u75f1\u75f2\u75f3\u75f4\u75f5\u75f6\u75f7\u75f8\u75f9\u75fa\u75fb\u75fc\u75fd\u75fe\u75ff\u7600\u7601\u7602\u7603\u7604\u7605\u7606\u7607\u7608\u7609\u760a\u760b\u760c\u760d\u760e\u760f\u7610\u7611\u7612\u7613\u7614\u7615\u7616\u7617\u7618\u7619\u761a\u761b\u761c\u761d\u761e\u761f\u7620\u7621\u7622\u7623\u7624\u7625\u7626\u7627\u7628\u7629\u762a\u762b\u762c\u762d\u762e\u762f\u7630\u7631\u7632\u7633\u7634\u7635\u7636\u7637\u7638\u7639\u763a\u763b\u763c\u763d\u763e\u763f\u7640\u7641\u7642\u7643\u7644\u7645\u7646\u7647\u7648\u7649\u764a\u764b\u764c\u764d\u764e\u764f\u7650\u7651\u7652\u7653\u7654\u7655\u7656\u7657\u7658\u7659\u765a\u765b\u765c\u765d\u765e\u765f\u7660\u7661\u7662\u7663\u7664\u7665\u7666\u7667\u7668\u7669\u766a\u766b\u766c\u766d\u766e\u766f\u7670\u7671\u7672\u7673\u7674\u7675\u7676\u7677\u7678\u7679\u767a\u767b\u767c\u767d\u767e\u767f\u7680\u7681\u7682\u7683\u7684\u7685\u7686\u7687\u7688\u7689\u768a\u768b\u768c\u768d\u768e\u768f\u7690\u7691\u7692\u7693\u7694\u7695\u7696\u7697\u7698\u7699\u769a\u769b\u769c\u769d\u769e\u769f\u76a0\u76a1\u76a2\u76a3\u76a4\u76a5\u76a6\u76a7\u76a8\u76a9\u76aa\u76ab\u76ac\u76ad\u76ae\u76af\u76b0\u76b1\u76b2\u76b3\u76b4\u76b5\u76b6\u76b7\u76b8\u76b9\u76ba\u76bb\u76bc\u76bd\u76be\u76bf\u76c0\u76c1\u76c2\u76c3
\u76c4\u76c5\u76c6\u76c7\u76c8\u76c9\u76ca\u76cb\u76cc\u76cd\u76ce\u76cf\u76d0\u76d1\u76d2\u76d3\u76d4\u76d5\u76d6\u76d7\u76d8\u76d9\u76da\u76db\u76dc\u76dd\u76de\u76df\u76e0\u76e1\u76e2\u76e3\u76e4\u76e5\u76e6\u76e7\u76e8\u76e9\u76ea\u76eb\u76ec\u76ed\u76ee\u76ef\u76f0\u76f1\u76f2\u76f3\u76f4\u76f5\u76f6\u76f7\u76f8\u76f9\u76fa\u76fb\u76fc\u76fd\u76fe\u76ff\u7700\u7701\u7702\u7703\u7704\u7705\u7706\u7707\u7708\u7709\u770a\u770b\u770c\u770d\u770e\u770f\u7710\u7711\u7712\u7713\u7714\u7715\u7716\u7717\u7718\u7719\u771a\u771b\u771c\u771d\u771e\u771f\u7720\u7721\u7722\u7723\u7724\u7725\u7726\u7727\u7728\u7729\u772a\u772b\u772c\u772d\u772e\u772f\u7730\u7731\u7732\u7733\u7734\u7735\u7736\u7737\u7738\u7739\u773a\u773b\u773c\u773d\u773e\u773f\u7740\u7741\u7742\u7743\u7744\u7745\u7746\u7747\u7748\u7749\u774a\u774b\u774c\u774d\u774e\u774f\u7750\u7751\u7752\u7753\u7754\u7755\u7756\u7757\u7758\u7759\u775a\u775b\u775c\u775d\u775e\u775f\u7760\u7761\u7762\u7763\u7764\u7765\u7766\u7767\u7768\u7769\u776a\u776b\u776c\u776d\u776e\u776f\u7770\u7771\u7772\u7773\u7774\u7775\u7776\u7777\u7778\u7779\u777a\u777b\u777c\u777d\u777e\u777f\u7780\u7781\u7782\u7783\u7784\u7785\u7786\u7787\u7788\u7789\u778a\u778b\u778c\u778d\u778e\u778f\u7790\u7791\u7792\u7793\u7794\u7795\u7796\u7797\u7798\u7799\u779a\u779b\u779c\u779d\u779e\u779f\u77a0\u77a1\u77a2\u77a3\u77a4\u77a5\u77a6\u77a7\u77a8\u77a9\u77aa\u77ab\u77ac\u77ad\u77ae\u77af\u77b0\u77b1\u77b2\u77b3\u77b4\u77b5\u77b6\u77b7\u77b8\u77b9\u77ba\u77bb\u77bc\u77bd\u77be\u77bf\u77c0\u77c1\u77c2\u77c3\u77c4\u77c5\u77c6\u77c7\u77c8\u77c9\u77ca\u77cb\u77cc\u77cd\u77ce\u77cf\u77d0\u77d1\u77d2\u77d3\u77d4\u77d5\u77d6\u77d7\u77d8\u77d9\u77da\u77db\u77dc\u77dd\u77de\u77df\u77e0\u77e1\u77e2\u77e3\u77e4\u77e5\u77e6\u77e7\u77e8\u77e9\u77ea\u77eb\u77ec\u77ed\u77ee\u77ef\u77f0\u77f1\u77f2\u77f3\u77f4\u77f5\u77f6\u77f7\u77f8\u77f9\u77fa\u77fb\u77fc\u77fd\u77fe\u77ff\u7800\u7801\u7802\u7803\u7804\u7805\u7806\u7807\u7808\u7809\u780a\u780b\u780c\u780d\u780e\u780f\u7810\u
7811\u7812\u7813\u7814\u7815\u7816\u7817\u7818\u7819\u781a\u781b\u781c\u781d\u781e\u781f\u7820\u7821\u7822\u7823\u7824\u7825\u7826\u7827\u7828\u7829\u782a\u782b\u782c\u782d\u782e\u782f\u7830\u7831\u7832\u7833\u7834\u7835\u7836\u7837\u7838\u7839\u783a\u783b\u783c\u783d\u783e\u783f\u7840\u7841\u7842\u7843\u7844\u7845\u7846\u7847\u7848\u7849\u784a\u784b\u784c\u784d\u784e\u784f\u7850\u7851\u7852\u7853\u7854\u7855\u7856\u7857\u7858\u7859\u785a\u785b\u785c\u785d\u785e\u785f\u7860\u7861\u7862\u7863\u7864\u7865\u7866\u7867\u7868\u7869\u786a\u786b\u786c\u786d\u786e\u786f\u7870\u7871\u7872\u7873\u7874\u7875\u7876\u7877\u7878\u7879\u787a\u787b\u787c\u787d\u787e\u787f\u7880\u7881\u7882\u7883\u7884\u7885\u7886\u7887\u7888\u7889\u788a\u788b\u788c\u788d\u788e\u788f\u7890\u7891\u7892\u7893\u7894\u7895\u7896\u7897\u7898\u7899\u789a\u789b\u789c\u789d\u789e\u789f\u78a0\u78a1\u78a2\u78a3\u78a4\u78a5\u78a6\u78a7\u78a8\u78a9\u78aa\u78ab\u78ac\u78ad\u78ae\u78af\u78b0\u78b1\u78b2\u78b3\u78b4\u78b5\u78b6\u78b7\u78b8\u78b9\u78ba\u78bb\u78bc\u78bd\u78be\u78bf\u78c0\u78c1\u78c2\u78c3\u78c4\u78c5\u78c6\u78c7\u78c8\u78c9\u78ca\u78cb\u78cc\u78cd\u78ce\u78cf\u78d0\u78d1\u78d2\u78d3\u78d4\u78d5\u78d6\u78d7\u78d8\u78d9\u78da\u78db\u78dc\u78dd\u78de\u78df\u78e0\u78e1\u78e2\u78e3\u78e4\u78e5\u78e6\u78e7\u78e8\u78e9\u78ea\u78eb\u78ec\u78ed\u78ee\u78ef\u78f0\u78f1\u78f2\u78f3\u78f4\u78f5\u78f6\u78f7\u78f8\u78f9\u78fa\u78fb\u78fc\u78fd\u78fe\u78ff\u7900\u7901\u7902\u7903\u7904\u7905\u7906\u7907\u7908\u7909\u790a\u790b\u790c\u790d\u790e\u790f\u7910\u7911\u7912\u7913\u7914\u7915\u7916\u7917\u7918\u7919\u791a\u791b\u791c\u791d\u791e\u791f\u7920\u7921\u7922\u7923\u7924\u7925\u7926\u7927\u7928\u7929\u792a\u792b\u792c\u792d\u792e\u792f\u7930\u7931\u7932\u7933\u7934\u7935\u7936\u7937\u7938\u7939\u793a\u793b\u793c\u793d\u793e\u793f\u7940\u7941\u7942\u7943\u7944\u7945\u7946\u7947\u7948\u7949\u794a\u794b\u794c\u794d\u794e\u794f\u7950\u7951\u7952\u7953\u7954\u7955\u7956\u7957\u7958\u7959\u795a\u795b\u795c\u795d\u79
5e\u795f\u7960\u7961\u7962\u7963\u7964\u7965\u7966\u7967\u7968\u7969\u796a\u796b\u796c\u796d\u796e\u796f\u7970\u7971\u7972\u7973\u7974\u7975\u7976\u7977\u7978\u7979\u797a\u797b\u797c\u797d\u797e\u797f\u7980\u7981\u7982\u7983\u7984\u7985\u7986\u7987\u7988\u7989\u798a\u798b\u798c\u798d\u798e\u798f\u7990\u7991\u7992\u7993\u7994\u7995\u7996\u7997\u7998\u7999\u799a\u799b\u799c\u799d\u799e\u799f\u79a0\u79a1\u79a2\u79a3\u79a4\u79a5\u79a6\u79a7\u79a8\u79a9\u79aa\u79ab\u79ac\u79ad\u79ae\u79af\u79b0\u79b1\u79b2\u79b3\u79b4\u79b5\u79b6\u79b7\u79b8\u79b9\u79ba\u79bb\u79bc\u79bd\u79be\u79bf\u79c0\u79c1\u79c2\u79c3\u79c4\u79c5\u79c6\u79c7\u79c8\u79c9\u79ca\u79cb\u79cc\u79cd\u79ce\u79cf\u79d0\u79d1\u79d2\u79d3\u79d4\u79d5\u79d6\u79d7\u79d8\u79d9\u79da\u79db\u79dc\u79dd\u79de\u79df\u79e0\u79e1\u79e2\u79e3\u79e4\u79e5\u79e6\u79e7\u79e8\u79e9\u79ea\u79eb\u79ec\u79ed\u79ee\u79ef\u79f0\u79f1\u79f2\u79f3\u79f4\u79f5\u79f6\u79f7\u79f8\u79f9\u79fa\u79fb\u79fc\u79fd\u79fe\u79ff\u7a00\u7a01\u7a02\u7a03\u7a04\u7a05\u7a06\u7a07\u7a08\u7a09\u7a0a\u7a0b\u7a0c\u7a0d\u7a0e\u7a0f\u7a10\u7a11\u7a12\u7a13\u7a14\u7a15\u7a16\u7a17\u7a18\u7a19\u7a1a\u7a1b\u7a1c\u7a1d\u7a1e\u7a1f\u7a20\u7a21\u7a22\u7a23\u7a24\u7a25\u7a26\u7a27\u7a28\u7a29\u7a2a\u7a2b\u7a2c\u7a2d\u7a2e\u7a2f\u7a30\u7a31\u7a32\u7a33\u7a34\u7a35\u7a36\u7a37\u7a38\u7a39\u7a3a\u7a3b\u7a3c\u7a3d\u7a3e\u7a3f\u7a40\u7a41\u7a42\u7a43\u7a44\u7a45\u7a46\u7a47\u7a48\u7a49\u7a4a\u7a4b\u7a4c\u7a4d\u7a4e\u7a4f\u7a50\u7a51\u7a52\u7a53\u7a54\u7a55\u7a56\u7a57\u7a58\u7a59\u7a5a\u7a5b\u7a5c\u7a5d\u7a5e\u7a5f\u7a60\u7a61\u7a62\u7a63\u7a64\u7a65\u7a66\u7a67\u7a68\u7a69\u7a6a\u7a6b\u7a6c\u7a6d\u7a6e\u7a6f\u7a70\u7a71\u7a72\u7a73\u7a74\u7a75\u7a76\u7a77\u7a78\u7a79\u7a7a\u7a7b\u7a7c\u7a7d\u7a7e\u7a7f\u7a80\u7a81\u7a82\u7a83\u7a84\u7a85\u7a86\u7a87\u7a88\u7a89\u7a8a\u7a8b\u7a8c\u7a8d\u7a8e\u7a8f\u7a90\u7a91\u7a92\u7a93\u7a94\u7a95\u7a96\u7a97\u7a98\u7a99\u7a9a\u7a9b\u7a9c\u7a9d\u7a9e\u7a9f\u7aa0\u7aa1\u7aa2\u7aa3\u7aa4\u7aa5\u7aa6\u7aa7\u7aa8\u7aa9\u7aaa\u7aab
\u7aac\u7aad\u7aae\u7aaf\u7ab0\u7ab1\u7ab2\u7ab3\u7ab4\u7ab5\u7ab6\u7ab7\u7ab8\u7ab9\u7aba\u7abb\u7abc\u7abd\u7abe\u7abf\u7ac0\u7ac1\u7ac2\u7ac3\u7ac4\u7ac5\u7ac6\u7ac7\u7ac8\u7ac9\u7aca\u7acb\u7acc\u7acd\u7ace\u7acf\u7ad0\u7ad1\u7ad2\u7ad3\u7ad4\u7ad5\u7ad6\u7ad7\u7ad8\u7ad9\u7ada\u7adb\u7adc\u7add\u7ade\u7adf\u7ae0\u7ae1\u7ae2\u7ae3\u7ae4\u7ae5\u7ae6\u7ae7\u7ae8\u7ae9\u7aea\u7aeb\u7aec\u7aed\u7aee\u7aef\u7af0\u7af1\u7af2\u7af3\u7af4\u7af5\u7af6\u7af7\u7af8\u7af9\u7afa\u7afb\u7afc\u7afd\u7afe\u7aff\u7b00\u7b01\u7b02\u7b03\u7b04\u7b05\u7b06\u7b07\u7b08\u7b09\u7b0a\u7b0b\u7b0c\u7b0d\u7b0e\u7b0f\u7b10\u7b11\u7b12\u7b13\u7b14\u7b15\u7b16\u7b17\u7b18\u7b19\u7b1a\u7b1b\u7b1c\u7b1d\u7b1e\u7b1f\u7b20\u7b21\u7b22\u7b23\u7b24\u7b25\u7b26\u7b27\u7b28\u7b29\u7b2a\u7b2b\u7b2c\u7b2d\u7b2e\u7b2f\u7b30\u7b31\u7b32\u7b33\u7b34\u7b35\u7b36\u7b37\u7b38\u7b39\u7b3a\u7b3b\u7b3c\u7b3d\u7b3e\u7b3f\u7b40\u7b41\u7b42\u7b43\u7b44\u7b45\u7b46\u7b47\u7b48\u7b49\u7b4a\u7b4b\u7b4c\u7b4d\u7b4e\u7b4f\u7b50\u7b51\u7b52\u7b53\u7b54\u7b55\u7b56\u7b57\u7b58\u7b59\u7b5a\u7b5b\u7b5c\u7b5d\u7b5e\u7b5f\u7b60\u7b61\u7b62\u7b63\u7b64\u7b65\u7b66\u7b67\u7b68\u7b69\u7b6a\u7b6b\u7b6c\u7b6d\u7b6e\u7b6f\u7b70\u7b71\u7b72\u7b73\u7b74\u7b75\u7b76\u7b77\u7b78\u7b79\u7b7a\u7b7b\u7b7c\u7b7d\u7b7e\u7b7f\u7b80\u7b81\u7b82\u7b83\u7b84\u7b85\u7b86\u7b87\u7b88\u7b89\u7b8a\u7b8b\u7b8c\u7b8d\u7b8e\u7b8f\u7b90\u7b91\u7b92\u7b93\u7b94\u7b95\u7b96\u7b97\u7b98\u7b99\u7b9a\u7b9b\u7b9c\u7b9d\u7b9e\u7b9f\u7ba0\u7ba1\u7ba2\u7ba3\u7ba4\u7ba5\u7ba6\u7ba7\u7ba8\u7ba9\u7baa\u7bab\u7bac\u7bad\u7bae\u7baf\u7bb0\u7bb1\u7bb2\u7bb3\u7bb4\u7bb5\u7bb6\u7bb7\u7bb8\u7bb9\u7bba\u7bbb\u7bbc\u7bbd\u7bbe\u7bbf\u7bc0\u7bc1\u7bc2\u7bc3\u7bc4\u7bc5\u7bc6\u7bc7\u7bc8\u7bc9\u7bca\u7bcb\u7bcc\u7bcd\u7bce\u7bcf\u7bd0\u7bd1\u7bd2\u7bd3\u7bd4\u7bd5\u7bd6\u7bd7\u7bd8\u7bd9\u7bda\u7bdb\u7bdc\u7bdd\u7bde\u7bdf\u7be0\u7be1\u7be2\u7be3\u7be4\u7be5\u7be6\u7be7\u7be8\u7be9\u7bea\u7beb\u7bec\u7bed\u7bee\u7bef\u7bf0\u7bf1\u7bf2\u7bf3\u7bf4\u7bf5\u7bf6\u7bf7\u7bf8\u
7bf9\u7bfa\u7bfb\u7bfc\u7bfd\u7bfe\u7bff\u7c00\u7c01\u7c02\u7c03\u7c04\u7c05\u7c06\u7c07\u7c08\u7c09\u7c0a\u7c0b\u7c0c\u7c0d\u7c0e\u7c0f\u7c10\u7c11\u7c12\u7c13\u7c14\u7c15\u7c16\u7c17\u7c18\u7c19\u7c1a\u7c1b\u7c1c\u7c1d\u7c1e\u7c1f\u7c20\u7c21\u7c22\u7c23\u7c24\u7c25\u7c26\u7c27\u7c28\u7c29\u7c2a\u7c2b\u7c2c\u7c2d\u7c2e\u7c2f\u7c30\u7c31\u7c32\u7c33\u7c34\u7c35\u7c36\u7c37\u7c38\u7c39\u7c3a\u7c3b\u7c3c\u7c3d\u7c3e\u7c3f\u7c40\u7c41\u7c42\u7c43\u7c44\u7c45\u7c46\u7c47\u7c48\u7c49\u7c4a\u7c4b\u7c4c\u7c4d\u7c4e\u7c4f\u7c50\u7c51\u7c52\u7c53\u7c54\u7c55\u7c56\u7c57\u7c58\u7c59\u7c5a\u7c5b\u7c5c\u7c5d\u7c5e\u7c5f\u7c60\u7c61\u7c62\u7c63\u7c64\u7c65\u7c66\u7c67\u7c68\u7c69\u7c6a\u7c6b\u7c6c\u7c6d\u7c6e\u7c6f\u7c70\u7c71\u7c72\u7c73\u7c74\u7c75\u7c76\u7c77\u7c78\u7c79\u7c7a\u7c7b\u7c7c\u7c7d\u7c7e\u7c7f\u7c80\u7c81\u7c82\u7c83\u7c84\u7c85\u7c86\u7c87\u7c88\u7c89\u7c8a\u7c8b\u7c8c\u7c8d\u7c8e\u7c8f\u7c90\u7c91\u7c92\u7c93\u7c94\u7c95\u7c96\u7c97\u7c98\u7c99\u7c9a\u7c9b\u7c9c\u7c9d\u7c9e\u7c9f\u7ca0\u7ca1\u7ca2\u7ca3\u7ca4\u7ca5\u7ca6\u7ca7\u7ca8\u7ca9\u7caa\u7cab\u7cac\u7cad\u7cae\u7caf\u7cb0\u7cb1\u7cb2\u7cb3\u7cb4\u7cb5\u7cb6\u7cb7\u7cb8\u7cb9\u7cba\u7cbb\u7cbc\u7cbd\u7cbe\u7cbf\u7cc0\u7cc1\u7cc2\u7cc3\u7cc4\u7cc5\u7cc6\u7cc7\u7cc8\u7cc9\u7cca\u7ccb\u7ccc\u7ccd\u7cce\u7ccf\u7cd0\u7cd1\u7cd2\u7cd3\u7cd4\u7cd5\u7cd6\u7cd7\u7cd8\u7cd9\u7cda\u7cdb\u7cdc\u7cdd\u7cde\u7cdf\u7ce0\u7ce1\u7ce2\u7ce3\u7ce4\u7ce5\u7ce6\u7ce7\u7ce8\u7ce9\u7cea\u7ceb\u7cec\u7ced\u7cee\u7cef\u7cf0\u7cf1\u7cf2\u7cf3\u7cf4\u7cf5\u7cf6\u7cf7\u7cf8\u7cf9\u7cfa\u7cfb\u7cfc\u7cfd\u7cfe\u7cff\u7d00\u7d01\u7d02\u7d03\u7d04\u7d05\u7d06\u7d07\u7d08\u7d09\u7d0a\u7d0b\u7d0c\u7d0d\u7d0e\u7d0f\u7d10\u7d11\u7d12\u7d13\u7d14\u7d15\u7d16\u7d17\u7d18\u7d19\u7d1a\u7d1b\u7d1c\u7d1d\u7d1e\u7d1f\u7d20\u7d21\u7d22\u7d23\u7d24\u7d25\u7d26\u7d27\u7d28\u7d29\u7d2a\u7d2b\u7d2c\u7d2d\u7d2e\u7d2f\u7d30\u7d31\u7d32\u7d33\u7d34\u7d35\u7d36\u7d37\u7d38\u7d39\u7d3a\u7d3b\u7d3c\u7d3d\u7d3e\u7d3f\u7d40\u7d41\u7d42\u7d43\u7d44\u7d45\u7d
46\u7d47\u7d48\u7d49\u7d4a\u7d4b\u7d4c\u7d4d\u7d4e\u7d4f\u7d50\u7d51\u7d52\u7d53\u7d54\u7d55\u7d56\u7d57\u7d58\u7d59\u7d5a\u7d5b\u7d5c\u7d5d\u7d5e\u7d5f\u7d60\u7d61\u7d62\u7d63\u7d64\u7d65\u7d66\u7d67\u7d68\u7d69\u7d6a\u7d6b\u7d6c\u7d6d\u7d6e\u7d6f\u7d70\u7d71\u7d72\u7d73\u7d74\u7d75\u7d76\u7d77\u7d78\u7d79\u7d7a\u7d7b\u7d7c\u7d7d\u7d7e\u7d7f\u7d80\u7d81\u7d82\u7d83\u7d84\u7d85\u7d86\u7d87\u7d88\u7d89\u7d8a\u7d8b\u7d8c\u7d8d\u7d8e\u7d8f\u7d90\u7d91\u7d92\u7d93\u7d94\u7d95\u7d96\u7d97\u7d98\u7d99\u7d9a\u7d9b\u7d9c\u7d9d\u7d9e\u7d9f\u7da0\u7da1\u7da2\u7da3\u7da4\u7da5\u7da6\u7da7\u7da8\u7da9\u7daa\u7dab\u7dac\u7dad\u7dae\u7daf\u7db0\u7db1\u7db2\u7db3\u7db4\u7db5\u7db6\u7db7\u7db8\u7db9\u7dba\u7dbb\u7dbc\u7dbd\u7dbe\u7dbf\u7dc0\u7dc1\u7dc2\u7dc3\u7dc4\u7dc5\u7dc6\u7dc7\u7dc8\u7dc9\u7dca\u7dcb\u7dcc\u7dcd\u7dce\u7dcf\u7dd0\u7dd1\u7dd2\u7dd3\u7dd4\u7dd5\u7dd6\u7dd7\u7dd8\u7dd9\u7dda\u7ddb\u7ddc\u7ddd\u7dde\u7ddf\u7de0\u7de1\u7de2\u7de3\u7de4\u7de5\u7de6\u7de7\u7de8\u7de9\u7dea\u7deb\u7dec\u7ded\u7dee\u7def\u7df0\u7df1\u7df2\u7df3\u7df4\u7df5\u7df6\u7df7\u7df8\u7df9\u7dfa\u7dfb\u7dfc\u7dfd\u7dfe\u7dff\u7e00\u7e01\u7e02\u7e03\u7e04\u7e05\u7e06\u7e07\u7e08\u7e09\u7e0a\u7e0b\u7e0c\u7e0d\u7e0e\u7e0f\u7e10\u7e11\u7e12\u7e13\u7e14\u7e15\u7e16\u7e17\u7e18\u7e19\u7e1a\u7e1b\u7e1c\u7e1d\u7e1e\u7e1f\u7e20\u7e21\u7e22\u7e23\u7e24\u7e25\u7e26\u7e27\u7e28\u7e29\u7e2a\u7e2b\u7e2c\u7e2d\u7e2e\u7e2f\u7e30\u7e31\u7e32\u7e33\u7e34\u7e35\u7e36\u7e37\u7e38\u7e39\u7e3a\u7e3b\u7e3c\u7e3d\u7e3e\u7e3f\u7e40\u7e41\u7e42\u7e43\u7e44\u7e45\u7e46\u7e47\u7e48\u7e49\u7e4a\u7e4b\u7e4c\u7e4d\u7e4e\u7e4f\u7e50\u7e51\u7e52\u7e53\u7e54\u7e55\u7e56\u7e57\u7e58\u7e59\u7e5a\u7e5b\u7e5c\u7e5d\u7e5e\u7e5f\u7e60\u7e61\u7e62\u7e63\u7e64\u7e65\u7e66\u7e67\u7e68\u7e69\u7e6a\u7e6b\u7e6c\u7e6d\u7e6e\u7e6f\u7e70\u7e71\u7e72\u7e73\u7e74\u7e75\u7e76\u7e77\u7e78\u7e79\u7e7a\u7e7b\u7e7c\u7e7d\u7e7e\u7e7f\u7e80\u7e81\u7e82\u7e83\u7e84\u7e85\u7e86\u7e87\u7e88\u7e89\u7e8a\u7e8b\u7e8c\u7e8d\u7e8e\u7e8f\u7e90\u7e91\u7e92\u7e93
\u7e94\u7e95\u7e96\u7e97\u7e98\u7e99\u7e9a\u7e9b\u7e9c\u7e9d\u7e9e\u7e9f\u7ea0\u7ea1\u7ea2\u7ea3\u7ea4\u7ea5\u7ea6\u7ea7\u7ea8\u7ea9\u7eaa\u7eab\u7eac\u7ead\u7eae\u7eaf\u7eb0\u7eb1\u7eb2\u7eb3\u7eb4\u7eb5\u7eb6\u7eb7\u7eb8\u7eb9\u7eba\u7ebb\u7ebc\u7ebd\u7ebe\u7ebf\u7ec0\u7ec1\u7ec2\u7ec3\u7ec4\u7ec5\u7ec6\u7ec7\u7ec8\u7ec9\u7eca\u7ecb\u7ecc\u7ecd\u7ece\u7ecf\u7ed0\u7ed1\u7ed2\u7ed3\u7ed4\u7ed5\u7ed6\u7ed7\u7ed8\u7ed9\u7eda\u7edb\u7edc\u7edd\u7ede\u7edf\u7ee0\u7ee1\u7ee2\u7ee3\u7ee4\u7ee5\u7ee6\u7ee7\u7ee8\u7ee9\u7eea\u7eeb\u7eec\u7eed\u7eee\u7eef\u7ef0\u7ef1\u7ef2\u7ef3\u7ef4\u7ef5\u7ef6\u7ef7\u7ef8\u7ef9\u7efa\u7efb\u7efc\u7efd\u7efe\u7eff\u7f00\u7f01\u7f02\u7f03\u7f04\u7f05\u7f06\u7f07\u7f08\u7f09\u7f0a\u7f0b\u7f0c\u7f0d\u7f0e\u7f0f\u7f10\u7f11\u7f12\u7f13\u7f14\u7f15\u7f16\u7f17\u7f18\u7f19\u7f1a\u7f1b\u7f1c\u7f1d\u7f1e\u7f1f\u7f20\u7f21\u7f22\u7f23\u7f24\u7f25\u7f26\u7f27\u7f28\u7f29\u7f2a\u7f2b\u7f2c\u7f2d\u7f2e\u7f2f\u7f30\u7f31\u7f32\u7f33\u7f34\u7f35\u7f36\u7f37\u7f38\u7f39\u7f3a\u7f3b\u7f3c\u7f3d\u7f3e\u7f3f\u7f40\u7f41\u7f42\u7f43\u7f44\u7f45\u7f46\u7f47\u7f48\u7f49\u7f4a\u7f4b\u7f4c\u7f4d\u7f4e\u7f4f\u7f50\u7f51\u7f52\u7f53\u7f54\u7f55\u7f56\u7f57\u7f58\u7f59\u7f5a\u7f5b\u7f5c\u7f5d\u7f5e\u7f5f\u7f60\u7f61\u7f62\u7f63\u7f64\u7f65\u7f66\u7f67\u7f68\u7f69\u7f6a\u7f6b\u7f6c\u7f6d\u7f6e\u7f6f\u7f70\u7f71\u7f72\u7f73\u7f74\u7f75\u7f76\u7f77\u7f78\u7f79\u7f7a\u7f7b\u7f7c\u7f7d\u7f7e\u7f7f\u7f80\u7f81\u7f82\u7f83\u7f84\u7f85\u7f86\u7f87\u7f88\u7f89\u7f8a\u7f8b\u7f8c\u7f8d\u7f8e\u7f8f\u7f90\u7f91\u7f92\u7f93\u7f94\u7f95\u7f96\u7f97\u7f98\u7f99\u7f9a\u7f9b\u7f9c\u7f9d\u7f9e\u7f9f\u7fa0\u7fa1\u7fa2\u7fa3\u7fa4\u7fa5\u7fa6\u7fa7\u7fa8\u7fa9\u7faa\u7fab\u7fac\u7fad\u7fae\u7faf\u7fb0\u7fb1\u7fb2\u7fb3\u7fb4\u7fb5\u7fb6\u7fb7\u7fb8\u7fb9\u7fba\u7fbb\u7fbc\u7fbd\u7fbe\u7fbf\u7fc0\u7fc1\u7fc2\u7fc3\u7fc4\u7fc5\u7fc6\u7fc7\u7fc8\u7fc9\u7fca\u7fcb\u7fcc\u7fcd\u7fce\u7fcf\u7fd0\u7fd1\u7fd2\u7fd3\u7fd4\u7fd5\u7fd6\u7fd7\u7fd8\u7fd9\u7fda\u7fdb\u7fdc\u7fdd\u7fde\u7fdf\u7fe0\u
7fe1\u7fe2\u7fe3\u7fe4\u7fe5\u7fe6\u7fe7\u7fe8\u7fe9\u7fea\u7feb\u7fec\u7fed\u7fee\u7fef\u7ff0\u7ff1\u7ff2\u7ff3\u7ff4\u7ff5\u7ff6\u7ff7\u7ff8\u7ff9\u7ffa\u7ffb\u7ffc\u7ffd\u7ffe\u7fff\u8000\u8001\u8002\u8003\u8004\u8005\u8006\u8007\u8008\u8009\u800a\u800b\u800c\u800d\u800e\u800f\u8010\u8011\u8012\u8013\u8014\u8015\u8016\u8017\u8018\u8019\u801a\u801b\u801c\u801d\u801e\u801f\u8020\u8021\u8022\u8023\u8024\u8025\u8026\u8027\u8028\u8029\u802a\u802b\u802c\u802d\u802e\u802f\u8030\u8031\u8032\u8033\u8034\u8035\u8036\u8037\u8038\u8039\u803a\u803b\u803c\u803d\u803e\u803f\u8040\u8041\u8042\u8043\u8044\u8045\u8046\u8047\u8048\u8049\u804a\u804b\u804c\u804d\u804e\u804f\u8050\u8051\u8052\u8053\u8054\u8055\u8056\u8057\u8058\u8059\u805a\u805b\u805c\u805d\u805e\u805f\u8060\u8061\u8062\u8063\u8064\u8065\u8066\u8067\u8068\u8069\u806a\u806b\u806c\u806d\u806e\u806f\u8070\u8071\u8072\u8073\u8074\u8075\u8076\u8077\u8078\u8079\u807a\u807b\u807c\u807d\u807e\u807f\u8080\u8081\u8082\u8083\u8084\u8085\u8086\u8087\u8088\u8089\u808a\u808b\u808c\u808d\u808e\u808f\u8090\u8091\u8092\u8093\u8094\u8095\u8096\u8097\u8098\u8099\u809a\u809b\u809c\u809d\u809e\u809f\u80a0\u80a1\u80a2\u80a3\u80a4\u80a5\u80a6\u80a7\u80a8\u80a9\u80aa\u80ab\u80ac\u80ad\u80ae\u80af\u80b0\u80b1\u80b2\u80b3\u80b4\u80b5\u80b6\u80b7\u80b8\u80b9\u80ba\u80bb\u80bc\u80bd\u80be\u80bf\u80c0\u80c1\u80c2\u80c3\u80c4\u80c5\u80c6\u80c7\u80c8\u80c9\u80ca\u80cb\u80cc\u80cd\u80ce\u80cf\u80d0\u80d1\u80d2\u80d3\u80d4\u80d5\u80d6\u80d7\u80d8\u80d9\u80da\u80db\u80dc\u80dd\u80de\u80df\u80e0\u80e1\u80e2\u80e3\u80e4\u80e5\u80e6\u80e7\u80e8\u80e9\u80ea\u80eb\u80ec\u80ed\u80ee\u80ef\u80f0\u80f1\u80f2\u80f3\u80f4\u80f5\u80f6\u80f7\u80f8\u80f9\u80fa\u80fb\u80fc\u80fd\u80fe\u80ff\u8100\u8101\u8102\u8103\u8104\u8105\u8106\u8107\u8108\u8109\u810a\u810b\u810c\u810d\u810e\u810f\u8110\u8111\u8112\u8113\u8114\u8115\u8116\u8117\u8118\u8119\u811a\u811b\u811c\u811d\u811e\u811f\u8120\u8121\u8122\u8123\u8124\u8125\u8126\u8127\u8128\u8129\u812a\u812b\u812c\u812d\u81
2e\u812f\u8130\u8131\u8132\u8133\u8134\u8135\u8136\u8137\u8138\u8139\u813a\u813b\u813c\u813d\u813e\u813f\u8140\u8141\u8142\u8143\u8144\u8145\u8146\u8147\u8148\u8149\u814a\u814b\u814c\u814d\u814e\u814f\u8150\u8151\u8152\u8153\u8154\u8155\u8156\u8157\u8158\u8159\u815a\u815b\u815c\u815d\u815e\u815f\u8160\u8161\u8162\u8163\u8164\u8165\u8166\u8167\u8168\u8169\u816a\u816b\u816c\u816d\u816e\u816f\u8170\u8171\u8172\u8173\u8174\u8175\u8176\u8177\u8178\u8179\u817a\u817b\u817c\u817d\u817e\u817f\u8180\u8181\u8182\u8183\u8184\u8185\u8186\u8187\u8188\u8189\u818a\u818b\u818c\u818d\u818e\u818f\u8190\u8191\u8192\u8193\u8194\u8195\u8196\u8197\u8198\u8199\u819a\u819b\u819c\u819d\u819e\u819f\u81a0\u81a1\u81a2\u81a3\u81a4\u81a5\u81a6\u81a7\u81a8\u81a9\u81aa\u81ab\u81ac\u81ad\u81ae\u81af\u81b0\u81b1\u81b2\u81b3\u81b4\u81b5\u81b6\u81b7\u81b8\u81b9\u81ba\u81bb\u81bc\u81bd\u81be\u81bf\u81c0\u81c1\u81c2\u81c3\u81c4\u81c5\u81c6\u81c7\u81c8\u81c9\u81ca\u81cb\u81cc\u81cd\u81ce\u81cf\u81d0\u81d1\u81d2\u81d3\u81d4\u81d5\u81d6\u81d7\u81d8\u81d9\u81da\u81db\u81dc\u81dd\u81de\u81df\u81e0\u81e1\u81e2\u81e3\u81e4\u81e5\u81e6\u81e7\u81e8\u81e9\u81ea\u81eb\u81ec\u81ed\u81ee\u81ef\u81f0\u81f1\u81f2\u81f3\u81f4\u81f5\u81f6\u81f7\u81f8\u81f9\u81fa\u81fb\u81fc\u81fd\u81fe\u81ff\u8200\u8201\u8202\u8203\u8204\u8205\u8206\u8207\u8208\u8209\u820a\u820b\u820c\u820d\u820e\u820f\u8210\u8211\u8212\u8213\u8214\u8215\u8216\u8217\u8218\u8219\u821a\u821b\u821c\u821d\u821e\u821f\u8220\u8221\u8222\u8223\u8224\u8225\u8226\u8227\u8228\u8229\u822a\u822b\u822c\u822d\u822e\u822f\u8230\u8231\u8232\u8233\u8234\u8235\u8236\u8237\u8238\u8239\u823a\u823b\u823c\u823d\u823e\u823f\u8240\u8241\u8242\u8243\u8244\u8245\u8246\u8247\u8248\u8249\u824a\u824b\u824c\u824d\u824e\u824f\u8250\u8251\u8252\u8253\u8254\u8255\u8256\u8257\u8258\u8259\u825a\u825b\u825c\u825d\u825e\u825f\u8260\u8261\u8262\u8263\u8264\u8265\u8266\u8267\u8268\u8269\u826a\u826b\u826c\u826d\u826e\u826f\u8270\u8271\u8272\u8273\u8274\u8275\u8276\u8277\u8278\u8279\u827a\u827b
\u827c\u827d\u827e\u827f\u8280\u8281\u8282\u8283\u8284\u8285\u8286\u8287\u8288\u8289\u828a\u828b\u828c\u828d\u828e\u828f\u8290\u8291\u8292\u8293\u8294\u8295\u8296\u8297\u8298\u8299\u829a\u829b\u829c\u829d\u829e\u829f\u82a0\u82a1\u82a2\u82a3\u82a4\u82a5\u82a6\u82a7\u82a8\u82a9\u82aa\u82ab\u82ac\u82ad\u82ae\u82af\u82b0\u82b1\u82b2\u82b3\u82b4\u82b5\u82b6\u82b7\u82b8\u82b9\u82ba\u82bb\u82bc\u82bd\u82be\u82bf\u82c0\u82c1\u82c2\u82c3\u82c4\u82c5\u82c6\u82c7\u82c8\u82c9\u82ca\u82cb\u82cc\u82cd\u82ce\u82cf\u82d0\u82d1\u82d2\u82d3\u82d4\u82d5\u82d6\u82d7\u82d8\u82d9\u82da\u82db\u82dc\u82dd\u82de\u82df\u82e0\u82e1\u82e2\u82e3\u82e4\u82e5\u82e6\u82e7\u82e8\u82e9\u82ea\u82eb\u82ec\u82ed\u82ee\u82ef\u82f0\u82f1\u82f2\u82f3\u82f4\u82f5\u82f6\u82f7\u82f8\u82f9\u82fa\u82fb\u82fc\u82fd\u82fe\u82ff\u8300\u8301\u8302\u8303\u8304\u8305\u8306\u8307\u8308\u8309\u830a\u830b\u830c\u830d\u830e\u830f\u8310\u8311\u8312\u8313\u8314\u8315\u8316\u8317\u8318\u8319\u831a\u831b\u831c\u831d\u831e\u831f\u8320\u8321\u8322\u8323\u8324\u8325\u8326\u8327\u8328\u8329\u832a\u832b\u832c\u832d\u832e\u832f\u8330\u8331\u8332\u8333\u8334\u8335\u8336\u8337\u8338\u8339\u833a\u833b\u833c\u833d\u833e\u833f\u8340\u8341\u8342\u8343\u8344\u8345\u8346\u8347\u8348\u8349\u834a\u834b\u834c\u834d\u834e\u834f\u8350\u8351\u8352\u8353\u8354\u8355\u8356\u8357\u8358\u8359\u835a\u835b\u835c\u835d\u835e\u835f\u8360\u8361\u8362\u8363\u8364\u8365\u8366\u8367\u8368\u8369\u836a\u836b\u836c\u836d\u836e\u836f\u8370\u8371\u8372\u8373\u8374\u8375\u8376\u8377\u8378\u8379\u837a\u837b\u837c\u837d\u837e\u837f\u8380\u8381\u8382\u8383\u8384\u8385\u8386\u8387\u8388\u8389\u838a\u838b\u838c\u838d\u838e\u838f\u8390\u8391\u8392\u8393\u8394\u8395\u8396\u8397\u8398\u8399\u839a\u839b\u839c\u839d\u839e\u839f\u83a0\u83a1\u83a2\u83a3\u83a4\u83a5\u83a6\u83a7\u83a8\u83a9\u83aa\u83ab\u83ac\u83ad\u83ae\u83af\u83b0\u83b1\u83b2\u83b3\u83b4\u83b5\u83b6\u83b7\u83b8\u83b9\u83ba\u83bb\u83bc\u83bd\u83be\u83bf\u83c0\u83c1\u83c2\u83c3\u83c4\u83c5\u83c6\u83c7\u83c8\u
83c9\u83ca\u83cb\u83cc\u83cd\u83ce\u83cf\u83d0\u83d1\u83d2\u83d3\u83d4\u83d5\u83d6\u83d7\u83d8\u83d9\u83da\u83db\u83dc\u83dd\u83de\u83df\u83e0\u83e1\u83e2\u83e3\u83e4\u83e5\u83e6\u83e7\u83e8\u83e9\u83ea\u83eb\u83ec\u83ed\u83ee\u83ef\u83f0\u83f1\u83f2\u83f3\u83f4\u83f5\u83f6\u83f7\u83f8\u83f9\u83fa\u83fb\u83fc\u83fd\u83fe\u83ff\u8400\u8401\u8402\u8403\u8404\u8405\u8406\u8407\u8408\u8409\u840a\u840b\u840c\u840d\u840e\u840f\u8410\u8411\u8412\u8413\u8414\u8415\u8416\u8417\u8418\u8419\u841a\u841b\u841c\u841d\u841e\u841f\u8420\u8421\u8422\u8423\u8424\u8425\u8426\u8427\u8428\u8429\u842a\u842b\u842c\u842d\u842e\u842f\u8430\u8431\u8432\u8433\u8434\u8435\u8436\u8437\u8438\u8439\u843a\u843b\u843c\u843d\u843e\u843f\u8440\u8441\u8442\u8443\u8444\u8445\u8446\u8447\u8448\u8449\u844a\u844b\u844c\u844d\u844e\u844f\u8450\u8451\u8452\u8453\u8454\u8455\u8456\u8457\u8458\u8459\u845a\u845b\u845c\u845d\u845e\u845f\u8460\u8461\u8462\u8463\u8464\u8465\u8466\u8467\u8468\u8469\u846a\u846b\u846c\u846d\u846e\u846f\u8470\u8471\u8472\u8473\u8474\u8475\u8476\u8477\u8478\u8479\u847a\u847b\u847c\u847d\u847e\u847f\u8480\u8481\u8482\u8483\u8484\u8485\u8486\u8487\u8488\u8489\u848a\u848b\u848c\u848d\u848e\u848f\u8490\u8491\u8492\u8493\u8494\u8495\u8496\u8497\u8498\u8499\u849a\u849b\u849c\u849d\u849e\u849f\u84a0\u84a1\u84a2\u84a3\u84a4\u84a5\u84a6\u84a7\u84a8\u84a9\u84aa\u84ab\u84ac\u84ad\u84ae\u84af\u84b0\u84b1\u84b2\u84b3\u84b4\u84b5\u84b6\u84b7\u84b8\u84b9\u84ba\u84bb\u84bc\u84bd\u84be\u84bf\u84c0\u84c1\u84c2\u84c3\u84c4\u84c5\u84c6\u84c7\u84c8\u84c9\u84ca\u84cb\u84cc\u84cd\u84ce\u84cf\u84d0\u84d1\u84d2\u84d3\u84d4\u84d5\u84d6\u84d7\u84d8\u84d9\u84da\u84db\u84dc\u84dd\u84de\u84df\u84e0\u84e1\u84e2\u84e3\u84e4\u84e5\u84e6\u84e7\u84e8\u84e9\u84ea\u84eb\u84ec\u84ed\u84ee\u84ef\u84f0\u84f1\u84f2\u84f3\u84f4\u84f5\u84f6\u84f7\u84f8\u84f9\u84fa\u84fb\u84fc\u84fd\u84fe\u84ff\u8500\u8501\u8502\u8503\u8504\u8505\u8506\u8507\u8508\u8509\u850a\u850b\u850c\u850d\u850e\u850f\u8510\u8511\u8512\u8513\u8514\u8515\u85
16\u8517\u8518\u8519\u851a\u851b\u851c\u851d\u851e\u851f\u8520\u8521\u8522\u8523\u8524\u8525\u8526\u8527\u8528\u8529\u852a\u852b\u852c\u852d\u852e\u852f\u8530\u8531\u8532\u8533\u8534\u8535\u8536\u8537\u8538\u8539\u853a\u853b\u853c\u853d\u853e\u853f\u8540\u8541\u8542\u8543\u8544\u8545\u8546\u8547\u8548\u8549\u854a\u854b\u854c\u854d\u854e\u854f\u8550\u8551\u8552\u8553\u8554\u8555\u8556\u8557\u8558\u8559\u855a\u855b\u855c\u855d\u855e\u855f\u8560\u8561\u8562\u8563\u8564\u8565\u8566\u8567\u8568\u8569\u856a\u856b\u856c\u856d\u856e\u856f\u8570\u8571\u8572\u8573\u8574\u8575\u8576\u8577\u8578\u8579\u857a\u857b\u857c\u857d\u857e\u857f\u8580\u8581\u8582\u8583\u8584\u8585\u8586\u8587\u8588\u8589\u858a\u858b\u858c\u858d\u858e\u858f\u8590\u8591\u8592\u8593\u8594\u8595\u8596\u8597\u8598\u8599\u859a\u859b\u859c\u859d\u859e\u859f\u85a0\u85a1\u85a2\u85a3\u85a4\u85a5\u85a6\u85a7\u85a8\u85a9\u85aa\u85ab\u85ac\u85ad\u85ae\u85af\u85b0\u85b1\u85b2\u85b3\u85b4\u85b5\u85b6\u85b7\u85b8\u85b9\u85ba\u85bb\u85bc\u85bd\u85be\u85bf\u85c0\u85c1\u85c2\u85c3\u85c4\u85c5\u85c6\u85c7\u85c8\u85c9\u85ca\u85cb\u85cc\u85cd\u85ce\u85cf\u85d0\u85d1\u85d2\u85d3\u85d4\u85d5\u85d6\u85d7\u85d8\u85d9\u85da\u85db\u85dc\u85dd\u85de\u85df\u85e0\u85e1\u85e2\u85e3\u85e4\u85e5\u85e6\u85e7\u85e8\u85e9\u85ea\u85eb\u85ec\u85ed\u85ee\u85ef\u85f0\u85f1\u85f2\u85f3\u85f4\u85f5\u85f6\u85f7\u85f8\u85f9\u85fa\u85fb\u85fc\u85fd\u85fe\u85ff\u8600\u8601\u8602\u8603\u8604\u8605\u8606\u8607\u8608\u8609\u860a\u860b\u860c\u860d\u860e\u860f\u8610\u8611\u8612\u8613\u8614\u8615\u8616\u8617\u8618\u8619\u861a\u861b\u861c\u861d\u861e\u861f\u8620\u8621\u8622\u8623\u8624\u8625\u8626\u8627\u8628\u8629\u862a\u862b\u862c\u862d\u862e\u862f\u8630\u8631\u8632\u8633\u8634\u8635\u8636\u8637\u8638\u8639\u863a\u863b\u863c\u863d\u863e\u863f\u8640\u8641\u8642\u8643\u8644\u8645\u8646\u8647\u8648\u8649\u864a\u864b\u864c\u864d\u864e\u864f\u8650\u8651\u8652\u8653\u8654\u8655\u8656\u8657\u8658\u8659\u865a\u865b\u865c\u865d\u865e\u865f\u8660\u8661\u8662\u8663
\u8664\u8665\u8666\u8667\u8668\u8669\u866a\u866b\u866c\u866d\u866e\u866f\u8670\u8671\u8672\u8673\u8674\u8675\u8676\u8677\u8678\u8679\u867a\u867b\u867c\u867d\u867e\u867f\u8680\u8681\u8682\u8683\u8684\u8685\u8686\u8687\u8688\u8689\u868a\u868b\u868c\u868d\u868e\u868f\u8690\u8691\u8692\u8693\u8694\u8695\u8696\u8697\u8698\u8699\u869a\u869b\u869c\u869d\u869e\u869f\u86a0\u86a1\u86a2\u86a3\u86a4\u86a5\u86a6\u86a7\u86a8\u86a9\u86aa\u86ab\u86ac\u86ad\u86ae\u86af\u86b0\u86b1\u86b2\u86b3\u86b4\u86b5\u86b6\u86b7\u86b8\u86b9\u86ba\u86bb\u86bc\u86bd\u86be\u86bf\u86c0\u86c1\u86c2\u86c3\u86c4\u86c5\u86c6\u86c7\u86c8\u86c9\u86ca\u86cb\u86cc\u86cd\u86ce\u86cf\u86d0\u86d1\u86d2\u86d3\u86d4\u86d5\u86d6\u86d7\u86d8\u86d9\u86da\u86db\u86dc\u86dd\u86de\u86df\u86e0\u86e1\u86e2\u86e3\u86e4\u86e5\u86e6\u86e7\u86e8\u86e9\u86ea\u86eb\u86ec\u86ed\u86ee\u86ef\u86f0\u86f1\u86f2\u86f3\u86f4\u86f5\u86f6\u86f7\u86f8\u86f9\u86fa\u86fb\u86fc\u86fd\u86fe\u86ff\u8700\u8701\u8702\u8703\u8704\u8705\u8706\u8707\u8708\u8709\u870a\u870b\u870c\u870d\u870e\u870f\u8710\u8711\u8712\u8713\u8714\u8715\u8716\u8717\u8718\u8719\u871a\u871b\u871c\u871d\u871e\u871f\u8720\u8721\u8722\u8723\u8724\u8725\u8726\u8727\u8728\u8729\u872a\u872b\u872c\u872d\u872e\u872f\u8730\u8731\u8732\u8733\u8734\u8735\u8736\u8737\u8738\u8739\u873a\u873b\u873c\u873d\u873e\u873f\u8740\u8741\u8742\u8743\u8744\u8745\u8746\u8747\u8748\u8749\u874a\u874b\u874c\u874d\u874e\u874f\u8750\u8751\u8752\u8753\u8754\u8755\u8756\u8757\u8758\u8759\u875a\u875b\u875c\u875d\u875e\u875f\u8760\u8761\u8762\u8763\u8764\u8765\u8766\u8767\u8768\u8769\u876a\u876b\u876c\u876d\u876e\u876f\u8770\u8771\u8772\u8773\u8774\u8775\u8776\u8777\u8778\u8779\u877a\u877b\u877c\u877d\u877e\u877f\u8780\u8781\u8782\u8783\u8784\u8785\u8786\u8787\u8788\u8789\u878a\u878b\u878c\u878d\u878e\u878f\u8790\u8791\u8792\u8793\u8794\u8795\u8796\u8797\u8798\u8799\u879a\u879b\u879c\u879d\u879e\u879f\u87a0\u87a1\u87a2\u87a3\u87a4\u87a5\u87a6\u87a7\u87a8\u87a9\u87aa\u87ab\u87ac\u87ad\u87ae\u87af\u87b0\u
87b1\u87b2\u87b3\u87b4\u87b5\u87b6\u87b7\u87b8\u87b9\u87ba\u87bb\u87bc\u87bd\u87be\u87bf\u87c0\u87c1\u87c2\u87c3\u87c4\u87c5\u87c6\u87c7\u87c8\u87c9\u87ca\u87cb\u87cc\u87cd\u87ce\u87cf\u87d0\u87d1\u87d2\u87d3\u87d4\u87d5\u87d6\u87d7\u87d8\u87d9\u87da\u87db\u87dc\u87dd\u87de\u87df\u87e0\u87e1\u87e2\u87e3\u87e4\u87e5\u87e6\u87e7\u87e8\u87e9\u87ea\u87eb\u87ec\u87ed\u87ee\u87ef\u87f0\u87f1\u87f2\u87f3\u87f4\u87f5\u87f6\u87f7\u87f8\u87f9\u87fa\u87fb\u87fc\u87fd\u87fe\u87ff\u8800\u8801\u8802\u8803\u8804\u8805\u8806\u8807\u8808\u8809\u880a\u880b\u880c\u880d\u880e\u880f\u8810\u8811\u8812\u8813\u8814\u8815\u8816\u8817\u8818\u8819\u881a\u881b\u881c\u881d\u881e\u881f\u8820\u8821\u8822\u8823\u8824\u8825\u8826\u8827\u8828\u8829\u882a\u882b\u882c\u882d\u882e\u882f\u8830\u8831\u8832\u8833\u8834\u8835\u8836\u8837\u8838\u8839\u883a\u883b\u883c\u883d\u883e\u883f\u8840\u8841\u8842\u8843\u8844\u8845\u8846\u8847\u8848\u8849\u884a\u884b\u884c\u884d\u884e\u884f\u8850\u8851\u8852\u8853\u8854\u8855\u8856\u8857\u8858\u8859\u885a\u885b\u885c\u885d\u885e\u885f\u8860\u8861\u8862\u8863\u8864\u8865\u8866\u8867\u8868\u8869\u886a\u886b\u886c\u886d\u886e\u886f\u8870\u8871\u8872\u8873\u8874\u8875\u8876\u8877\u8878\u8879\u887a\u887b\u887c\u887d\u887e\u887f\u8880\u8881\u8882\u8883\u8884\u8885\u8886\u8887\u8888\u8889\u888a\u888b\u888c\u888d\u888e\u888f\u8890\u8891\u8892\u8893\u8894\u8895\u8896\u8897\u8898\u8899\u889a\u889b\u889c\u889d\u889e\u889f\u88a0\u88a1\u88a2\u88a3\u88a4\u88a5\u88a6\u88a7\u88a8\u88a9\u88aa\u88ab\u88ac\u88ad\u88ae\u88af\u88b0\u88b1\u88b2\u88b3\u88b4\u88b5\u88b6\u88b7\u88b8\u88b9\u88ba\u88bb\u88bc\u88bd\u88be\u88bf\u88c0\u88c1\u88c2\u88c3\u88c4\u88c5\u88c6\u88c7\u88c8\u88c9\u88ca\u88cb\u88cc\u88cd\u88ce\u88cf\u88d0\u88d1\u88d2\u88d3\u88d4\u88d5\u88d6\u88d7\u88d8\u88d9\u88da\u88db\u88dc\u88dd\u88de\u88df\u88e0\u88e1\u88e2\u88e3\u88e4\u88e5\u88e6\u88e7\u88e8\u88e9\u88ea\u88eb\u88ec\u88ed\u88ee\u88ef\u88f0\u88f1\u88f2\u88f3\u88f4\u88f5\u88f6\u88f7\u88f8\u88f9\u88fa\u88fb\u88fc\u88fd\u88
fe\u88ff\u8900\u8901\u8902\u8903\u8904\u8905\u8906\u8907\u8908\u8909\u890a\u890b\u890c\u890d\u890e\u890f\u8910\u8911\u8912\u8913\u8914\u8915\u8916\u8917\u8918\u8919\u891a\u891b\u891c\u891d\u891e\u891f\u8920\u8921\u8922\u8923\u8924\u8925\u8926\u8927\u8928\u8929\u892a\u892b\u892c\u892d\u892e\u892f\u8930\u8931\u8932\u8933\u8934\u8935\u8936\u8937\u8938\u8939\u893a\u893b\u893c\u893d\u893e\u893f\u8940\u8941\u8942\u8943\u8944\u8945\u8946\u8947\u8948\u8949\u894a\u894b\u894c\u894d\u894e\u894f\u8950\u8951\u8952\u8953\u8954\u8955\u8956\u8957\u8958\u8959\u895a\u895b\u895c\u895d\u895e\u895f\u8960\u8961\u8962\u8963\u8964\u8965\u8966\u8967\u8968\u8969\u896a\u896b\u896c\u896d\u896e\u896f\u8970\u8971\u8972\u8973\u8974\u8975\u8976\u8977\u8978\u8979\u897a\u897b\u897c\u897d\u897e\u897f\u8980\u8981\u8982\u8983\u8984\u8985\u8986\u8987\u8988\u8989\u898a\u898b\u898c\u898d\u898e\u898f\u8990\u8991\u8992\u8993\u8994\u8995\u8996\u8997\u8998\u8999\u899a\u899b\u899c\u899d\u899e\u899f\u89a0\u89a1\u89a2\u89a3\u89a4\u89a5\u89a6\u89a7\u89a8\u89a9\u89aa\u89ab\u89ac\u89ad\u89ae\u89af\u89b0\u89b1\u89b2\u89b3\u89b4\u89b5\u89b6\u89b7\u89b8\u89b9\u89ba\u89bb\u89bc\u89bd\u89be\u89bf\u89c0\u89c1\u89c2\u89c3\u89c4\u89c5\u89c6\u89c7\u89c8\u89c9\u89ca\u89cb\u89cc\u89cd\u89ce\u89cf\u89d0\u89d1\u89d2\u89d3\u89d4\u89d5\u89d6\u89d7\u89d8\u89d9\u89da\u89db\u89dc\u89dd\u89de\u89df\u89e0\u89e1\u89e2\u89e3\u89e4\u89e5\u89e6\u89e7\u89e8\u89e9\u89ea\u89eb\u89ec\u89ed\u89ee\u89ef\u89f0\u89f1\u89f2\u89f3\u89f4\u89f5\u89f6\u89f7\u89f8\u89f9\u89fa\u89fb\u89fc\u89fd\u89fe\u89ff\u8a00\u8a01\u8a02\u8a03\u8a04\u8a05\u8a06\u8a07\u8a08\u8a09\u8a0a\u8a0b\u8a0c\u8a0d\u8a0e\u8a0f\u8a10\u8a11\u8a12\u8a13\u8a14\u8a15\u8a16\u8a17\u8a18\u8a19\u8a1a\u8a1b\u8a1c\u8a1d\u8a1e\u8a1f\u8a20\u8a21\u8a22\u8a23\u8a24\u8a25\u8a26\u8a27\u8a28\u8a29\u8a2a\u8a2b\u8a2c\u8a2d\u8a2e\u8a2f\u8a30\u8a31\u8a32\u8a33\u8a34\u8a35\u8a36\u8a37\u8a38\u8a39\u8a3a\u8a3b\u8a3c\u8a3d\u8a3e\u8a3f\u8a40\u8a41\u8a42\u8a43\u8a44\u8a45\u8a46\u8a47\u8a48\u8a49\u8a4a\u8a4b
\u8a4c\u8a4d\u8a4e\u8a4f\u8a50\u8a51\u8a52\u8a53\u8a54\u8a55\u8a56\u8a57\u8a58\u8a59\u8a5a\u8a5b\u8a5c\u8a5d\u8a5e\u8a5f\u8a60\u8a61\u8a62\u8a63\u8a64\u8a65\u8a66\u8a67\u8a68\u8a69\u8a6a\u8a6b\u8a6c\u8a6d\u8a6e\u8a6f\u8a70\u8a71\u8a72\u8a73\u8a74\u8a75\u8a76\u8a77\u8a78\u8a79\u8a7a\u8a7b\u8a7c\u8a7d\u8a7e\u8a7f\u8a80\u8a81\u8a82\u8a83\u8a84\u8a85\u8a86\u8a87\u8a88\u8a89\u8a8a\u8a8b\u8a8c\u8a8d\u8a8e\u8a8f\u8a90\u8a91\u8a92\u8a93\u8a94\u8a95\u8a96\u8a97\u8a98\u8a99\u8a9a\u8a9b\u8a9c\u8a9d\u8a9e\u8a9f\u8aa0\u8aa1\u8aa2\u8aa3\u8aa4\u8aa5\u8aa6\u8aa7\u8aa8\u8aa9\u8aaa\u8aab\u8aac\u8aad\u8aae\u8aaf\u8ab0\u8ab1\u8ab2\u8ab3\u8ab4\u8ab5\u8ab6\u8ab7\u8ab8\u8ab9\u8aba\u8abb\u8abc\u8abd\u8abe\u8abf\u8ac0\u8ac1\u8ac2\u8ac3\u8ac4\u8ac5\u8ac6\u8ac7\u8ac8\u8ac9\u8aca\u8acb\u8acc\u8acd\u8ace\u8acf\u8ad0\u8ad1\u8ad2\u8ad3\u8ad4\u8ad5\u8ad6\u8ad7\u8ad8\u8ad9\u8ada\u8adb\u8adc\u8add\u8ade\u8adf\u8ae0\u8ae1\u8ae2\u8ae3\u8ae4\u8ae5\u8ae6\u8ae7\u8ae8\u8ae9\u8aea\u8aeb\u8aec\u8aed\u8aee\u8aef\u8af0\u8af1\u8af2\u8af3\u8af4\u8af5\u8af6\u8af7\u8af8\u8af9\u8afa\u8afb\u8afc\u8afd\u8afe\u8aff\u8b00\u8b01\u8b02\u8b03\u8b04\u8b05\u8b06\u8b07\u8b08\u8b09\u8b0a\u8b0b\u8b0c\u8b0d\u8b0e\u8b0f\u8b10\u8b11\u8b12\u8b13\u8b14\u8b15\u8b16\u8b17\u8b18\u8b19\u8b1a\u8b1b\u8b1c\u8b1d\u8b1e\u8b1f\u8b20\u8b21\u8b22\u8b23\u8b24\u8b25\u8b26\u8b27\u8b28\u8b29\u8b2a\u8b2b\u8b2c\u8b2d\u8b2e\u8b2f\u8b30\u8b31\u8b32\u8b33\u8b34\u8b35\u8b36\u8b37\u8b38\u8b39\u8b3a\u8b3b\u8b3c\u8b3d\u8b3e\u8b3f\u8b40\u8b41\u8b42\u8b43\u8b44\u8b45\u8b46\u8b47\u8b48\u8b49\u8b4a\u8b4b\u8b4c\u8b4d\u8b4e\u8b4f\u8b50\u8b51\u8b52\u8b53\u8b54\u8b55\u8b56\u8b57\u8b58\u8b59\u8b5a\u8b5b\u8b5c\u8b5d\u8b5e\u8b5f\u8b60\u8b61\u8b62\u8b63\u8b64\u8b65\u8b66\u8b67\u8b68\u8b69\u8b6a\u8b6b\u8b6c\u8b6d\u8b6e\u8b6f\u8b70\u8b71\u8b72\u8b73\u8b74\u8b75\u8b76\u8b77\u8b78\u8b79\u8b7a\u8b7b\u8b7c\u8b7d\u8b7e\u8b7f\u8b80\u8b81\u8b82\u8b83\u8b84\u8b85\u8b86\u8b87\u8b88\u8b89\u8b8a\u8b8b\u8b8c\u8b8d\u8b8e\u8b8f\u8b90\u8b91\u8b92\u8b93\u8b94\u8b95\u8b96\u8b97\u8b98\u
8b99\u8b9a\u8b9b\u8b9c\u8b9d\u8b9e\u8b9f\u8ba0\u8ba1\u8ba2\u8ba3\u8ba4\u8ba5\u8ba6\u8ba7\u8ba8\u8ba9\u8baa\u8bab\u8bac\u8bad\u8bae\u8baf\u8bb0\u8bb1\u8bb2\u8bb3\u8bb4\u8bb5\u8bb6\u8bb7\u8bb8\u8bb9\u8bba\u8bbb\u8bbc\u8bbd\u8bbe\u8bbf\u8bc0\u8bc1\u8bc2\u8bc3\u8bc4\u8bc5\u8bc6\u8bc7\u8bc8\u8bc9\u8bca\u8bcb\u8bcc\u8bcd\u8bce\u8bcf\u8bd0\u8bd1\u8bd2\u8bd3\u8bd4\u8bd5\u8bd6\u8bd7\u8bd8\u8bd9\u8bda\u8bdb\u8bdc\u8bdd\u8bde\u8bdf\u8be0\u8be1\u8be2\u8be3\u8be4\u8be5\u8be6\u8be7\u8be8\u8be9\u8bea\u8beb\u8bec\u8bed\u8bee\u8bef\u8bf0\u8bf1\u8bf2\u8bf3\u8bf4\u8bf5\u8bf6\u8bf7\u8bf8\u8bf9\u8bfa\u8bfb\u8bfc\u8bfd\u8bfe\u8bff\u8c00\u8c01\u8c02\u8c03\u8c04\u8c05\u8c06\u8c07\u8c08\u8c09\u8c0a\u8c0b\u8c0c\u8c0d\u8c0e\u8c0f\u8c10\u8c11\u8c12\u8c13\u8c14\u8c15\u8c16\u8c17\u8c18\u8c19\u8c1a\u8c1b\u8c1c\u8c1d\u8c1e\u8c1f\u8c20\u8c21\u8c22\u8c23\u8c24\u8c25\u8c26\u8c27\u8c28\u8c29\u8c2a\u8c2b\u8c2c\u8c2d\u8c2e\u8c2f\u8c30\u8c31\u8c32\u8c33\u8c34\u8c35\u8c36\u8c37\u8c38\u8c39\u8c3a\u8c3b\u8c3c\u8c3d\u8c3e\u8c3f\u8c40\u8c41\u8c42\u8c43\u8c44\u8c45\u8c46\u8c47\u8c48\u8c49\u8c4a\u8c4b\u8c4c\u8c4d\u8c4e\u8c4f\u8c50\u8c51\u8c52\u8c53\u8c54\u8c55\u8c56\u8c57\u8c58\u8c59\u8c5a\u8c5b\u8c5c\u8c5d\u8c5e\u8c5f\u8c60\u8c61\u8c62\u8c63\u8c64\u8c65\u8c66\u8c67\u8c68\u8c69\u8c6a\u8c6b\u8c6c\u8c6d\u8c6e\u8c6f\u8c70\u8c71\u8c72\u8c73\u8c74\u8c75\u8c76\u8c77\u8c78\u8c79\u8c7a\u8c7b\u8c7c\u8c7d\u8c7e\u8c7f\u8c80\u8c81\u8c82\u8c83\u8c84\u8c85\u8c86\u8c87\u8c88\u8c89\u8c8a\u8c8b\u8c8c\u8c8d\u8c8e\u8c8f\u8c90\u8c91\u8c92\u8c93\u8c94\u8c95\u8c96\u8c97\u8c98\u8c99\u8c9a\u8c9b\u8c9c\u8c9d\u8c9e\u8c9f\u8ca0\u8ca1\u8ca2\u8ca3\u8ca4\u8ca5\u8ca6\u8ca7\u8ca8\u8ca9\u8caa\u8cab\u8cac\u8cad\u8cae\u8caf\u8cb0\u8cb1\u8cb2\u8cb3\u8cb4\u8cb5\u8cb6\u8cb7\u8cb8\u8cb9\u8cba\u8cbb\u8cbc\u8cbd\u8cbe\u8cbf\u8cc0\u8cc1\u8cc2\u8cc3\u8cc4\u8cc5\u8cc6\u8cc7\u8cc8\u8cc9\u8cca\u8ccb\u8ccc\u8ccd\u8cce\u8ccf\u8cd0\u8cd1\u8cd2\u8cd3\u8cd4\u8cd5\u8cd6\u8cd7\u8cd8\u8cd9\u8cda\u8cdb\u8cdc\u8cdd\u8cde\u8cdf\u8ce0\u8ce1\u8ce2\u8ce3\u8ce4\u8ce5\u8c
e6\u8ce7\u8ce8\u8ce9\u8cea\u8ceb\u8cec\u8ced\u8cee\u8cef\u8cf0\u8cf1\u8cf2\u8cf3\u8cf4\u8cf5\u8cf6\u8cf7\u8cf8\u8cf9\u8cfa\u8cfb\u8cfc\u8cfd\u8cfe\u8cff\u8d00\u8d01\u8d02\u8d03\u8d04\u8d05\u8d06\u8d07\u8d08\u8d09\u8d0a\u8d0b\u8d0c\u8d0d\u8d0e\u8d0f\u8d10\u8d11\u8d12\u8d13\u8d14\u8d15\u8d16\u8d17\u8d18\u8d19\u8d1a\u8d1b\u8d1c\u8d1d\u8d1e\u8d1f\u8d20\u8d21\u8d22\u8d23\u8d24\u8d25\u8d26\u8d27\u8d28\u8d29\u8d2a\u8d2b\u8d2c\u8d2d\u8d2e\u8d2f\u8d30\u8d31\u8d32\u8d33\u8d34\u8d35\u8d36\u8d37\u8d38\u8d39\u8d3a\u8d3b\u8d3c\u8d3d\u8d3e\u8d3f\u8d40\u8d41\u8d42\u8d43\u8d44\u8d45\u8d46\u8d47\u8d48\u8d49\u8d4a\u8d4b\u8d4c\u8d4d\u8d4e\u8d4f\u8d50\u8d51\u8d52\u8d53\u8d54\u8d55\u8d56\u8d57\u8d58\u8d59\u8d5a\u8d5b\u8d5c\u8d5d\u8d5e\u8d5f\u8d60\u8d61\u8d62\u8d63\u8d64\u8d65\u8d66\u8d67\u8d68\u8d69\u8d6a\u8d6b\u8d6c\u8d6d\u8d6e\u8d6f\u8d70\u8d71\u8d72\u8d73\u8d74\u8d75\u8d76\u8d77\u8d78\u8d79\u8d7a\u8d7b\u8d7c\u8d7d\u8d7e\u8d7f\u8d80\u8d81\u8d82\u8d83\u8d84\u8d85\u8d86\u8d87\u8d88\u8d89\u8d8a\u8d8b\u8d8c\u8d8d\u8d8e\u8d8f\u8d90\u8d91\u8d92\u8d93\u8d94\u8d95\u8d96\u8d97\u8d98\u8d99\u8d9a\u8d9b\u8d9c\u8d9d\u8d9e\u8d9f\u8da0\u8da1\u8da2\u8da3\u8da4\u8da5\u8da6\u8da7\u8da8\u8da9\u8daa\u8dab\u8dac\u8dad\u8dae\u8daf\u8db0\u8db1\u8db2\u8db3\u8db4\u8db5\u8db6\u8db7\u8db8\u8db9\u8dba\u8dbb\u8dbc\u8dbd\u8dbe\u8dbf\u8dc0\u8dc1\u8dc2\u8dc3\u8dc4\u8dc5\u8dc6\u8dc7\u8dc8\u8dc9\u8dca\u8dcb\u8dcc\u8dcd\u8dce\u8dcf\u8dd0\u8dd1\u8dd2\u8dd3\u8dd4\u8dd5\u8dd6\u8dd7\u8dd8\u8dd9\u8dda\u8ddb\u8ddc\u8ddd\u8dde\u8ddf\u8de0\u8de1\u8de2\u8de3\u8de4\u8de5\u8de6\u8de7\u8de8\u8de9\u8dea\u8deb\u8dec\u8ded\u8dee\u8def\u8df0\u8df1\u8df2\u8df3\u8df4\u8df5\u8df6\u8df7\u8df8\u8df9\u8dfa\u8dfb\u8dfc\u8dfd\u8dfe\u8dff\u8e00\u8e01\u8e02\u8e03\u8e04\u8e05\u8e06\u8e07\u8e08\u8e09\u8e0a\u8e0b\u8e0c\u8e0d\u8e0e\u8e0f\u8e10\u8e11\u8e12\u8e13\u8e14\u8e15\u8e16\u8e17\u8e18\u8e19\u8e1a\u8e1b\u8e1c\u8e1d\u8e1e\u8e1f\u8e20\u8e21\u8e22\u8e23\u8e24\u8e25\u8e26\u8e27\u8e28\u8e29\u8e2a\u8e2b\u8e2c\u8e2d\u8e2e\u8e2f\u8e30\u8e31\u8e32\u8e33
\u8e34\u8e35\u8e36\u8e37\u8e38\u8e39\u8e3a\u8e3b\u8e3c\u8e3d\u8e3e\u8e3f\u8e40\u8e41\u8e42\u8e43\u8e44\u8e45\u8e46\u8e47\u8e48\u8e49\u8e4a\u8e4b\u8e4c\u8e4d\u8e4e\u8e4f\u8e50\u8e51\u8e52\u8e53\u8e54\u8e55\u8e56\u8e57\u8e58\u8e59\u8e5a\u8e5b\u8e5c\u8e5d\u8e5e\u8e5f\u8e60\u8e61\u8e62\u8e63\u8e64\u8e65\u8e66\u8e67\u8e68\u8e69\u8e6a\u8e6b\u8e6c\u8e6d\u8e6e\u8e6f\u8e70\u8e71\u8e72\u8e73\u8e74\u8e75\u8e76\u8e77\u8e78\u8e79\u8e7a\u8e7b\u8e7c\u8e7d\u8e7e\u8e7f\u8e80\u8e81\u8e82\u8e83\u8e84\u8e85\u8e86\u8e87\u8e88\u8e89\u8e8a\u8e8b\u8e8c\u8e8d\u8e8e\u8e8f\u8e90\u8e91\u8e92\u8e93\u8e94\u8e95\u8e96\u8e97\u8e98\u8e99\u8e9a\u8e9b\u8e9c\u8e9d\u8e9e\u8e9f\u8ea0\u8ea1\u8ea2\u8ea3\u8ea4\u8ea5\u8ea6\u8ea7\u8ea8\u8ea9\u8eaa\u8eab\u8eac\u8ead\u8eae\u8eaf\u8eb0\u8eb1\u8eb2\u8eb3\u8eb4\u8eb5\u8eb6\u8eb7\u8eb8\u8eb9\u8eba\u8ebb\u8ebc\u8ebd\u8ebe\u8ebf\u8ec0\u8ec1\u8ec2\u8ec3\u8ec4\u8ec5\u8ec6\u8ec7\u8ec8\u8ec9\u8eca\u8ecb\u8ecc\u8ecd\u8ece\u8ecf\u8ed0\u8ed1\u8ed2\u8ed3\u8ed4\u8ed5\u8ed6\u8ed7\u8ed8\u8ed9\u8eda\u8edb\u8edc\u8edd\u8ede\u8edf\u8ee0\u8ee1\u8ee2\u8ee3\u8ee4\u8ee5\u8ee6\u8ee7\u8ee8\u8ee9\u8eea\u8eeb\u8eec\u8eed\u8eee\u8eef\u8ef0\u8ef1\u8ef2\u8ef3\u8ef4\u8ef5\u8ef6\u8ef7\u8ef8\u8ef9\u8efa\u8efb\u8efc\u8efd\u8efe\u8eff\u8f00\u8f01\u8f02\u8f03\u8f04\u8f05\u8f06\u8f07\u8f08\u8f09\u8f0a\u8f0b\u8f0c\u8f0d\u8f0e\u8f0f\u8f10\u8f11\u8f12\u8f13\u8f14\u8f15\u8f16\u8f17\u8f18\u8f19\u8f1a\u8f1b\u8f1c\u8f1d\u8f1e\u8f1f\u8f20\u8f21\u8f22\u8f23\u8f24\u8f25\u8f26\u8f27\u8f28\u8f29\u8f2a\u8f2b\u8f2c\u8f2d\u8f2e\u8f2f\u8f30\u8f31\u8f32\u8f33\u8f34\u8f35\u8f36\u8f37\u8f38\u8f39\u8f3a\u8f3b\u8f3c\u8f3d\u8f3e\u8f3f\u8f40\u8f41\u8f42\u8f43\u8f44\u8f45\u8f46\u8f47\u8f48\u8f49\u8f4a\u8f4b\u8f4c\u8f4d\u8f4e\u8f4f\u8f50\u8f51\u8f52\u8f53\u8f54\u8f55\u8f56\u8f57\u8f58\u8f59\u8f5a\u8f5b\u8f5c\u8f5d\u8f5e\u8f5f\u8f60\u8f61\u8f62\u8f63\u8f64\u8f65\u8f66\u8f67\u8f68\u8f69\u8f6a\u8f6b\u8f6c\u8f6d\u8f6e\u8f6f\u8f70\u8f71\u8f72\u8f73\u8f74\u8f75\u8f76\u8f77\u8f78\u8f79\u8f7a\u8f7b\u8f7c\u8f7d\u8f7e\u8f7f\u8f80\u
8f81\u8f82\u8f83\u8f84\u8f85\u8f86\u8f87\u8f88\u8f89\u8f8a\u8f8b\u8f8c\u8f8d\u8f8e\u8f8f\u8f90\u8f91\u8f92\u8f93\u8f94\u8f95\u8f96\u8f97\u8f98\u8f99\u8f9a\u8f9b\u8f9c\u8f9d\u8f9e\u8f9f\u8fa0\u8fa1\u8fa2\u8fa3\u8fa4\u8fa5\u8fa6\u8fa7\u8fa8\u8fa9\u8faa\u8fab\u8fac\u8fad\u8fae\u8faf\u8fb0\u8fb1\u8fb2\u8fb3\u8fb4\u8fb5\u8fb6\u8fb7\u8fb8\u8fb9\u8fba\u8fbb\u8fbc\u8fbd\u8fbe\u8fbf\u8fc0\u8fc1\u8fc2\u8fc3\u8fc4\u8fc5\u8fc6\u8fc7\u8fc8\u8fc9\u8fca\u8fcb\u8fcc\u8fcd\u8fce\u8fcf\u8fd0\u8fd1\u8fd2\u8fd3\u8fd4\u8fd5\u8fd6\u8fd7\u8fd8\u8fd9\u8fda\u8fdb\u8fdc\u8fdd\u8fde\u8fdf\u8fe0\u8fe1\u8fe2\u8fe3\u8fe4\u8fe5\u8fe6\u8fe7\u8fe8\u8fe9\u8fea\u8feb\u8fec\u8fed\u8fee\u8fef\u8ff0\u8ff1\u8ff2\u8ff3\u8ff4\u8ff5\u8ff6\u8ff7\u8ff8\u8ff9\u8ffa\u8ffb\u8ffc\u8ffd\u8ffe\u8fff\u9000\u9001\u9002\u9003\u9004\u9005\u9006\u9007\u9008\u9009\u900a\u900b\u900c\u900d\u900e\u900f\u9010\u9011\u9012\u9013\u9014\u9015\u9016\u9017\u9018\u9019\u901a\u901b\u901c\u901d\u901e\u901f\u9020\u9021\u9022\u9023\u9024\u9025\u9026\u9027\u9028\u9029\u902a\u902b\u902c\u902d\u902e\u902f\u9030\u9031\u9032\u9033\u9034\u9035\u9036\u9037\u9038\u9039\u903a\u903b\u903c\u903d\u903e\u903f\u9040\u9041\u9042\u9043\u9044\u9045\u9046\u9047\u9048\u9049\u904a\u904b\u904c\u904d\u904e\u904f\u9050\u9051\u9052\u9053\u9054\u9055\u9056\u9057\u9058\u9059\u905a\u905b\u905c\u905d\u905e\u905f\u9060\u9061\u9062\u9063\u9064\u9065\u9066\u9067\u9068\u9069\u906a\u906b\u906c\u906d\u906e\u906f\u9070\u9071\u9072\u9073\u9074\u9075\u9076\u9077\u9078\u9079\u907a\u907b\u907c\u907d\u907e\u907f\u9080\u9081\u9082\u9083\u9084\u9085\u9086\u9087\u9088\u9089\u908a\u908b\u908c\u908d\u908e\u908f\u9090\u9091\u9092\u9093\u9094\u9095\u9096\u9097\u9098\u9099\u909a\u909b\u909c\u909d\u909e\u909f\u90a0\u90a1\u90a2\u90a3\u90a4\u90a5\u90a6\u90a7\u90a8\u90a9\u90aa\u90ab\u90ac\u90ad\u90ae\u90af\u90b0\u90b1\u90b2\u90b3\u90b4\u90b5\u90b6\u90b7\u90b8\u90b9\u90ba\u90bb\u90bc\u90bd\u90be\u90bf\u90c0\u90c1\u90c2\u90c3\u90c4\u90c5\u90c6\u90c7\u90c8\u90c9\u90ca\u90cb\u90cc\u90cd\u90
ce\u90cf\u90d0\u90d1\u90d2\u90d3\u90d4\u90d5\u90d6\u90d7\u90d8\u90d9\u90da\u90db\u90dc\u90dd\u90de\u90df\u90e0\u90e1\u90e2\u90e3\u90e4\u90e5\u90e6\u90e7\u90e8\u90e9\u90ea\u90eb\u90ec\u90ed\u90ee\u90ef\u90f0\u90f1\u90f2\u90f3\u90f4\u90f5\u90f6\u90f7\u90f8\u90f9\u90fa\u90fb\u90fc\u90fd\u90fe\u90ff\u9100\u9101\u9102\u9103\u9104\u9105\u9106\u9107\u9108\u9109\u910a\u910b\u910c\u910d\u910e\u910f\u9110\u9111\u9112\u9113\u9114\u9115\u9116\u9117\u9118\u9119\u911a\u911b\u911c\u911d\u911e\u911f\u9120\u9121\u9122\u9123\u9124\u9125\u9126\u9127\u9128\u9129\u912a\u912b\u912c\u912d\u912e\u912f\u9130\u9131\u9132\u9133\u9134\u9135\u9136\u9137\u9138\u9139\u913a\u913b\u913c\u913d\u913e\u913f\u9140\u9141\u9142\u9143\u9144\u9145\u9146\u9147\u9148\u9149\u914a\u914b\u914c\u914d\u914e\u914f\u9150\u9151\u9152\u9153\u9154\u9155\u9156\u9157\u9158\u9159\u915a\u915b\u915c\u915d\u915e\u915f\u9160\u9161\u9162\u9163\u9164\u9165\u9166\u9167\u9168\u9169\u916a\u916b\u916c\u916d\u916e\u916f\u9170\u9171\u9172\u9173\u9174\u9175\u9176\u9177\u9178\u9179\u917a\u917b\u917c\u917d\u917e\u917f\u9180\u9181\u9182\u9183\u9184\u9185\u9186\u9187\u9188\u9189\u918a\u918b\u918c\u918d\u918e\u918f\u9190\u9191\u9192\u9193\u9194\u9195\u9196\u9197\u9198\u9199\u919a\u919b\u919c\u919d\u919e\u919f\u91a0\u91a1\u91a2\u91a3\u91a4\u91a5\u91a6\u91a7\u91a8\u91a9\u91aa\u91ab\u91ac\u91ad\u91ae\u91af\u91b0\u91b1\u91b2\u91b3\u91b4\u91b5\u91b6\u91b7\u91b8\u91b9\u91ba\u91bb\u91bc\u91bd\u91be\u91bf\u91c0\u91c1\u91c2\u91c3\u91c4\u91c5\u91c6\u91c7\u91c8\u91c9\u91ca\u91cb\u91cc\u91cd\u91ce\u91cf\u91d0\u91d1\u91d2\u91d3\u91d4\u91d5\u91d6\u91d7\u91d8\u91d9\u91da\u91db\u91dc\u91dd\u91de\u91df\u91e0\u91e1\u91e2\u91e3\u91e4\u91e5\u91e6\u91e7\u91e8\u91e9\u91ea\u91eb\u91ec\u91ed\u91ee\u91ef\u91f0\u91f1\u91f2\u91f3\u91f4\u91f5\u91f6\u91f7\u91f8\u91f9\u91fa\u91fb\u91fc\u91fd\u91fe\u91ff\u9200\u9201\u9202\u9203\u9204\u9205\u9206\u9207\u9208\u9209\u920a\u920b\u920c\u920d\u920e\u920f\u9210\u9211\u9212\u9213\u9214\u9215\u9216\u9217\u9218\u9219\u921a\u921b
\u921c\u921d\u921e\u921f\u9220\u9221\u9222\u9223\u9224\u9225\u9226\u9227\u9228\u9229\u922a\u922b\u922c\u922d\u922e\u922f\u9230\u9231\u9232\u9233\u9234\u9235\u9236\u9237\u9238\u9239\u923a\u923b\u923c\u923d\u923e\u923f\u9240\u9241\u9242\u9243\u9244\u9245\u9246\u9247\u9248\u9249\u924a\u924b\u924c\u924d\u924e\u924f\u9250\u9251\u9252\u9253\u9254\u9255\u9256\u9257\u9258\u9259\u925a\u925b\u925c\u925d\u925e\u925f\u9260\u9261\u9262\u9263\u9264\u9265\u9266\u9267\u9268\u9269\u926a\u926b\u926c\u926d\u926e\u926f\u9270\u9271\u9272\u9273\u9274\u9275\u9276\u9277\u9278\u9279\u927a\u927b\u927c\u927d\u927e\u927f\u9280\u9281\u9282\u9283\u9284\u9285\u9286\u9287\u9288\u9289\u928a\u928b\u928c\u928d\u928e\u928f\u9290\u9291\u9292\u9293\u9294\u9295\u9296\u9297\u9298\u9299\u929a\u929b\u929c\u929d\u929e\u929f\u92a0\u92a1\u92a2\u92a3\u92a4\u92a5\u92a6\u92a7\u92a8\u92a9\u92aa\u92ab\u92ac\u92ad\u92ae\u92af\u92b0\u92b1\u92b2\u92b3\u92b4\u92b5\u92b6\u92b7\u92b8\u92b9\u92ba\u92bb\u92bc\u92bd\u92be\u92bf\u92c0\u92c1\u92c2\u92c3\u92c4\u92c5\u92c6\u92c7\u92c8\u92c9\u92ca\u92cb\u92cc\u92cd\u92ce\u92cf\u92d0\u92d1\u92d2\u92d3\u92d4\u92d5\u92d6\u92d7\u92d8\u92d9\u92da\u92db\u92dc\u92dd\u92de\u92df\u92e0\u92e1\u92e2\u92e3\u92e4\u92e5\u92e6\u92e7\u92e8\u92e9\u92ea\u92eb\u92ec\u92ed\u92ee\u92ef\u92f0\u92f1\u92f2\u92f3\u92f4\u92f5\u92f6\u92f7\u92f8\u92f9\u92fa\u92fb\u92fc\u92fd\u92fe\u92ff\u9300\u9301\u9302\u9303\u9304\u9305\u9306\u9307\u9308\u9309\u930a\u930b\u930c\u930d\u930e\u930f\u9310\u9311\u9312\u9313\u9314\u9315\u9316\u9317\u9318\u9319\u931a\u931b\u931c\u931d\u931e\u931f\u9320\u9321\u9322\u9323\u9324\u9325\u9326\u9327\u9328\u9329\u932a\u932b\u932c\u932d\u932e\u932f\u9330\u9331\u9332\u9333\u9334\u9335\u9336\u9337\u9338\u9339\u933a\u933b\u933c\u933d\u933e\u933f\u9340\u9341\u9342\u9343\u9344\u9345\u9346\u9347\u9348\u9349\u934a\u934b\u934c\u934d\u934e\u934f\u9350\u9351\u9352\u9353\u9354\u9355\u9356\u9357\u9358\u9359\u935a\u935b\u935c\u935d\u935e\u935f\u9360\u9361\u9362\u9363\u9364\u9365\u9366\u9367\u9368\u
9369\u936a\u936b\u936c\u936d\u936e\u936f\u9370\u9371\u9372\u9373\u9374\u9375\u9376\u9377\u9378\u9379\u937a\u937b\u937c\u937d\u937e\u937f\u9380\u9381\u9382\u9383\u9384\u9385\u9386\u9387\u9388\u9389\u938a\u938b\u938c\u938d\u938e\u938f\u9390\u9391\u9392\u9393\u9394\u9395\u9396\u9397\u9398\u9399\u939a\u939b\u939c\u939d\u939e\u939f\u93a0\u93a1\u93a2\u93a3\u93a4\u93a5\u93a6\u93a7\u93a8\u93a9\u93aa\u93ab\u93ac\u93ad\u93ae\u93af\u93b0\u93b1\u93b2\u93b3\u93b4\u93b5\u93b6\u93b7\u93b8\u93b9\u93ba\u93bb\u93bc\u93bd\u93be\u93bf\u93c0\u93c1\u93c2\u93c3\u93c4\u93c5\u93c6\u93c7\u93c8\u93c9\u93ca\u93cb\u93cc\u93cd\u93ce\u93cf\u93d0\u93d1\u93d2\u93d3\u93d4\u93d5\u93d6\u93d7\u93d8\u93d9\u93da\u93db\u93dc\u93dd\u93de\u93df\u93e0\u93e1\u93e2\u93e3\u93e4\u93e5\u93e6\u93e7\u93e8\u93e9\u93ea\u93eb\u93ec\u93ed\u93ee\u93ef\u93f0\u93f1\u93f2\u93f3\u93f4\u93f5\u93f6\u93f7\u93f8\u93f9\u93fa\u93fb\u93fc\u93fd\u93fe\u93ff\u9400\u9401\u9402\u9403\u9404\u9405\u9406\u9407\u9408\u9409\u940a\u940b\u940c\u940d\u940e\u940f\u9410\u9411\u9412\u9413\u9414\u9415\u9416\u9417\u9418\u9419\u941a\u941b\u941c\u941d\u941e\u941f\u9420\u9421\u9422\u9423\u9424\u9425\u9426\u9427\u9428\u9429\u942a\u942b\u942c\u942d\u942e\u942f\u9430\u9431\u9432\u9433\u9434\u9435\u9436\u9437\u9438\u9439\u943a\u943b\u943c\u943d\u943e\u943f\u9440\u9441\u9442\u9443\u9444\u9445\u9446\u9447\u9448\u9449\u944a\u944b\u944c\u944d\u944e\u944f\u9450\u9451\u9452\u9453\u9454\u9455\u9456\u9457\u9458\u9459\u945a\u945b\u945c\u945d\u945e\u945f\u9460\u9461\u9462\u9463\u9464\u9465\u9466\u9467\u9468\u9469\u946a\u946b\u946c\u946d\u946e\u946f\u9470\u9471\u9472\u9473\u9474\u9475\u9476\u9477\u9478\u9479\u947a\u947b\u947c\u947d\u947e\u947f\u9480\u9481\u9482\u9483\u9484\u9485\u9486\u9487\u9488\u9489\u948a\u948b\u948c\u948d\u948e\u948f\u9490\u9491\u9492\u9493\u9494\u9495\u9496\u9497\u9498\u9499\u949a\u949b\u949c\u949d\u949e\u949f\u94a0\u94a1\u94a2\u94a3\u94a4\u94a5\u94a6\u94a7\u94a8\u94a9\u94aa\u94ab\u94ac\u94ad\u94ae\u94af\u94b0\u94b1\u94b2\u94b3\u94b4\u94b5\u94
b6\u94b7\u94b8\u94b9\u94ba\u94bb\u94bc\u94bd\u94be\u94bf\u94c0\u94c1\u94c2\u94c3\u94c4\u94c5\u94c6\u94c7\u94c8\u94c9\u94ca\u94cb\u94cc\u94cd\u94ce\u94cf\u94d0\u94d1\u94d2\u94d3\u94d4\u94d5\u94d6\u94d7\u94d8\u94d9\u94da\u94db\u94dc\u94dd\u94de\u94df\u94e0\u94e1\u94e2\u94e3\u94e4\u94e5\u94e6\u94e7\u94e8\u94e9\u94ea\u94eb\u94ec\u94ed\u94ee\u94ef\u94f0\u94f1\u94f2\u94f3\u94f4\u94f5\u94f6\u94f7\u94f8\u94f9\u94fa\u94fb\u94fc\u94fd\u94fe\u94ff\u9500\u9501\u9502\u9503\u9504\u9505\u9506\u9507\u9508\u9509\u950a\u950b\u950c\u950d\u950e\u950f\u9510\u9511\u9512\u9513\u9514\u9515\u9516\u9517\u9518\u9519\u951a\u951b\u951c\u951d\u951e\u951f\u9520\u9521\u9522\u9523\u9524\u9525\u9526\u9527\u9528\u9529\u952a\u952b\u952c\u952d\u952e\u952f\u9530\u9531\u9532\u9533\u9534\u9535\u9536\u9537\u9538\u9539\u953a\u953b\u953c\u953d\u953e\u953f\u9540\u9541\u9542\u9543\u9544\u9545\u9546\u9547\u9548\u9549\u954a\u954b\u954c\u954d\u954e\u954f\u9550\u9551\u9552\u9553\u9554\u9555\u9556\u9557\u9558\u9559\u955a\u955b\u955c\u955d\u955e\u955f\u9560\u9561\u9562\u9563\u9564\u9565\u9566\u9567\u9568\u9569\u956a\u956b\u956c\u956d\u956e\u956f\u9570\u9571\u9572\u9573\u9574\u9575\u9576\u9577\u9578\u9579\u957a\u957b\u957c\u957d\u957e\u957f\u9580\u9581\u9582\u9583\u9584\u9585\u9586\u9587\u9588\u9589\u958a\u958b\u958c\u958d\u958e\u958f\u9590\u9591\u9592\u9593\u9594\u9595\u9596\u9597\u9598\u9599\u959a\u959b\u959c\u959d\u959e\u959f\u95a0\u95a1\u95a2\u95a3\u95a4\u95a5\u95a6\u95a7\u95a8\u95a9\u95aa\u95ab\u95ac\u95ad\u95ae\u95af\u95b0\u95b1\u95b2\u95b3\u95b4\u95b5\u95b6\u95b7\u95b8\u95b9\u95ba\u95bb\u95bc\u95bd\u95be\u95bf\u95c0\u95c1\u95c2\u95c3\u95c4\u95c5\u95c6\u95c7\u95c8\u95c9\u95ca\u95cb\u95cc\u95cd\u95ce\u95cf\u95d0\u95d1\u95d2\u95d3\u95d4\u95d5\u95d6\u95d7\u95d8\u95d9\u95da\u95db\u95dc\u95dd\u95de\u95df\u95e0\u95e1\u95e2\u95e3\u95e4\u95e5\u95e6\u95e7\u95e8\u95e9\u95ea\u95eb\u95ec\u95ed\u95ee\u95ef\u95f0\u95f1\u95f2\u95f3\u95f4\u95f5\u95f6\u95f7\u95f8\u95f9\u95fa\u95fb\u95fc\u95fd\u95fe\u95ff\u9600\u9601\u9602\u9603
\u9604\u9605\u9606\u9607\u9608\u9609\u960a\u960b\u960c\u960d\u960e\u960f\u9610\u9611\u9612\u9613\u9614\u9615\u9616\u9617\u9618\u9619\u961a\u961b\u961c\u961d\u961e\u961f\u9620\u9621\u9622\u9623\u9624\u9625\u9626\u9627\u9628\u9629\u962a\u962b\u962c\u962d\u962e\u962f\u9630\u9631\u9632\u9633\u9634\u9635\u9636\u9637\u9638\u9639\u963a\u963b\u963c\u963d\u963e\u963f\u9640\u9641\u9642\u9643\u9644\u9645\u9646\u9647\u9648\u9649\u964a\u964b\u964c\u964d\u964e\u964f\u9650\u9651\u9652\u9653\u9654\u9655\u9656\u9657\u9658\u9659\u965a\u965b\u965c\u965d\u965e\u965f\u9660\u9661\u9662\u9663\u9664\u9665\u9666\u9667\u9668\u9669\u966a\u966b\u966c\u966d\u966e\u966f\u9670\u9671\u9672\u9673\u9674\u9675\u9676\u9677\u9678\u9679\u967a\u967b\u967c\u967d\u967e\u967f\u9680\u9681\u9682\u9683\u9684\u9685\u9686\u9687\u9688\u9689\u968a\u968b\u968c\u968d\u968e\u968f\u9690\u9691\u9692\u9693\u9694\u9695\u9696\u9697\u9698\u9699\u969a\u969b\u969c\u969d\u969e\u969f\u96a0\u96a1\u96a2\u96a3\u96a4\u96a5\u96a6\u96a7\u96a8\u96a9\u96aa\u96ab\u96ac\u96ad\u96ae\u96af\u96b0\u96b1\u96b2\u96b3\u96b4\u96b5\u96b6\u96b7\u96b8\u96b9\u96ba\u96bb\u96bc\u96bd\u96be\u96bf\u96c0\u96c1\u96c2\u96c3\u96c4\u96c5\u96c6\u96c7\u96c8\u96c9\u96ca\u96cb\u96cc\u96cd\u96ce\u96cf\u96d0\u96d1\u96d2\u96d3\u96d4\u96d5\u96d6\u96d7\u96d8\u96d9\u96da\u96db\u96dc\u96dd\u96de\u96df\u96e0\u96e1\u96e2\u96e3\u96e4\u96e5\u96e6\u96e7\u96e8\u96e9\u96ea\u96eb\u96ec\u96ed\u96ee\u96ef\u96f0\u96f1\u96f2\u96f3\u96f4\u96f5\u96f6\u96f7\u96f8\u96f9\u96fa\u96fb\u96fc\u96fd\u96fe\u96ff\u9700\u9701\u9702\u9703\u9704\u9705\u9706\u9707\u9708\u9709\u970a\u970b\u970c\u970d\u970e\u970f\u9710\u9711\u9712\u9713\u9714\u9715\u9716\u9717\u9718\u9719\u971a\u971b\u971c\u971d\u971e\u971f\u9720\u9721\u9722\u9723\u9724\u9725\u9726\u9727\u9728\u9729\u972a\u972b\u972c\u972d\u972e\u972f\u9730\u9731\u9732\u9733\u9734\u9735\u9736\u9737\u9738\u9739\u973a\u973b\u973c\u973d\u973e\u973f\u9740\u9741\u9742\u9743\u9744\u9745\u9746\u9747\u9748\u9749\u974a\u974b\u974c\u974d\u974e\u974f\u9750\u
9751\u9752\u9753\u9754\u9755\u9756\u9757\u9758\u9759\u975a\u975b\u975c\u975d\u975e\u975f\u9760\u9761\u9762\u9763\u9764\u9765\u9766\u9767\u9768\u9769\u976a\u976b\u976c\u976d\u976e\u976f\u9770\u9771\u9772\u9773\u9774\u9775\u9776\u9777\u9778\u9779\u977a\u977b\u977c\u977d\u977e\u977f\u9780\u9781\u9782\u9783\u9784\u9785\u9786\u9787\u9788\u9789\u978a\u978b\u978c\u978d\u978e\u978f\u9790\u9791\u9792\u9793\u9794\u9795\u9796\u9797\u9798\u9799\u979a\u979b\u979c\u979d\u979e\u979f\u97a0\u97a1\u97a2\u97a3\u97a4\u97a5\u97a6\u97a7\u97a8\u97a9\u97aa\u97ab\u97ac\u97ad\u97ae\u97af\u97b0\u97b1\u97b2\u97b3\u97b4\u97b5\u97b6\u97b7\u97b8\u97b9\u97ba\u97bb\u97bc\u97bd\u97be\u97bf\u97c0\u97c1\u97c2\u97c3\u97c4\u97c5\u97c6\u97c7\u97c8\u97c9\u97ca\u97cb\u97cc\u97cd\u97ce\u97cf\u97d0\u97d1\u97d2\u97d3\u97d4\u97d5\u97d6\u97d7\u97d8\u97d9\u97da\u97db\u97dc\u97dd\u97de\u97df\u97e0\u97e1\u97e2\u97e3\u97e4\u97e5\u97e6\u97e7\u97e8\u97e9\u97ea\u97eb\u97ec\u97ed\u97ee\u97ef\u97f0\u97f1\u97f2\u97f3\u97f4\u97f5\u97f6\u97f7\u97f8\u97f9\u97fa\u97fb\u97fc\u97fd\u97fe\u97ff\u9800\u9801\u9802\u9803\u9804\u9805\u9806\u9807\u9808\u9809\u980a\u980b\u980c\u980d\u980e\u980f\u9810\u9811\u9812\u9813\u9814\u9815\u9816\u9817\u9818\u9819\u981a\u981b\u981c\u981d\u981e\u981f\u9820\u9821\u9822\u9823\u9824\u9825\u9826\u9827\u9828\u9829\u982a\u982b\u982c\u982d\u982e\u982f\u9830\u9831\u9832\u9833\u9834\u9835\u9836\u9837\u9838\u9839\u983a\u983b\u983c\u983d\u983e\u983f\u9840\u9841\u9842\u9843\u9844\u9845\u9846\u9847\u9848\u9849\u984a\u984b\u984c\u984d\u984e\u984f\u9850\u9851\u9852\u9853\u9854\u9855\u9856\u9857\u9858\u9859\u985a\u985b\u985c\u985d\u985e\u985f\u9860\u9861\u9862\u9863\u9864\u9865\u9866\u9867\u9868\u9869\u986a\u986b\u986c\u986d\u986e\u986f\u9870\u9871\u9872\u9873\u9874\u9875\u9876\u9877\u9878\u9879\u987a\u987b\u987c\u987d\u987e\u987f\u9880\u9881\u9882\u9883\u9884\u9885\u9886\u9887\u9888\u9889\u988a\u988b\u988c\u988d\u988e\u988f\u9890\u9891\u9892\u9893\u9894\u9895\u9896\u9897\u9898\u9899\u989a\u989b\u989c\u989d\u98
9e\u989f\u98a0\u98a1\u98a2\u98a3\u98a4\u98a5\u98a6\u98a7\u98a8\u98a9\u98aa\u98ab\u98ac\u98ad\u98ae\u98af\u98b0\u98b1\u98b2\u98b3\u98b4\u98b5\u98b6\u98b7\u98b8\u98b9\u98ba\u98bb\u98bc\u98bd\u98be\u98bf\u98c0\u98c1\u98c2\u98c3\u98c4\u98c5\u98c6\u98c7\u98c8\u98c9\u98ca\u98cb\u98cc\u98cd\u98ce\u98cf\u98d0\u98d1\u98d2\u98d3\u98d4\u98d5\u98d6\u98d7\u98d8\u98d9\u98da\u98db\u98dc\u98dd\u98de\u98df\u98e0\u98e1\u98e2\u98e3\u98e4\u98e5\u98e6\u98e7\u98e8\u98e9\u98ea\u98eb\u98ec\u98ed\u98ee\u98ef\u98f0\u98f1\u98f2\u98f3\u98f4\u98f5\u98f6\u98f7\u98f8\u98f9\u98fa\u98fb\u98fc\u98fd\u98fe\u98ff\u9900\u9901\u9902\u9903\u9904\u9905\u9906\u9907\u9908\u9909\u990a\u990b\u990c\u990d\u990e\u990f\u9910\u9911\u9912\u9913\u9914\u9915\u9916\u9917\u9918\u9919\u991a\u991b\u991c\u991d\u991e\u991f\u9920\u9921\u9922\u9923\u9924\u9925\u9926\u9927\u9928\u9929\u992a\u992b\u992c\u992d\u992e\u992f\u9930\u9931\u9932\u9933\u9934\u9935\u9936\u9937\u9938\u9939\u993a\u993b\u993c\u993d\u993e\u993f\u9940\u9941\u9942\u9943\u9944\u9945\u9946\u9947\u9948\u9949\u994a\u994b\u994c\u994d\u994e\u994f\u9950\u9951\u9952\u9953\u9954\u9955\u9956\u9957\u9958\u9959\u995a\u995b\u995c\u995d\u995e\u995f\u9960\u9961\u9962\u9963\u9964\u9965\u9966\u9967\u9968\u9969\u996a\u996b\u996c\u996d\u996e\u996f\u9970\u9971\u9972\u9973\u9974\u9975\u9976\u9977\u9978\u9979\u997a\u997b\u997c\u997d\u997e\u997f\u9980\u9981\u9982\u9983\u9984\u9985\u9986\u9987\u9988\u9989\u998a\u998b\u998c\u998d\u998e\u998f\u9990\u9991\u9992\u9993\u9994\u9995\u9996\u9997\u9998\u9999\u999a\u999b\u999c\u999d\u999e\u999f\u99a0\u99a1\u99a2\u99a3\u99a4\u99a5\u99a6\u99a7\u99a8\u99a9\u99aa\u99ab\u99ac\u99ad\u99ae\u99af\u99b0\u99b1\u99b2\u99b3\u99b4\u99b5\u99b6\u99b7\u99b8\u99b9\u99ba\u99bb\u99bc\u99bd\u99be\u99bf\u99c0\u99c1\u99c2\u99c3\u99c4\u99c5\u99c6\u99c7\u99c8\u99c9\u99ca\u99cb\u99cc\u99cd\u99ce\u99cf\u99d0\u99d1\u99d2\u99d3\u99d4\u99d5\u99d6\u99d7\u99d8\u99d9\u99da\u99db\u99dc\u99dd\u99de\u99df\u99e0\u99e1\u99e2\u99e3\u99e4\u99e5\u99e6\u99e7\u99e8\u99e9\u99ea\u99eb
\u99ec\u99ed\u99ee\u99ef\u99f0\u99f1\u99f2\u99f3\u99f4\u99f5\u99f6\u99f7\u99f8\u99f9\u99fa\u99fb\u99fc\u99fd\u99fe\u99ff\u9a00\u9a01\u9a02\u9a03\u9a04\u9a05\u9a06\u9a07\u9a08\u9a09\u9a0a\u9a0b\u9a0c\u9a0d\u9a0e\u9a0f\u9a10\u9a11\u9a12\u9a13\u9a14\u9a15\u9a16\u9a17\u9a18\u9a19\u9a1a\u9a1b\u9a1c\u9a1d\u9a1e\u9a1f\u9a20\u9a21\u9a22\u9a23\u9a24\u9a25\u9a26\u9a27\u9a28\u9a29\u9a2a\u9a2b\u9a2c\u9a2d\u9a2e\u9a2f\u9a30\u9a31\u9a32\u9a33\u9a34\u9a35\u9a36\u9a37\u9a38\u9a39\u9a3a\u9a3b\u9a3c\u9a3d\u9a3e\u9a3f\u9a40\u9a41\u9a42\u9a43\u9a44\u9a45\u9a46\u9a47\u9a48\u9a49\u9a4a\u9a4b\u9a4c\u9a4d\u9a4e\u9a4f\u9a50\u9a51\u9a52\u9a53\u9a54\u9a55\u9a56\u9a57\u9a58\u9a59\u9a5a\u9a5b\u9a5c\u9a5d\u9a5e\u9a5f\u9a60\u9a61\u9a62\u9a63\u9a64\u9a65\u9a66\u9a67\u9a68\u9a69\u9a6a\u9a6b\u9a6c\u9a6d\u9a6e\u9a6f\u9a70\u9a71\u9a72\u9a73\u9a74\u9a75\u9a76\u9a77\u9a78\u9a79\u9a7a\u9a7b\u9a7c\u9a7d\u9a7e\u9a7f\u9a80\u9a81\u9a82\u9a83\u9a84\u9a85\u9a86\u9a87\u9a88\u9a89\u9a8a\u9a8b\u9a8c\u9a8d\u9a8e\u9a8f\u9a90\u9a91\u9a92\u9a93\u9a94\u9a95\u9a96\u9a97\u9a98\u9a99\u9a9a\u9a9b\u9a9c\u9a9d\u9a9e\u9a9f\u9aa0\u9aa1\u9aa2\u9aa3\u9aa4\u9aa5\u9aa6\u9aa7\u9aa8\u9aa9\u9aaa\u9aab\u9aac\u9aad\u9aae\u9aaf\u9ab0\u9ab1\u9ab2\u9ab3\u9ab4\u9ab5\u9ab6\u9ab7\u9ab8\u9ab9\u9aba\u9abb\u9abc\u9abd\u9abe\u9abf\u9ac0\u9ac1\u9ac2\u9ac3\u9ac4\u9ac5\u9ac6\u9ac7\u9ac8\u9ac9\u9aca\u9acb\u9acc\u9acd\u9ace\u9acf\u9ad0\u9ad1\u9ad2\u9ad3\u9ad4\u9ad5\u9ad6\u9ad7\u9ad8\u9ad9\u9ada\u9adb\u9adc\u9add\u9ade\u9adf\u9ae0\u9ae1\u9ae2\u9ae3\u9ae4\u9ae5\u9ae6\u9ae7\u9ae8\u9ae9\u9aea\u9aeb\u9aec\u9aed\u9aee\u9aef\u9af0\u9af1\u9af2\u9af3\u9af4\u9af5\u9af6\u9af7\u9af8\u9af9\u9afa\u9afb\u9afc\u9afd\u9afe\u9aff\u9b00\u9b01\u9b02\u9b03\u9b04\u9b05\u9b06\u9b07\u9b08\u9b09\u9b0a\u9b0b\u9b0c\u9b0d\u9b0e\u9b0f\u9b10\u9b11\u9b12\u9b13\u9b14\u9b15\u9b16\u9b17\u9b18\u9b19\u9b1a\u9b1b\u9b1c\u9b1d\u9b1e\u9b1f\u9b20\u9b21\u9b22\u9b23\u9b24\u9b25\u9b26\u9b27\u9b28\u9b29\u9b2a\u9b2b\u9b2c\u9b2d\u9b2e\u9b2f\u9b30\u9b31\u9b32\u9b33\u9b34\u9b35\u9b36\u9b37\u9b38\u
9b39\u9b3a\u9b3b\u9b3c\u9b3d\u9b3e\u9b3f\u9b40\u9b41\u9b42\u9b43\u9b44\u9b45\u9b46\u9b47\u9b48\u9b49\u9b4a\u9b4b\u9b4c\u9b4d\u9b4e\u9b4f\u9b50\u9b51\u9b52\u9b53\u9b54\u9b55\u9b56\u9b57\u9b58\u9b59\u9b5a\u9b5b\u9b5c\u9b5d\u9b5e\u9b5f\u9b60\u9b61\u9b62\u9b63\u9b64\u9b65\u9b66\u9b67\u9b68\u9b69\u9b6a\u9b6b\u9b6c\u9b6d\u9b6e\u9b6f\u9b70\u9b71\u9b72\u9b73\u9b74\u9b75\u9b76\u9b77\u9b78\u9b79\u9b7a\u9b7b\u9b7c\u9b7d\u9b7e\u9b7f\u9b80\u9b81\u9b82\u9b83\u9b84\u9b85\u9b86\u9b87\u9b88\u9b89\u9b8a\u9b8b\u9b8c\u9b8d\u9b8e\u9b8f\u9b90\u9b91\u9b92\u9b93\u9b94\u9b95\u9b96\u9b97\u9b98\u9b99\u9b9a\u9b9b\u9b9c\u9b9d\u9b9e\u9b9f\u9ba0\u9ba1\u9ba2\u9ba3\u9ba4\u9ba5\u9ba6\u9ba7\u9ba8\u9ba9\u9baa\u9bab\u9bac\u9bad\u9bae\u9baf\u9bb0\u9bb1\u9bb2\u9bb3\u9bb4\u9bb5\u9bb6\u9bb7\u9bb8\u9bb9\u9bba\u9bbb\u9bbc\u9bbd\u9bbe\u9bbf\u9bc0\u9bc1\u9bc2\u9bc3\u9bc4\u9bc5\u9bc6\u9bc7\u9bc8\u9bc9\u9bca\u9bcb\u9bcc\u9bcd\u9bce\u9bcf\u9bd0\u9bd1\u9bd2\u9bd3\u9bd4\u9bd5\u9bd6\u9bd7\u9bd8\u9bd9\u9bda\u9bdb\u9bdc\u9bdd\u9bde\u9bdf\u9be0\u9be1\u9be2\u9be3\u9be4\u9be5\u9be6\u9be7\u9be8\u9be9\u9bea\u9beb\u9bec\u9bed\u9bee\u9bef\u9bf0\u9bf1\u9bf2\u9bf3\u9bf4\u9bf5\u9bf6\u9bf7\u9bf8\u9bf9\u9bfa\u9bfb\u9bfc\u9bfd\u9bfe\u9bff\u9c00\u9c01\u9c02\u9c03\u9c04\u9c05\u9c06\u9c07\u9c08\u9c09\u9c0a\u9c0b\u9c0c\u9c0d\u9c0e\u9c0f\u9c10\u9c11\u9c12\u9c13\u9c14\u9c15\u9c16\u9c17\u9c18\u9c19\u9c1a\u9c1b\u9c1c\u9c1d\u9c1e\u9c1f\u9c20\u9c21\u9c22\u9c23\u9c24\u9c25\u9c26\u9c27\u9c28\u9c29\u9c2a\u9c2b\u9c2c\u9c2d\u9c2e\u9c2f\u9c30\u9c31\u9c32\u9c33\u9c34\u9c35\u9c36\u9c37\u9c38\u9c39\u9c3a\u9c3b\u9c3c\u9c3d\u9c3e\u9c3f\u9c40\u9c41\u9c42\u9c43\u9c44\u9c45\u9c46\u9c47\u9c48\u9c49\u9c4a\u9c4b\u9c4c\u9c4d\u9c4e\u9c4f\u9c50\u9c51\u9c52\u9c53\u9c54\u9c55\u9c56\u9c57\u9c58\u9c59\u9c5a\u9c5b\u9c5c\u9c5d\u9c5e\u9c5f\u9c60\u9c61\u9c62\u9c63\u9c64\u9c65\u9c66\u9c67\u9c68\u9c69\u9c6a\u9c6b\u9c6c\u9c6d\u9c6e\u9c6f\u9c70\u9c71\u9c72\u9c73\u9c74\u9c75\u9c76\u9c77\u9c78\u9c79\u9c7a\u9c7b\u9c7c\u9c7d\u9c7e\u9c7f\u9c80\u9c81\u9c82\u9c83\u9c84\u9c85\u9c
86\u9c87\u9c88\u9c89\u9c8a\u9c8b\u9c8c\u9c8d\u9c8e\u9c8f\u9c90\u9c91\u9c92\u9c93\u9c94\u9c95\u9c96\u9c97\u9c98\u9c99\u9c9a\u9c9b\u9c9c\u9c9d\u9c9e\u9c9f\u9ca0\u9ca1\u9ca2\u9ca3\u9ca4\u9ca5\u9ca6\u9ca7\u9ca8\u9ca9\u9caa\u9cab\u9cac\u9cad\u9cae\u9caf\u9cb0\u9cb1\u9cb2\u9cb3\u9cb4\u9cb5\u9cb6\u9cb7\u9cb8\u9cb9\u9cba\u9cbb\u9cbc\u9cbd\u9cbe\u9cbf\u9cc0\u9cc1\u9cc2\u9cc3\u9cc4\u9cc5\u9cc6\u9cc7\u9cc8\u9cc9\u9cca\u9ccb\u9ccc\u9ccd\u9cce\u9ccf\u9cd0\u9cd1\u9cd2\u9cd3\u9cd4\u9cd5\u9cd6\u9cd7\u9cd8\u9cd9\u9cda\u9cdb\u9cdc\u9cdd\u9cde\u9cdf\u9ce0\u9ce1\u9ce2\u9ce3\u9ce4\u9ce5\u9ce6\u9ce7\u9ce8\u9ce9\u9cea\u9ceb\u9cec\u9ced\u9cee\u9cef\u9cf0\u9cf1\u9cf2\u9cf3\u9cf4\u9cf5\u9cf6\u9cf7\u9cf8\u9cf9\u9cfa\u9cfb\u9cfc\u9cfd\u9cfe\u9cff\u9d00\u9d01\u9d02\u9d03\u9d04\u9d05\u9d06\u9d07\u9d08\u9d09\u9d0a\u9d0b\u9d0c\u9d0d\u9d0e\u9d0f\u9d10\u9d11\u9d12\u9d13\u9d14\u9d15\u9d16\u9d17\u9d18\u9d19\u9d1a\u9d1b\u9d1c\u9d1d\u9d1e\u9d1f\u9d20\u9d21\u9d22\u9d23\u9d24\u9d25\u9d26\u9d27\u9d28\u9d29\u9d2a\u9d2b\u9d2c\u9d2d\u9d2e\u9d2f\u9d30\u9d31\u9d32\u9d33\u9d34\u9d35\u9d36\u9d37\u9d38\u9d39\u9d3a\u9d3b\u9d3c\u9d3d\u9d3e\u9d3f\u9d40\u9d41\u9d42\u9d43\u9d44\u9d45\u9d46\u9d47\u9d48\u9d49\u9d4a\u9d4b\u9d4c\u9d4d\u9d4e\u9d4f\u9d50\u9d51\u9d52\u9d53\u9d54\u9d55\u9d56\u9d57\u9d58\u9d59\u9d5a\u9d5b\u9d5c\u9d5d\u9d5e\u9d5f\u9d60\u9d61\u9d62\u9d63\u9d64\u9d65\u9d66\u9d67\u9d68\u9d69\u9d6a\u9d6b\u9d6c\u9d6d\u9d6e\u9d6f\u9d70\u9d71\u9d72\u9d73\u9d74\u9d75\u9d76\u9d77\u9d78\u9d79\u9d7a\u9d7b\u9d7c\u9d7d\u9d7e\u9d7f\u9d80\u9d81\u9d82\u9d83\u9d84\u9d85\u9d86\u9d87\u9d88\u9d89\u9d8a\u9d8b\u9d8c\u9d8d\u9d8e\u9d8f\u9d90\u9d91\u9d92\u9d93\u9d94\u9d95\u9d96\u9d97\u9d98\u9d99\u9d9a\u9d9b\u9d9c\u9d9d\u9d9e\u9d9f\u9da0\u9da1\u9da2\u9da3\u9da4\u9da5\u9da6\u9da7\u9da8\u9da9\u9daa\u9dab\u9dac\u9dad\u9dae\u9daf\u9db0\u9db1\u9db2\u9db3\u9db4\u9db5\u9db6\u9db7\u9db8\u9db9\u9dba\u9dbb\u9dbc\u9dbd\u9dbe\u9dbf\u9dc0\u9dc1\u9dc2\u9dc3\u9dc4\u9dc5\u9dc6\u9dc7\u9dc8\u9dc9\u9dca\u9dcb\u9dcc\u9dcd\u9dce\u9dcf\u9dd0\u9dd1\u9dd2\u9dd3
\u9dd4\u9dd5\u9dd6\u9dd7\u9dd8\u9dd9\u9dda\u9ddb\u9ddc\u9ddd\u9dde\u9ddf\u9de0\u9de1\u9de2\u9de3\u9de4\u9de5\u9de6\u9de7\u9de8\u9de9\u9dea\u9deb\u9dec\u9ded\u9dee\u9def\u9df0\u9df1\u9df2\u9df3\u9df4\u9df5\u9df6\u9df7\u9df8\u9df9\u9dfa\u9dfb\u9dfc\u9dfd\u9dfe\u9dff\u9e00\u9e01\u9e02\u9e03\u9e04\u9e05\u9e06\u9e07\u9e08\u9e09\u9e0a\u9e0b\u9e0c\u9e0d\u9e0e\u9e0f\u9e10\u9e11\u9e12\u9e13\u9e14\u9e15\u9e16\u9e17\u9e18\u9e19\u9e1a\u9e1b\u9e1c\u9e1d\u9e1e\u9e1f\u9e20\u9e21\u9e22\u9e23\u9e24\u9e25\u9e26\u9e27\u9e28\u9e29\u9e2a\u9e2b\u9e2c\u9e2d\u9e2e\u9e2f\u9e30\u9e31\u9e32\u9e33\u9e34\u9e35\u9e36\u9e37\u9e38\u9e39\u9e3a\u9e3b\u9e3c\u9e3d\u9e3e\u9e3f\u9e40\u9e41\u9e42\u9e43\u9e44\u9e45\u9e46\u9e47\u9e48\u9e49\u9e4a\u9e4b\u9e4c\u9e4d\u9e4e\u9e4f\u9e50\u9e51\u9e52\u9e53\u9e54\u9e55\u9e56\u9e57\u9e58\u9e59\u9e5a\u9e5b\u9e5c\u9e5d\u9e5e\u9e5f\u9e60\u9e61\u9e62\u9e63\u9e64\u9e65\u9e66\u9e67\u9e68\u9e69\u9e6a\u9e6b\u9e6c\u9e6d\u9e6e\u9e6f\u9e70\u9e71\u9e72\u9e73\u9e74\u9e75\u9e76\u9e77\u9e78\u9e79\u9e7a\u9e7b\u9e7c\u9e7d\u9e7e\u9e7f\u9e80\u9e81\u9e82\u9e83\u9e84\u9e85\u9e86\u9e87\u9e88\u9e89\u9e8a\u9e8b\u9e8c\u9e8d\u9e8e\u9e8f\u9e90\u9e91\u9e92\u9e93\u9e94\u9e95\u9e96\u9e97\u9e98\u9e99\u9e9a\u9e9b\u9e9c\u9e9d\u9e9e\u9e9f\u9ea0\u9ea1\u9ea2\u9ea3\u9ea4\u9ea5\u9ea6\u9ea7\u9ea8\u9ea9\u9eaa\u9eab\u9eac\u9ead\u9eae\u9eaf\u9eb0\u9eb1\u9eb2\u9eb3\u9eb4\u9eb5\u9eb6\u9eb7\u9eb8\u9eb9\u9eba\u9ebb\u9ebc\u9ebd\u9ebe\u9ebf\u9ec0\u9ec1\u9ec2\u9ec3\u9ec4\u9ec5\u9ec6\u9ec7\u9ec8\u9ec9\u9eca\u9ecb\u9ecc\u9ecd\u9ece\u9ecf\u9ed0\u9ed1\u9ed2\u9ed3\u9ed4\u9ed5\u9ed6\u9ed7\u9ed8\u9ed9\u9eda\u9edb\u9edc\u9edd\u9ede\u9edf\u9ee0\u9ee1\u9ee2\u9ee3\u9ee4\u9ee5\u9ee6\u9ee7\u9ee8\u9ee9\u9eea\u9eeb\u9eec\u9eed\u9eee\u9eef\u9ef0\u9ef1\u9ef2\u9ef3\u9ef4\u9ef5\u9ef6\u9ef7\u9ef8\u9ef9\u9efa\u9efb\u9efc\u9efd\u9efe\u9eff\u9f00\u9f01\u9f02\u9f03\u9f04\u9f05\u9f06\u9f07\u9f08\u9f09\u9f0a\u9f0b\u9f0c\u9f0d\u9f0e\u9f0f\u9f10\u9f11\u9f12\u9f13\u9f14\u9f15\u9f16\u9f17\u9f18\u9f19\u9f1a\u9f1b\u9f1c\u9f1d\u9f1e\u9f1f\u9f20\u
9f21\u9f22\u9f23\u9f24\u9f25\u9f26\u9f27\u9f28\u9f29\u9f2a\u9f2b\u9f2c\u9f2d\u9f2e\u9f2f\u9f30\u9f31\u9f32\u9f33\u9f34\u9f35\u9f36\u9f37\u9f38\u9f39\u9f3a\u9f3b\u9f3c\u9f3d\u9f3e\u9f3f\u9f40\u9f41\u9f42\u9f43\u9f44\u9f45\u9f46\u9f47\u9f48\u9f49\u9f4a\u9f4b\u9f4c\u9f4d\u9f4e\u9f4f\u9f50\u9f51\u9f52\u9f53\u9f54\u9f55\u9f56\u9f57\u9f58\u9f59\u9f5a\u9f5b\u9f5c\u9f5d\u9f5e\u9f5f\u9f60\u9f61\u9f62\u9f63\u9f64\u9f65\u9f66\u9f67\u9f68\u9f69\u9f6a\u9f6b\u9f6c\u9f6d\u9f6e\u9f6f\u9f70\u9f71\u9f72\u9f73\u9f74\u9f75\u9f76\u9f77\u9f78\u9f79\u9f7a\u9f7b\u9f7c\u9f7d\u9f7e\u9f7f\u9f80\u9f81\u9f82\u9f83\u9f84\u9f85\u9f86\u9f87\u9f88\u9f89\u9f8a\u9f8b\u9f8c\u9f8d\u9f8e\u9f8f\u9f90\u9f91\u9f92\u9f93\u9f94\u9f95\u9f96\u9f97\u9f98\u9f99\u9f9a\u9f9b\u9f9c\u9f9d\u9f9e\u9f9f\u9fa0\u9fa1\u9fa2\u9fa3\u9fa4\u9fa5\u9fa6\u9fa7\u9fa8\u9fa9\u9faa\u9fab\u9fac\u9fad\u9fae\u9faf\u9fb0\u9fb1\u9fb2\u9fb3\u9fb4\u9fb5\u9fb6\u9fb7\u9fb8\u9fb9\u9fba\u9fbb\ua000\ua001\ua002\ua003\ua004\ua005\ua006\ua007\ua008\ua009\ua00a\ua00b\ua00c\ua00d\ua00e\ua00f\ua010\ua011\ua012\ua013\ua014\ua016\ua017\ua018\ua019\ua01a\ua01b\ua01c\ua01d\ua01e\ua01f\ua020\ua021\ua022\ua023\ua024\ua025\ua026\ua027\ua028\ua029\ua02a\ua02b\ua02c\ua02d\ua02e\ua02f\ua030\ua031\ua032\ua033\ua034\ua035\ua036\ua037\ua038\ua039\ua03a\ua03b\ua03c\ua03d\ua03e\ua03f\ua040\ua041\ua042\ua043\ua044\ua045\ua046\ua047\ua048\ua049\ua04a\ua04b\ua04c\ua04d\ua04e\ua04f\ua050\ua051\ua052\ua053\ua054\ua055\ua056\ua057\ua058\ua059\ua05a\ua05b\ua05c\ua05d\ua05e\ua05f\ua060\ua061\ua062\ua063\ua064\ua065\ua066\ua067\ua068\ua069\ua06a\ua06b\ua06c\ua06d\ua06e\ua06f\ua070\ua071\ua072\ua073\ua074\ua075\ua076\ua077\ua078\ua079\ua07a\ua07b\ua07c\ua07d\ua07e\ua07f\ua080\ua081\ua082\ua083\ua084\ua085\ua086\ua087\ua088\ua089\ua08a\ua08b\ua08c\ua08d\ua08e\ua08f\ua090\ua091\ua092\ua093\ua094\ua095\ua096\ua097\ua098\ua099\ua09a\ua09b\ua09c\ua09d\ua09e\ua09f\ua0a0\ua0a1\ua0a2\ua0a3\ua0a4\ua0a5\ua0a6\ua0a7\ua0a8\ua0a9\ua0aa\ua0ab\ua0ac\ua0ad\ua0ae\ua0af\ua0b0\ua0b1\ua0b2\ua0
b3\ua0b4\ua0b5\ua0b6\ua0b7\ua0b8\ua0b9\ua0ba\ua0bb\ua0bc\ua0bd\ua0be\ua0bf\ua0c0\ua0c1\ua0c2\ua0c3\ua0c4\ua0c5\ua0c6\ua0c7\ua0c8\ua0c9\ua0ca\ua0cb\ua0cc\ua0cd\ua0ce\ua0cf\ua0d0\ua0d1\ua0d2\ua0d3\ua0d4\ua0d5\ua0d6\ua0d7\ua0d8\ua0d9\ua0da\ua0db\ua0dc\ua0dd\ua0de\ua0df\ua0e0\ua0e1\ua0e2\ua0e3\ua0e4\ua0e5\ua0e6\ua0e7\ua0e8\ua0e9\ua0ea\ua0eb\ua0ec\ua0ed\ua0ee\ua0ef\ua0f0\ua0f1\ua0f2\ua0f3\ua0f4\ua0f5\ua0f6\ua0f7\ua0f8\ua0f9\ua0fa\ua0fb\ua0fc\ua0fd\ua0fe\ua0ff\ua100\ua101\ua102\ua103\ua104\ua105\ua106\ua107\ua108\ua109\ua10a\ua10b\ua10c\ua10d\ua10e\ua10f\ua110\ua111\ua112\ua113\ua114\ua115\ua116\ua117\ua118\ua119\ua11a\ua11b\ua11c\ua11d\ua11e\ua11f\ua120\ua121\ua122\ua123\ua124\ua125\ua126\ua127\ua128\ua129\ua12a\ua12b\ua12c\ua12d\ua12e\ua12f\ua130\ua131\ua132\ua133\ua134\ua135\ua136\ua137\ua138\ua139\ua13a\ua13b\ua13c\ua13d\ua13e\ua13f\ua140\ua141\ua142\ua143\ua144\ua145\ua146\ua147\ua148\ua149\ua14a\ua14b\ua14c\ua14d\ua14e\ua14f\ua150\ua151\ua152\ua153\ua154\ua155\ua156\ua157\ua158\ua159\ua15a\ua15b\ua15c\ua15d\ua15e\ua15f\ua160\ua161\ua162\ua163\ua164\ua165\ua166\ua167\ua168\ua169\ua16a\ua16b\ua16c\ua16d\ua16e\ua16f\ua170\ua171\ua172\ua173\ua174\ua175\ua176\ua177\ua178\ua179\ua17a\ua17b\ua17c\ua17d\ua17e\ua17f\ua180\ua181\ua182\ua183\ua184\ua185\ua186\ua187\ua188\ua189\ua18a\ua18b\ua18c\ua18d\ua18e\ua18f\ua190\ua191\ua192\ua193\ua194\ua195\ua196\ua197\ua198\ua199\ua19a\ua19b\ua19c\ua19d\ua19e\ua19f\ua1a0\ua1a1\ua1a2\ua1a3\ua1a4\ua1a5\ua1a6\ua1a7\ua1a8\ua1a9\ua1aa\ua1ab\ua1ac\ua1ad\ua1ae\ua1af\ua1b0\ua1b1\ua1b2\ua1b3\ua1b4\ua1b5\ua1b6\ua1b7\ua1b8\ua1b9\ua1ba\ua1bb\ua1bc\ua1bd\ua1be\ua1bf\ua1c0\ua1c1\ua1c2\ua1c3\ua1c4\ua1c5\ua1c6\ua1c7\ua1c8\ua1c9\ua1ca\ua1cb\ua1cc\ua1cd\ua1ce\ua1cf\ua1d0\ua1d1\ua1d2\ua1d3\ua1d4\ua1d5\ua1d6\ua1d7\ua1d8\ua1d9\ua1da\ua1db\ua1dc\ua1dd\ua1de\ua1df\ua1e0\ua1e1\ua1e2\ua1e3\ua1e4\ua1e5\ua1e6\ua1e7\ua1e8\ua1e9\ua1ea\ua1eb\ua1ec\ua1ed\ua1ee\ua1ef\ua1f0\ua1f1\ua1f2\ua1f3\ua1f4\ua1f5\ua1f6\ua1f7\ua1f8\ua1f9\ua1fa\ua1fb\ua1fc\ua1fd\ua1fe\ua1ff\ua200
\ua201\ua202\ua203\ua204\ua205\ua206\ua207\ua208\ua209\ua20a\ua20b\ua20c\ua20d\ua20e\ua20f\ua210\ua211\ua212\ua213\ua214\ua215\ua216\ua217\ua218\ua219\ua21a\ua21b\ua21c\ua21d\ua21e\ua21f\ua220\ua221\ua222\ua223\ua224\ua225\ua226\ua227\ua228\ua229\ua22a\ua22b\ua22c\ua22d\ua22e\ua22f\ua230\ua231\ua232\ua233\ua234\ua235\ua236\ua237\ua238\ua239\ua23a\ua23b\ua23c\ua23d\ua23e\ua23f\ua240\ua241\ua242\ua243\ua244\ua245\ua246\ua247\ua248\ua249\ua24a\ua24b\ua24c\ua24d\ua24e\ua24f\ua250\ua251\ua252\ua253\ua254\ua255\ua256\ua257\ua258\ua259\ua25a\ua25b\ua25c\ua25d\ua25e\ua25f\ua260\ua261\ua262\ua263\ua264\ua265\ua266\ua267\ua268\ua269\ua26a\ua26b\ua26c\ua26d\ua26e\ua26f\ua270\ua271\ua272\ua273\ua274\ua275\ua276\ua277\ua278\ua279\ua27a\ua27b\ua27c\ua27d\ua27e\ua27f\ua280\ua281\ua282\ua283\ua284\ua285\ua286\ua287\ua288\ua289\ua28a\ua28b\ua28c\ua28d\ua28e\ua28f\ua290\ua291\ua292\ua293\ua294\ua295\ua296\ua297\ua298\ua299\ua29a\ua29b\ua29c\ua29d\ua29e\ua29f\ua2a0\ua2a1\ua2a2\ua2a3\ua2a4\ua2a5\ua2a6\ua2a7\ua2a8\ua2a9\ua2aa\ua2ab\ua2ac\ua2ad\ua2ae\ua2af\ua2b0\ua2b1\ua2b2\ua2b3\ua2b4\ua2b5\ua2b6\ua2b7\ua2b8\ua2b9\ua2ba\ua2bb\ua2bc\ua2bd\ua2be\ua2bf\ua2c0\ua2c1\ua2c2\ua2c3\ua2c4\ua2c5\ua2c6\ua2c7\ua2c8\ua2c9\ua2ca\ua2cb\ua2cc\ua2cd\ua2ce\ua2cf\ua2d0\ua2d1\ua2d2\ua2d3\ua2d4\ua2d5\ua2d6\ua2d7\ua2d8\ua2d9\ua2da\ua2db\ua2dc\ua2dd\ua2de\ua2df\ua2e0\ua2e1\ua2e2\ua2e3\ua2e4\ua2e5\ua2e6\ua2e7\ua2e8\ua2e9\ua2ea\ua2eb\ua2ec\ua2ed\ua2ee\ua2ef\ua2f0\ua2f1\ua2f2\ua2f3\ua2f4\ua2f5\ua2f6\ua2f7\ua2f8\ua2f9\ua2fa\ua2fb\ua2fc\ua2fd\ua2fe\ua2ff\ua300\ua301\ua302\ua303\ua304\ua305\ua306\ua307\ua308\ua309\ua30a\ua30b\ua30c\ua30d\ua30e\ua30f\ua310\ua311\ua312\ua313\ua314\ua315\ua316\ua317\ua318\ua319\ua31a\ua31b\ua31c\ua31d\ua31e\ua31f\ua320\ua321\ua322\ua323\ua324\ua325\ua326\ua327\ua328\ua329\ua32a\ua32b\ua32c\ua32d\ua32e\ua32f\ua330\ua331\ua332\ua333\ua334\ua335\ua336\ua337\ua338\ua339\ua33a\ua33b\ua33c\ua33d\ua33e\ua33f\ua340\ua341\ua342\ua343\ua344\ua345\ua346\ua347\ua348\ua349\ua34a\ua34b\ua34c\ua34d\u
a34e\ua34f\ua350\ua351\ua352\ua353\ua354\ua355\ua356\ua357\ua358\ua359\ua35a\ua35b\ua35c\ua35d\ua35e\ua35f\ua360\ua361\ua362\ua363\ua364\ua365\ua366\ua367\ua368\ua369\ua36a\ua36b\ua36c\ua36d\ua36e\ua36f\ua370\ua371\ua372\ua373\ua374\ua375\ua376\ua377\ua378\ua379\ua37a\ua37b\ua37c\ua37d\ua37e\ua37f\ua380\ua381\ua382\ua383\ua384\ua385\ua386\ua387\ua388\ua389\ua38a\ua38b\ua38c\ua38d\ua38e\ua38f\ua390\ua391\ua392\ua393\ua394\ua395\ua396\ua397\ua398\ua399\ua39a\ua39b\ua39c\ua39d\ua39e\ua39f\ua3a0\ua3a1\ua3a2\ua3a3\ua3a4\ua3a5\ua3a6\ua3a7\ua3a8\ua3a9\ua3aa\ua3ab\ua3ac\ua3ad\ua3ae\ua3af\ua3b0\ua3b1\ua3b2\ua3b3\ua3b4\ua3b5\ua3b6\ua3b7\ua3b8\ua3b9\ua3ba\ua3bb\ua3bc\ua3bd\ua3be\ua3bf\ua3c0\ua3c1\ua3c2\ua3c3\ua3c4\ua3c5\ua3c6\ua3c7\ua3c8\ua3c9\ua3ca\ua3cb\ua3cc\ua3cd\ua3ce\ua3cf\ua3d0\ua3d1\ua3d2\ua3d3\ua3d4\ua3d5\ua3d6\ua3d7\ua3d8\ua3d9\ua3da\ua3db\ua3dc\ua3dd\ua3de\ua3df\ua3e0\ua3e1\ua3e2\ua3e3\ua3e4\ua3e5\ua3e6\ua3e7\ua3e8\ua3e9\ua3ea\ua3eb\ua3ec\ua3ed\ua3ee\ua3ef\ua3f0\ua3f1\ua3f2\ua3f3\ua3f4\ua3f5\ua3f6\ua3f7\ua3f8\ua3f9\ua3fa\ua3fb\ua3fc\ua3fd\ua3fe\ua3ff\ua400\ua401\ua402\ua403\ua404\ua405\ua406\ua407\ua408\ua409\ua40a\ua40b\ua40c\ua40d\ua40e\ua40f\ua410\ua411\ua412\ua413\ua414\ua415\ua416\ua417\ua418\ua419\ua41a\ua41b\ua41c\ua41d\ua41e\ua41f\ua420\ua421\ua422\ua423\ua424\ua425\ua426\ua427\ua428\ua429\ua42a\ua42b\ua42c\ua42d\ua42e\ua42f\ua430\ua431\ua432\ua433\ua434\ua435\ua436\ua437\ua438\ua439\ua43a\ua43b\ua43c\ua43d\ua43e\ua43f\ua440\ua441\ua442\ua443\ua444\ua445\ua446\ua447\ua448\ua449\ua44a\ua44b\ua44c\ua44d\ua44e\ua44f\ua450\ua451\ua452\ua453\ua454\ua455\ua456\ua457\ua458\ua459\ua45a\ua45b\ua45c\ua45d\ua45e\ua45f\ua460\ua461\ua462\ua463\ua464\ua465\ua466\ua467\ua468\ua469\ua46a\ua46b\ua46c\ua46d\ua46e\ua46f\ua470\ua471\ua472\ua473\ua474\ua475\ua476\ua477\ua478\ua479\ua47a\ua47b\ua47c\ua47d\ua47e\ua47f\ua480\ua481\ua482\ua483\ua484\ua485\ua486\ua487\ua488\ua489\ua48a\ua48b\ua48c\ua800\ua801\ua803\ua804\ua805\ua807\ua808\ua809\ua80a\ua80c\ua80d\ua80e\ua80f\ua810\ua8
11\ua812\ua813\ua814\ua815\ua816\ua817\ua818\ua819\ua81a\ua81b\ua81c\ua81d\ua81e\ua81f\ua820\ua821\ua822\uac00\uac01\uac02\uac03\uac04\uac05\uac06\uac07\uac08\uac09\uac0a\uac0b\uac0c\uac0d\uac0e\uac0f\uac10\uac11\uac12\uac13\uac14\uac15\uac16\uac17\uac18\uac19\uac1a\uac1b\uac1c\uac1d\uac1e\uac1f\uac20\uac21\uac22\uac23\uac24\uac25\uac26\uac27\uac28\uac29\uac2a\uac2b\uac2c\uac2d\uac2e\uac2f\uac30\uac31\uac32\uac33\uac34\uac35\uac36\uac37\uac38\uac39\uac3a\uac3b\uac3c\uac3d\uac3e\uac3f\uac40\uac41\uac42\uac43\uac44\uac45\uac46\uac47\uac48\uac49\uac4a\uac4b\uac4c\uac4d\uac4e\uac4f\uac50\uac51\uac52\uac53\uac54\uac55\uac56\uac57\uac58\uac59\uac5a\uac5b\uac5c\uac5d\uac5e\uac5f\uac60\uac61\uac62\uac63\uac64\uac65\uac66\uac67\uac68\uac69\uac6a\uac6b\uac6c\uac6d\uac6e\uac6f\uac70\uac71\uac72\uac73\uac74\uac75\uac76\uac77\uac78\uac79\uac7a\uac7b\uac7c\uac7d\uac7e\uac7f\uac80\uac81\uac82\uac83\uac84\uac85\uac86\uac87\uac88\uac89\uac8a\uac8b\uac8c\uac8d\uac8e\uac8f\uac90\uac91\uac92\uac93\uac94\uac95\uac96\uac97\uac98\uac99\uac9a\uac9b\uac9c\uac9d\uac9e\uac9f\uaca0\uaca1\uaca2\uaca3\uaca4\uaca5\uaca6\uaca7\uaca8\uaca9\uacaa\uacab\uacac\uacad\uacae\uacaf\uacb0\uacb1\uacb2\uacb3\uacb4\uacb5\uacb6\uacb7\uacb8\uacb9\uacba\uacbb\uacbc\uacbd\uacbe\uacbf\uacc0\uacc1\uacc2\uacc3\uacc4\uacc5\uacc6\uacc7\uacc8\uacc9\uacca\uaccb\uaccc\uaccd\uacce\uaccf\uacd0\uacd1\uacd2\uacd3\uacd4\uacd5\uacd6\uacd7\uacd8\uacd9\uacda\uacdb\uacdc\uacdd\uacde\uacdf\uace0\uace1\uace2\uace3\uace4\uace5\uace6\uace7\uace8\uace9\uacea\uaceb\uacec\uaced\uacee\uacef\uacf0\uacf1\uacf2\uacf3\uacf4\uacf5\uacf6\uacf7\uacf8\uacf9\uacfa\uacfb\uacfc\uacfd\uacfe\uacff\uad00\uad01\uad02\uad03\uad04\uad05\uad06\uad07\uad08\uad09\uad0a\uad0b\uad0c\uad0d\uad0e\uad0f\uad10\uad11\uad12\uad13\uad14\uad15\uad16\uad17\uad18\uad19\uad1a\uad1b\uad1c\uad1d\uad1e\uad1f\uad20\uad21\uad22\uad23\uad24\uad25\uad26\uad27\uad28\uad29\uad2a\uad2b\uad2c\uad2d\uad2e\uad2f\uad30\uad31\uad32\uad33\uad34\uad35\uad36\uad37\uad38\uad39\uad3a\uad3b
\uad3c\uad3d\uad3e\uad3f\uad40\uad41\uad42\uad43\uad44\uad45\uad46\uad47\uad48\uad49\uad4a\uad4b\uad4c\uad4d\uad4e\uad4f\uad50\uad51\uad52\uad53\uad54\uad55\uad56\uad57\uad58\uad59\uad5a\uad5b\uad5c\uad5d\uad5e\uad5f\uad60\uad61\uad62\uad63\uad64\uad65\uad66\uad67\uad68\uad69\uad6a\uad6b\uad6c\uad6d\uad6e\uad6f\uad70\uad71\uad72\uad73\uad74\uad75\uad76\uad77\uad78\uad79\uad7a\uad7b\uad7c\uad7d\uad7e\uad7f\uad80\uad81\uad82\uad83\uad84\uad85\uad86\uad87\uad88\uad89\uad8a\uad8b\uad8c\uad8d\uad8e\uad8f\uad90\uad91\uad92\uad93\uad94\uad95\uad96\uad97\uad98\uad99\uad9a\uad9b\uad9c\uad9d\uad9e\uad9f\uada0\uada1\uada2\uada3\uada4\uada5\uada6\uada7\uada8\uada9\uadaa\uadab\uadac\uadad\uadae\uadaf\uadb0\uadb1\uadb2\uadb3\uadb4\uadb5\uadb6\uadb7\uadb8\uadb9\uadba\uadbb\uadbc\uadbd\uadbe\uadbf\uadc0\uadc1\uadc2\uadc3\uadc4\uadc5\uadc6\uadc7\uadc8\uadc9\uadca\uadcb\uadcc\uadcd\uadce\uadcf\uadd0\uadd1\uadd2\uadd3\uadd4\uadd5\uadd6\uadd7\uadd8\uadd9\uadda\uaddb\uaddc\uaddd\uadde\uaddf\uade0\uade1\uade2\uade3\uade4\uade5\uade6\uade7\uade8\uade9\uadea\uadeb\uadec\uaded\uadee\uadef\uadf0\uadf1\uadf2\uadf3\uadf4\uadf5\uadf6\uadf7\uadf8\uadf9\uadfa\uadfb\uadfc\uadfd\uadfe\uadff\uae00\uae01\uae02\uae03\uae04\uae05\uae06\uae07\uae08\uae09\uae0a\uae0b\uae0c\uae0d\uae0e\uae0f\uae10\uae11\uae12\uae13\uae14\uae15\uae16\uae17\uae18\uae19\uae1a\uae1b\uae1c\uae1d\uae1e\uae1f\uae20\uae21\uae22\uae23\uae24\uae25\uae26\uae27\uae28\uae29\uae2a\uae2b\uae2c\uae2d\uae2e\uae2f\uae30\uae31\uae32\uae33\uae34\uae35\uae36\uae37\uae38\uae39\uae3a\uae3b\uae3c\uae3d\uae3e\uae3f\uae40\uae41\uae42\uae43\uae44\uae45\uae46\uae47\uae48\uae49\uae4a\uae4b\uae4c\uae4d\uae4e\uae4f\uae50\uae51\uae52\uae53\uae54\uae55\uae56\uae57\uae58\uae59\uae5a\uae5b\uae5c\uae5d\uae5e\uae5f\uae60\uae61\uae62\uae63\uae64\uae65\uae66\uae67\uae68\uae69\uae6a\uae6b\uae6c\uae6d\uae6e\uae6f\uae70\uae71\uae72\uae73\uae74\uae75\uae76\uae77\uae78\uae79\uae7a\uae7b\uae7c\uae7d\uae7e\uae7f\uae80\uae81\uae82\uae83\uae84\uae85\uae86\uae87\uae88\u
ae89\uae8a\uae8b\uae8c\uae8d\uae8e\uae8f\uae90\uae91\uae92\uae93\uae94\uae95\uae96\uae97\uae98\uae99\uae9a\uae9b\uae9c\uae9d\uae9e\uae9f\uaea0\uaea1\uaea2\uaea3\uaea4\uaea5\uaea6\uaea7\uaea8\uaea9\uaeaa\uaeab\uaeac\uaead\uaeae\uaeaf\uaeb0\uaeb1\uaeb2\uaeb3\uaeb4\uaeb5\uaeb6\uaeb7\uaeb8\uaeb9\uaeba\uaebb\uaebc\uaebd\uaebe\uaebf\uaec0\uaec1\uaec2\uaec3\uaec4\uaec5\uaec6\uaec7\uaec8\uaec9\uaeca\uaecb\uaecc\uaecd\uaece\uaecf\uaed0\uaed1\uaed2\uaed3\uaed4\uaed5\uaed6\uaed7\uaed8\uaed9\uaeda\uaedb\uaedc\uaedd\uaede\uaedf\uaee0\uaee1\uaee2\uaee3\uaee4\uaee5\uaee6\uaee7\uaee8\uaee9\uaeea\uaeeb\uaeec\uaeed\uaeee\uaeef\uaef0\uaef1\uaef2\uaef3\uaef4\uaef5\uaef6\uaef7\uaef8\uaef9\uaefa\uaefb\uaefc\uaefd\uaefe\uaeff\uaf00\uaf01\uaf02\uaf03\uaf04\uaf05\uaf06\uaf07\uaf08\uaf09\uaf0a\uaf0b\uaf0c\uaf0d\uaf0e\uaf0f\uaf10\uaf11\uaf12\uaf13\uaf14\uaf15\uaf16\uaf17\uaf18\uaf19\uaf1a\uaf1b\uaf1c\uaf1d\uaf1e\uaf1f\uaf20\uaf21\uaf22\uaf23\uaf24\uaf25\uaf26\uaf27\uaf28\uaf29\uaf2a\uaf2b\uaf2c\uaf2d\uaf2e\uaf2f\uaf30\uaf31\uaf32\uaf33\uaf34\uaf35\uaf36\uaf37\uaf38\uaf39\uaf3a\uaf3b\uaf3c\uaf3d\uaf3e\uaf3f\uaf40\uaf41\uaf42\uaf43\uaf44\uaf45\uaf46\uaf47\uaf48\uaf49\uaf4a\uaf4b\uaf4c\uaf4d\uaf4e\uaf4f\uaf50\uaf51\uaf52\uaf53\uaf54\uaf55\uaf56\uaf57\uaf58\uaf59\uaf5a\uaf5b\uaf5c\uaf5d\uaf5e\uaf5f\uaf60\uaf61\uaf62\uaf63\uaf64\uaf65\uaf66\uaf67\uaf68\uaf69\uaf6a\uaf6b\uaf6c\uaf6d\uaf6e\uaf6f\uaf70\uaf71\uaf72\uaf73\uaf74\uaf75\uaf76\uaf77\uaf78\uaf79\uaf7a\uaf7b\uaf7c\uaf7d\uaf7e\uaf7f\uaf80\uaf81\uaf82\uaf83\uaf84\uaf85\uaf86\uaf87\uaf88\uaf89\uaf8a\uaf8b\uaf8c\uaf8d\uaf8e\uaf8f\uaf90\uaf91\uaf92\uaf93\uaf94\uaf95\uaf96\uaf97\uaf98\uaf99\uaf9a\uaf9b\uaf9c\uaf9d\uaf9e\uaf9f\uafa0\uafa1\uafa2\uafa3\uafa4\uafa5\uafa6\uafa7\uafa8\uafa9\uafaa\uafab\uafac\uafad\uafae\uafaf\uafb0\uafb1\uafb2\uafb3\uafb4\uafb5\uafb6\uafb7\uafb8\uafb9\uafba\uafbb\uafbc\uafbd\uafbe\uafbf\uafc0\uafc1\uafc2\uafc3\uafc4\uafc5\uafc6\uafc7\uafc8\uafc9\uafca\uafcb\uafcc\uafcd\uafce\uafcf\uafd0\uafd1\uafd2\uafd3\uafd4\uafd5\uaf
d6\uafd7\uafd8\uafd9\uafda\uafdb\uafdc\uafdd\uafde\uafdf\uafe0\uafe1\uafe2\uafe3\uafe4\uafe5\uafe6\uafe7\uafe8\uafe9\uafea\uafeb\uafec\uafed\uafee\uafef\uaff0\uaff1\uaff2\uaff3\uaff4\uaff5\uaff6\uaff7\uaff8\uaff9\uaffa\uaffb\uaffc\uaffd\uaffe\uafff\ub000\ub001\ub002\ub003\ub004\ub005\ub006\ub007\ub008\ub009\ub00a\ub00b\ub00c\ub00d\ub00e\ub00f\ub010\ub011\ub012\ub013\ub014\ub015\ub016\ub017\ub018\ub019\ub01a\ub01b\ub01c\ub01d\ub01e\ub01f\ub020\ub021\ub022\ub023\ub024\ub025\ub026\ub027\ub028\ub029\ub02a\ub02b\ub02c\ub02d\ub02e\ub02f\ub030\ub031\ub032\ub033\ub034\ub035\ub036\ub037\ub038\ub039\ub03a\ub03b\ub03c\ub03d\ub03e\ub03f\ub040\ub041\ub042\ub043\ub044\ub045\ub046\ub047\ub048\ub049\ub04a\ub04b\ub04c\ub04d\ub04e\ub04f\ub050\ub051\ub052\ub053\ub054\ub055\ub056\ub057\ub058\ub059\ub05a\ub05b\ub05c\ub05d\ub05e\ub05f\ub060\ub061\ub062\ub063\ub064\ub065\ub066\ub067\ub068\ub069\ub06a\ub06b\ub06c\ub06d\ub06e\ub06f\ub070\ub071\ub072\ub073\ub074\ub075\ub076\ub077\ub078\ub079\ub07a\ub07b\ub07c\ub07d\ub07e\ub07f\ub080\ub081\ub082\ub083\ub084\ub085\ub086\ub087\ub088\ub089\ub08a\ub08b\ub08c\ub08d\ub08e\ub08f\ub090\ub091\ub092\ub093\ub094\ub095\ub096\ub097\ub098\ub099\ub09a\ub09b\ub09c\ub09d\ub09e\ub09f\ub0a0\ub0a1\ub0a2\ub0a3\ub0a4\ub0a5\ub0a6\ub0a7\ub0a8\ub0a9\ub0aa\ub0ab\ub0ac\ub0ad\ub0ae\ub0af\ub0b0\ub0b1\ub0b2\ub0b3\ub0b4\ub0b5\ub0b6\ub0b7\ub0b8\ub0b9\ub0ba\ub0bb\ub0bc\ub0bd\ub0be\ub0bf\ub0c0\ub0c1\ub0c2\ub0c3\ub0c4\ub0c5\ub0c6\ub0c7\ub0c8\ub0c9\ub0ca\ub0cb\ub0cc\ub0cd\ub0ce\ub0cf\ub0d0\ub0d1\ub0d2\ub0d3\ub0d4\ub0d5\ub0d6\ub0d7\ub0d8\ub0d9\ub0da\ub0db\ub0dc\ub0dd\ub0de\ub0df\ub0e0\ub0e1\ub0e2\ub0e3\ub0e4\ub0e5\ub0e6\ub0e7\ub0e8\ub0e9\ub0ea\ub0eb\ub0ec\ub0ed\ub0ee\ub0ef\ub0f0\ub0f1\ub0f2\ub0f3\ub0f4\ub0f5\ub0f6\ub0f7\ub0f8\ub0f9\ub0fa\ub0fb\ub0fc\ub0fd\ub0fe\ub0ff\ub100\ub101\ub102\ub103\ub104\ub105\ub106\ub107\ub108\ub109\ub10a\ub10b\ub10c\ub10d\ub10e\ub10f\ub110\ub111\ub112\ub113\ub114\ub115\ub116\ub117\ub118\ub119\ub11a\ub11b\ub11c\ub11d\ub11e\ub11f\ub120\ub121\ub122\ub123
\ub124\ub125\ub126\ub127\ub128\ub129\ub12a\ub12b\ub12c\ub12d\ub12e\ub12f\ub130\ub131\ub132\ub133\ub134\ub135\ub136\ub137\ub138\ub139\ub13a\ub13b\ub13c\ub13d\ub13e\ub13f\ub140\ub141\ub142\ub143\ub144\ub145\ub146\ub147\ub148\ub149\ub14a\ub14b\ub14c\ub14d\ub14e\ub14f\ub150\ub151\ub152\ub153\ub154\ub155\ub156\ub157\ub158\ub159\ub15a\ub15b\ub15c\ub15d\ub15e\ub15f\ub160\ub161\ub162\ub163\ub164\ub165\ub166\ub167\ub168\ub169\ub16a\ub16b\ub16c\ub16d\ub16e\ub16f\ub170\ub171\ub172\ub173\ub174\ub175\ub176\ub177\ub178\ub179\ub17a\ub17b\ub17c\ub17d\ub17e\ub17f\ub180\ub181\ub182\ub183\ub184\ub185\ub186\ub187\ub188\ub189\ub18a\ub18b\ub18c\ub18d\ub18e\ub18f\ub190\ub191\ub192\ub193\ub194\ub195\ub196\ub197\ub198\ub199\ub19a\ub19b\ub19c\ub19d\ub19e\ub19f\ub1a0\ub1a1\ub1a2\ub1a3\ub1a4\ub1a5\ub1a6\ub1a7\ub1a8\ub1a9\ub1aa\ub1ab\ub1ac\ub1ad\ub1ae\ub1af\ub1b0\ub1b1\ub1b2\ub1b3\ub1b4\ub1b5\ub1b6\ub1b7\ub1b8\ub1b9\ub1ba\ub1bb\ub1bc\ub1bd\ub1be\ub1bf\ub1c0\ub1c1\ub1c2\ub1c3\ub1c4\ub1c5\ub1c6\ub1c7\ub1c8\ub1c9\ub1ca\ub1cb\ub1cc\ub1cd\ub1ce\ub1cf\ub1d0\ub1d1\ub1d2\ub1d3\ub1d4\ub1d5\ub1d6\ub1d7\ub1d8\ub1d9\ub1da\ub1db\ub1dc\ub1dd\ub1de\ub1df\ub1e0\ub1e1\ub1e2\ub1e3\ub1e4\ub1e5\ub1e6\ub1e7\ub1e8\ub1e9\ub1ea\ub1eb\ub1ec\ub1ed\ub1ee\ub1ef\ub1f0\ub1f1\ub1f2\ub1f3\ub1f4\ub1f5\ub1f6\ub1f7\ub1f8\ub1f9\ub1fa\ub1fb\ub1fc\ub1fd\ub1fe\ub1ff\ub200\ub201\ub202\ub203\ub204\ub205\ub206\ub207\ub208\ub209\ub20a\ub20b\ub20c\ub20d\ub20e\ub20f\ub210\ub211\ub212\ub213\ub214\ub215\ub216\ub217\ub218\ub219\ub21a\ub21b\ub21c\ub21d\ub21e\ub21f\ub220\ub221\ub222\ub223\ub224\ub225\ub226\ub227\ub228\ub229\ub22a\ub22b\ub22c\ub22d\ub22e\ub22f\ub230\ub231\ub232\ub233\ub234\ub235\ub236\ub237\ub238\ub239\ub23a\ub23b\ub23c\ub23d\ub23e\ub23f\ub240\ub241\ub242\ub243\ub244\ub245\ub246\ub247\ub248\ub249\ub24a\ub24b\ub24c\ub24d\ub24e\ub24f\ub250\ub251\ub252\ub253\ub254\ub255\ub256\ub257\ub258\ub259\ub25a\ub25b\ub25c\ub25d\ub25e\ub25f\ub260\ub261\ub262\ub263\ub264\ub265\ub266\ub267\ub268\ub269\ub26a\ub26b\ub26c\ub26d\ub26e\ub26f\ub270\u
b271\ub272\ub273\ub274\ub275\ub276\ub277\ub278\ub279\ub27a\ub27b\ub27c\ub27d\ub27e\ub27f\ub280\ub281\ub282\ub283\ub284\ub285\ub286\ub287\ub288\ub289\ub28a\ub28b\ub28c\ub28d\ub28e\ub28f\ub290\ub291\ub292\ub293\ub294\ub295\ub296\ub297\ub298\ub299\ub29a\ub29b\ub29c\ub29d\ub29e\ub29f\ub2a0\ub2a1\ub2a2\ub2a3\ub2a4\ub2a5\ub2a6\ub2a7\ub2a8\ub2a9\ub2aa\ub2ab\ub2ac\ub2ad\ub2ae\ub2af\ub2b0\ub2b1\ub2b2\ub2b3\ub2b4\ub2b5\ub2b6\ub2b7\ub2b8\ub2b9\ub2ba\ub2bb\ub2bc\ub2bd\ub2be\ub2bf\ub2c0\ub2c1\ub2c2\ub2c3\ub2c4\ub2c5\ub2c6\ub2c7\ub2c8\ub2c9\ub2ca\ub2cb\ub2cc\ub2cd\ub2ce\ub2cf\ub2d0\ub2d1\ub2d2\ub2d3\ub2d4\ub2d5\ub2d6\ub2d7\ub2d8\ub2d9\ub2da\ub2db\ub2dc\ub2dd\ub2de\ub2df\ub2e0\ub2e1\ub2e2\ub2e3\ub2e4\ub2e5\ub2e6\ub2e7\ub2e8\ub2e9\ub2ea\ub2eb\ub2ec\ub2ed\ub2ee\ub2ef\ub2f0\ub2f1\ub2f2\ub2f3\ub2f4\ub2f5\ub2f6\ub2f7\ub2f8\ub2f9\ub2fa\ub2fb\ub2fc\ub2fd\ub2fe\ub2ff\ub300\ub301\ub302\ub303\ub304\ub305\ub306\ub307\ub308\ub309\ub30a\ub30b\ub30c\ub30d\ub30e\ub30f\ub310\ub311\ub312\ub313\ub314\ub315\ub316\ub317\ub318\ub319\ub31a\ub31b\ub31c\ub31d\ub31e\ub31f\ub320\ub321\ub322\ub323\ub324\ub325\ub326\ub327\ub328\ub329\ub32a\ub32b\ub32c\ub32d\ub32e\ub32f\ub330\ub331\ub332\ub333\ub334\ub335\ub336\ub337\ub338\ub339\ub33a\ub33b\ub33c\ub33d\ub33e\ub33f\ub340\ub341\ub342\ub343\ub344\ub345\ub346\ub347\ub348\ub349\ub34a\ub34b\ub34c\ub34d\ub34e\ub34f\ub350\ub351\ub352\ub353\ub354\ub355\ub356\ub357\ub358\ub359\ub35a\ub35b\ub35c\ub35d\ub35e\ub35f\ub360\ub361\ub362\ub363\ub364\ub365\ub366\ub367\ub368\ub369\ub36a\ub36b\ub36c\ub36d\ub36e\ub36f\ub370\ub371\ub372\ub373\ub374\ub375\ub376\ub377\ub378\ub379\ub37a\ub37b\ub37c\ub37d\ub37e\ub37f\ub380\ub381\ub382\ub383\ub384\ub385\ub386\ub387\ub388\ub389\ub38a\ub38b\ub38c\ub38d\ub38e\ub38f\ub390\ub391\ub392\ub393\ub394\ub395\ub396\ub397\ub398\ub399\ub39a\ub39b\ub39c\ub39d\ub39e\ub39f\ub3a0\ub3a1\ub3a2\ub3a3\ub3a4\ub3a5\ub3a6\ub3a7\ub3a8\ub3a9\ub3aa\ub3ab\ub3ac\ub3ad\ub3ae\ub3af\ub3b0\ub3b1\ub3b2\ub3b3\ub3b4\ub3b5\ub3b6\ub3b7\ub3b8\ub3b9\ub3ba\ub3bb\ub3bc\ub3bd\ub3
be\ub3bf\ub3c0\ub3c1\ub3c2\ub3c3\ub3c4\ub3c5\ub3c6\ub3c7\ub3c8\ub3c9\ub3ca\ub3cb\ub3cc\ub3cd\ub3ce\ub3cf\ub3d0\ub3d1\ub3d2\ub3d3\ub3d4\ub3d5\ub3d6\ub3d7\ub3d8\ub3d9\ub3da\ub3db\ub3dc\ub3dd\ub3de\ub3df\ub3e0\ub3e1\ub3e2\ub3e3\ub3e4\ub3e5\ub3e6\ub3e7\ub3e8\ub3e9\ub3ea\ub3eb\ub3ec\ub3ed\ub3ee\ub3ef\ub3f0\ub3f1\ub3f2\ub3f3\ub3f4\ub3f5\ub3f6\ub3f7\ub3f8\ub3f9\ub3fa\ub3fb\ub3fc\ub3fd\ub3fe\ub3ff\ub400\ub401\ub402\ub403\ub404\ub405\ub406\ub407\ub408\ub409\ub40a\ub40b\ub40c\ub40d\ub40e\ub40f\ub410\ub411\ub412\ub413\ub414\ub415\ub416\ub417\ub418\ub419\ub41a\ub41b\ub41c\ub41d\ub41e\ub41f\ub420\ub421\ub422\ub423\ub424\ub425\ub426\ub427\ub428\ub429\ub42a\ub42b\ub42c\ub42d\ub42e\ub42f\ub430\ub431\ub432\ub433\ub434\ub435\ub436\ub437\ub438\ub439\ub43a\ub43b\ub43c\ub43d\ub43e\ub43f\ub440\ub441\ub442\ub443\ub444\ub445\ub446\ub447\ub448\ub449\ub44a\ub44b\ub44c\ub44d\ub44e\ub44f\ub450\ub451\ub452\ub453\ub454\ub455\ub456\ub457\ub458\ub459\ub45a\ub45b\ub45c\ub45d\ub45e\ub45f\ub460\ub461\ub462\ub463\ub464\ub465\ub466\ub467\ub468\ub469\ub46a\ub46b\ub46c\ub46d\ub46e\ub46f\ub470\ub471\ub472\ub473\ub474\ub475\ub476\ub477\ub478\ub479\ub47a\ub47b\ub47c\ub47d\ub47e\ub47f\ub480\ub481\ub482\ub483\ub484\ub485\ub486\ub487\ub488\ub489\ub48a\ub48b\ub48c\ub48d\ub48e\ub48f\ub490\ub491\ub492\ub493\ub494\ub495\ub496\ub497\ub498\ub499\ub49a\ub49b\ub49c\ub49d\ub49e\ub49f\ub4a0\ub4a1\ub4a2\ub4a3\ub4a4\ub4a5\ub4a6\ub4a7\ub4a8\ub4a9\ub4aa\ub4ab\ub4ac\ub4ad\ub4ae\ub4af\ub4b0\ub4b1\ub4b2\ub4b3\ub4b4\ub4b5\ub4b6\ub4b7\ub4b8\ub4b9\ub4ba\ub4bb\ub4bc\ub4bd\ub4be\ub4bf\ub4c0\ub4c1\ub4c2\ub4c3\ub4c4\ub4c5\ub4c6\ub4c7\ub4c8\ub4c9\ub4ca\ub4cb\ub4cc\ub4cd\ub4ce\ub4cf\ub4d0\ub4d1\ub4d2\ub4d3\ub4d4\ub4d5\ub4d6\ub4d7\ub4d8\ub4d9\ub4da\ub4db\ub4dc\ub4dd\ub4de\ub4df\ub4e0\ub4e1\ub4e2\ub4e3\ub4e4\ub4e5\ub4e6\ub4e7\ub4e8\ub4e9\ub4ea\ub4eb\ub4ec\ub4ed\ub4ee\ub4ef\ub4f0\ub4f1\ub4f2\ub4f3\ub4f4\ub4f5\ub4f6\ub4f7\ub4f8\ub4f9\ub4fa\ub4fb\ub4fc\ub4fd\ub4fe\ub4ff\ub500\ub501\ub502\ub503\ub504\ub505\ub506\ub507\ub508\ub509\ub50a\ub50b
\ub50c\ub50d\ub50e\ub50f\ub510\ub511\ub512\ub513\ub514\ub515\ub516\ub517\ub518\ub519\ub51a\ub51b\ub51c\ub51d\ub51e\ub51f\ub520\ub521\ub522\ub523\ub524\ub525\ub526\ub527\ub528\ub529\ub52a\ub52b\ub52c\ub52d\ub52e\ub52f\ub530\ub531\ub532\ub533\ub534\ub535\ub536\ub537\ub538\ub539\ub53a\ub53b\ub53c\ub53d\ub53e\ub53f\ub540\ub541\ub542\ub543\ub544\ub545\ub546\ub547\ub548\ub549\ub54a\ub54b\ub54c\ub54d\ub54e\ub54f\ub550\ub551\ub552\ub553\ub554\ub555\ub556\ub557\ub558\ub559\ub55a\ub55b\ub55c\ub55d\ub55e\ub55f\ub560\ub561\ub562\ub563\ub564\ub565\ub566\ub567\ub568\ub569\ub56a\ub56b\ub56c\ub56d\ub56e\ub56f\ub570\ub571\ub572\ub573\ub574\ub575\ub576\ub577\ub578\ub579\ub57a\ub57b\ub57c\ub57d\ub57e\ub57f\ub580\ub581\ub582\ub583\ub584\ub585\ub586\ub587\ub588\ub589\ub58a\ub58b\ub58c\ub58d\ub58e\ub58f\ub590\ub591\ub592\ub593\ub594\ub595\ub596\ub597\ub598\ub599\ub59a\ub59b\ub59c\ub59d\ub59e\ub59f\ub5a0\ub5a1\ub5a2\ub5a3\ub5a4\ub5a5\ub5a6\ub5a7\ub5a8\ub5a9\ub5aa\ub5ab\ub5ac\ub5ad\ub5ae\ub5af\ub5b0\ub5b1\ub5b2\ub5b3\ub5b4\ub5b5\ub5b6\ub5b7\ub5b8\ub5b9\ub5ba\ub5bb\ub5bc\ub5bd\ub5be\ub5bf\ub5c0\ub5c1\ub5c2\ub5c3\ub5c4\ub5c5\ub5c6\ub5c7\ub5c8\ub5c9\ub5ca\ub5cb\ub5cc\ub5cd\ub5ce\ub5cf\ub5d0\ub5d1\ub5d2\ub5d3\ub5d4\ub5d5\ub5d6\ub5d7\ub5d8\ub5d9\ub5da\ub5db\ub5dc\ub5dd\ub5de\ub5df\ub5e0\ub5e1\ub5e2\ub5e3\ub5e4\ub5e5\ub5e6\ub5e7\ub5e8\ub5e9\ub5ea\ub5eb\ub5ec\ub5ed\ub5ee\ub5ef\ub5f0\ub5f1\ub5f2\ub5f3\ub5f4\ub5f5\ub5f6\ub5f7\ub5f8\ub5f9\ub5fa\ub5fb\ub5fc\ub5fd\ub5fe\ub5ff\ub600\ub601\ub602\ub603\ub604\ub605\ub606\ub607\ub608\ub609\ub60a\ub60b\ub60c\ub60d\ub60e\ub60f\ub610\ub611\ub612\ub613\ub614\ub615\ub616\ub617\ub618\ub619\ub61a\ub61b\ub61c\ub61d\ub61e\ub61f\ub620\ub621\ub622\ub623\ub624\ub625\ub626\ub627\ub628\ub629\ub62a\ub62b\ub62c\ub62d\ub62e\ub62f\ub630\ub631\ub632\ub633\ub634\ub635\ub636\ub637\ub638\ub639\ub63a\ub63b\ub63c\ub63d\ub63e\ub63f\ub640\ub641\ub642\ub643\ub644\ub645\ub646\ub647\ub648\ub649\ub64a\ub64b\ub64c\ub64d\ub64e\ub64f\ub650\ub651\ub652\ub653\ub654\ub655\ub656\ub657\ub658\u
b659\ub65a\ub65b\ub65c\ub65d\ub65e\ub65f\ub660\ub661\ub662\ub663\ub664\ub665\ub666\ub667\ub668\ub669\ub66a\ub66b\ub66c\ub66d\ub66e\ub66f\ub670\ub671\ub672\ub673\ub674\ub675\ub676\ub677\ub678\ub679\ub67a\ub67b\ub67c\ub67d\ub67e\ub67f\ub680\ub681\ub682\ub683\ub684\ub685\ub686\ub687\ub688\ub689\ub68a\ub68b\ub68c\ub68d\ub68e\ub68f\ub690\ub691\ub692\ub693\ub694\ub695\ub696\ub697\ub698\ub699\ub69a\ub69b\ub69c\ub69d\ub69e\ub69f\ub6a0\ub6a1\ub6a2\ub6a3\ub6a4\ub6a5\ub6a6\ub6a7\ub6a8\ub6a9\ub6aa\ub6ab\ub6ac\ub6ad\ub6ae\ub6af\ub6b0\ub6b1\ub6b2\ub6b3\ub6b4\ub6b5\ub6b6\ub6b7\ub6b8\ub6b9\ub6ba\ub6bb\ub6bc\ub6bd\ub6be\ub6bf\ub6c0\ub6c1\ub6c2\ub6c3\ub6c4\ub6c5\ub6c6\ub6c7\ub6c8\ub6c9\ub6ca\ub6cb\ub6cc\ub6cd\ub6ce\ub6cf\ub6d0\ub6d1\ub6d2\ub6d3\ub6d4\ub6d5\ub6d6\ub6d7\ub6d8\ub6d9\ub6da\ub6db\ub6dc\ub6dd\ub6de\ub6df\ub6e0\ub6e1\ub6e2\ub6e3\ub6e4\ub6e5\ub6e6\ub6e7\ub6e8\ub6e9\ub6ea\ub6eb\ub6ec\ub6ed\ub6ee\ub6ef\ub6f0\ub6f1\ub6f2\ub6f3\ub6f4\ub6f5\ub6f6\ub6f7\ub6f8\ub6f9\ub6fa\ub6fb\ub6fc\ub6fd\ub6fe\ub6ff\ub700\ub701\ub702\ub703\ub704\ub705\ub706\ub707\ub708\ub709\ub70a\ub70b\ub70c\ub70d\ub70e\ub70f\ub710\ub711\ub712\ub713\ub714\ub715\ub716\ub717\ub718\ub719\ub71a\ub71b\ub71c\ub71d\ub71e\ub71f\ub720\ub721\ub722\ub723\ub724\ub725\ub726\ub727\ub728\ub729\ub72a\ub72b\ub72c\ub72d\ub72e\ub72f\ub730\ub731\ub732\ub733\ub734\ub735\ub736\ub737\ub738\ub739\ub73a\ub73b\ub73c\ub73d\ub73e\ub73f\ub740\ub741\ub742\ub743\ub744\ub745\ub746\ub747\ub748\ub749\ub74a\ub74b\ub74c\ub74d\ub74e\ub74f\ub750\ub751\ub752\ub753\ub754\ub755\ub756\ub757\ub758\ub759\ub75a\ub75b\ub75c\ub75d\ub75e\ub75f\ub760\ub761\ub762\ub763\ub764\ub765\ub766\ub767\ub768\ub769\ub76a\ub76b\ub76c\ub76d\ub76e\ub76f\ub770\ub771\ub772\ub773\ub774\ub775\ub776\ub777\ub778\ub779\ub77a\ub77b\ub77c\ub77d\ub77e\ub77f\ub780\ub781\ub782\ub783\ub784\ub785\ub786\ub787\ub788\ub789\ub78a\ub78b\ub78c\ub78d\ub78e\ub78f\ub790\ub791\ub792\ub793\ub794\ub795\ub796\ub797\ub798\ub799\ub79a\ub79b\ub79c\ub79d\ub79e\ub79f\ub7a0\ub7a1\ub7a2\ub7a3\ub7a4\ub7a5\ub7
a6\ub7a7\ub7a8\ub7a9\ub7aa\ub7ab\ub7ac\ub7ad\ub7ae\ub7af\ub7b0\ub7b1\ub7b2\ub7b3\ub7b4\ub7b5\ub7b6\ub7b7\ub7b8\ub7b9\ub7ba\ub7bb\ub7bc\ub7bd\ub7be\ub7bf\ub7c0\ub7c1\ub7c2\ub7c3\ub7c4\ub7c5\ub7c6\ub7c7\ub7c8\ub7c9\ub7ca\ub7cb\ub7cc\ub7cd\ub7ce\ub7cf\ub7d0\ub7d1\ub7d2\ub7d3\ub7d4\ub7d5\ub7d6\ub7d7\ub7d8\ub7d9\ub7da\ub7db\ub7dc\ub7dd\ub7de\ub7df\ub7e0\ub7e1\ub7e2\ub7e3\ub7e4\ub7e5\ub7e6\ub7e7\ub7e8\ub7e9\ub7ea\ub7eb\ub7ec\ub7ed\ub7ee\ub7ef\ub7f0\ub7f1\ub7f2\ub7f3\ub7f4\ub7f5\ub7f6\ub7f7\ub7f8\ub7f9\ub7fa\ub7fb\ub7fc\ub7fd\ub7fe\ub7ff\ub800\ub801\ub802\ub803\ub804\ub805\ub806\ub807\ub808\ub809\ub80a\ub80b\ub80c\ub80d\ub80e\ub80f\ub810\ub811\ub812\ub813\ub814\ub815\ub816\ub817\ub818\ub819\ub81a\ub81b\ub81c\ub81d\ub81e\ub81f\ub820\ub821\ub822\ub823\ub824\ub825\ub826\ub827\ub828\ub829\ub82a\ub82b\ub82c\ub82d\ub82e\ub82f\ub830\ub831\ub832\ub833\ub834\ub835\ub836\ub837\ub838\ub839\ub83a\ub83b\ub83c\ub83d\ub83e\ub83f\ub840\ub841\ub842\ub843\ub844\ub845\ub846\ub847\ub848\ub849\ub84a\ub84b\ub84c\ub84d\ub84e\ub84f\ub850\ub851\ub852\ub853\ub854\ub855\ub856\ub857\ub858\ub859\ub85a\ub85b\ub85c\ub85d\ub85e\ub85f\ub860\ub861\ub862\ub863\ub864\ub865\ub866\ub867\ub868\ub869\ub86a\ub86b\ub86c\ub86d\ub86e\ub86f\ub870\ub871\ub872\ub873\ub874\ub875\ub876\ub877\ub878\ub879\ub87a\ub87b\ub87c\ub87d\ub87e\ub87f\ub880\ub881\ub882\ub883\ub884\ub885\ub886\ub887\ub888\ub889\ub88a\ub88b\ub88c\ub88d\ub88e\ub88f\ub890\ub891\ub892\ub893\ub894\ub895\ub896\ub897\ub898\ub899\ub89a\ub89b\ub89c\ub89d\ub89e\ub89f\ub8a0\ub8a1\ub8a2\ub8a3\ub8a4\ub8a5\ub8a6\ub8a7\ub8a8\ub8a9\ub8aa\ub8ab\ub8ac\ub8ad\ub8ae\ub8af\ub8b0\ub8b1\ub8b2\ub8b3\ub8b4\ub8b5\ub8b6\ub8b7\ub8b8\ub8b9\ub8ba\ub8bb\ub8bc\ub8bd\ub8be\ub8bf\ub8c0\ub8c1\ub8c2\ub8c3\ub8c4\ub8c5\ub8c6\ub8c7\ub8c8\ub8c9\ub8ca\ub8cb\ub8cc\ub8cd\ub8ce\ub8cf\ub8d0\ub8d1\ub8d2\ub8d3\ub8d4\ub8d5\ub8d6\ub8d7\ub8d8\ub8d9\ub8da\ub8db\ub8dc\ub8dd\ub8de\ub8df\ub8e0\ub8e1\ub8e2\ub8e3\ub8e4\ub8e5\ub8e6\ub8e7\ub8e8\ub8e9\ub8ea\ub8eb\ub8ec\ub8ed\ub8ee\ub8ef\ub8f0\ub8f1\ub8f2\ub8f3
\ub8f4\ub8f5\ub8f6\ub8f7\ub8f8\ub8f9\ub8fa\ub8fb\ub8fc\ub8fd\ub8fe\ub8ff\ub900\ub901\ub902\ub903\ub904\ub905\ub906\ub907\ub908\ub909\ub90a\ub90b\ub90c\ub90d\ub90e\ub90f\ub910\ub911\ub912\ub913\ub914\ub915\ub916\ub917\ub918\ub919\ub91a\ub91b\ub91c\ub91d\ub91e\ub91f\ub920\ub921\ub922\ub923\ub924\ub925\ub926\ub927\ub928\ub929\ub92a\ub92b\ub92c\ub92d\ub92e\ub92f\ub930\ub931\ub932\ub933\ub934\ub935\ub936\ub937\ub938\ub939\ub93a\ub93b\ub93c\ub93d\ub93e\ub93f\ub940\ub941\ub942\ub943\ub944\ub945\ub946\ub947\ub948\ub949\ub94a\ub94b\ub94c\ub94d\ub94e\ub94f\ub950\ub951\ub952\ub953\ub954\ub955\ub956\ub957\ub958\ub959\ub95a\ub95b\ub95c\ub95d\ub95e\ub95f\ub960\ub961\ub962\ub963\ub964\ub965\ub966\ub967\ub968\ub969\ub96a\ub96b\ub96c\ub96d\ub96e\ub96f\ub970\ub971\ub972\ub973\ub974\ub975\ub976\ub977\ub978\ub979\ub97a\ub97b\ub97c\ub97d\ub97e\ub97f\ub980\ub981\ub982\ub983\ub984\ub985\ub986\ub987\ub988\ub989\ub98a\ub98b\ub98c\ub98d\ub98e\ub98f\ub990\ub991\ub992\ub993\ub994\ub995\ub996\ub997\ub998\ub999\ub99a\ub99b\ub99c\ub99d\ub99e\ub99f\ub9a0\ub9a1\ub9a2\ub9a3\ub9a4\ub9a5\ub9a6\ub9a7\ub9a8\ub9a9\ub9aa\ub9ab\ub9ac\ub9ad\ub9ae\ub9af\ub9b0\ub9b1\ub9b2\ub9b3\ub9b4\ub9b5\ub9b6\ub9b7\ub9b8\ub9b9\ub9ba\ub9bb\ub9bc\ub9bd\ub9be\ub9bf\ub9c0\ub9c1\ub9c2\ub9c3\ub9c4\ub9c5\ub9c6\ub9c7\ub9c8\ub9c9\ub9ca\ub9cb\ub9cc\ub9cd\ub9ce\ub9cf\ub9d0\ub9d1\ub9d2\ub9d3\ub9d4\ub9d5\ub9d6\ub9d7\ub9d8\ub9d9\ub9da\ub9db\ub9dc\ub9dd\ub9de\ub9df\ub9e0\ub9e1\ub9e2\ub9e3\ub9e4\ub9e5\ub9e6\ub9e7\ub9e8\ub9e9\ub9ea\ub9eb\ub9ec\ub9ed\ub9ee\ub9ef\ub9f0\ub9f1\ub9f2\ub9f3\ub9f4\ub9f5\ub9f6\ub9f7\ub9f8\ub9f9\ub9fa\ub9fb\ub9fc\ub9fd\ub9fe\ub9ff\uba00\uba01\uba02\uba03\uba04\uba05\uba06\uba07\uba08\uba09\uba0a\uba0b\uba0c\uba0d\uba0e\uba0f\uba10\uba11\uba12\uba13\uba14\uba15\uba16\uba17\uba18\uba19\uba1a\uba1b\uba1c\uba1d\uba1e\uba1f\uba20\uba21\uba22\uba23\uba24\uba25\uba26\uba27\uba28\uba29\uba2a\uba2b\uba2c\uba2d\uba2e\uba2f\uba30\uba31\uba32\uba33\uba34\uba35\uba36\uba37\uba38\uba39\uba3a\uba3b\uba3c\uba3d\uba3e\uba3f\uba40\u
ba41\uba42\uba43\uba44\uba45\uba46\uba47\uba48\uba49\uba4a\uba4b\uba4c\uba4d\uba4e\uba4f\uba50\uba51\uba52\uba53\uba54\uba55\uba56\uba57\uba58\uba59\uba5a\uba5b\uba5c\uba5d\uba5e\uba5f\uba60\uba61\uba62\uba63\uba64\uba65\uba66\uba67\uba68\uba69\uba6a\uba6b\uba6c\uba6d\uba6e\uba6f\uba70\uba71\uba72\uba73\uba74\uba75\uba76\uba77\uba78\uba79\uba7a\uba7b\uba7c\uba7d\uba7e\uba7f\uba80\uba81\uba82\uba83\uba84\uba85\uba86\uba87\uba88\uba89\uba8a\uba8b\uba8c\uba8d\uba8e\uba8f\uba90\uba91\uba92\uba93\uba94\uba95\uba96\uba97\uba98\uba99\uba9a\uba9b\uba9c\uba9d\uba9e\uba9f\ubaa0\ubaa1\ubaa2\ubaa3\ubaa4\ubaa5\ubaa6\ubaa7\ubaa8\ubaa9\ubaaa\ubaab\ubaac\ubaad\ubaae\ubaaf\ubab0\ubab1\ubab2\ubab3\ubab4\ubab5\ubab6\ubab7\ubab8\ubab9\ubaba\ubabb\ubabc\ubabd\ubabe\ubabf\ubac0\ubac1\ubac2\ubac3\ubac4\ubac5\ubac6\ubac7\ubac8\ubac9\ubaca\ubacb\ubacc\ubacd\ubace\ubacf\ubad0\ubad1\ubad2\ubad3\ubad4\ubad5\ubad6\ubad7\ubad8\ubad9\ubada\ubadb\ubadc\ubadd\ubade\ubadf\ubae0\ubae1\ubae2\ubae3\ubae4\ubae5\ubae6\ubae7\ubae8\ubae9\ubaea\ubaeb\ubaec\ubaed\ubaee\ubaef\ubaf0\ubaf1\ubaf2\ubaf3\ubaf4\ubaf5\ubaf6\ubaf7\ubaf8\ubaf9\ubafa\ubafb\ubafc\ubafd\ubafe\ubaff\ubb00\ubb01\ubb02\ubb03\ubb04\ubb05\ubb06\ubb07\ubb08\ubb09\ubb0a\ubb0b\ubb0c\ubb0d\ubb0e\ubb0f\ubb10\ubb11\ubb12\ubb13\ubb14\ubb15\ubb16\ubb17\ubb18\ubb19\ubb1a\ubb1b\ubb1c\ubb1d\ubb1e\ubb1f\ubb20\ubb21\ubb22\ubb23\ubb24\ubb25\ubb26\ubb27\ubb28\ubb29\ubb2a\ubb2b\ubb2c\ubb2d\ubb2e\ubb2f\ubb30\ubb31\ubb32\ubb33\ubb34\ubb35\ubb36\ubb37\ubb38\ubb39\ubb3a\ubb3b\ubb3c\ubb3d\ubb3e\ubb3f\ubb40\ubb41\ubb42\ubb43\ubb44\ubb45\ubb46\ubb47\ubb48\ubb49\ubb4a\ubb4b\ubb4c\ubb4d\ubb4e\ubb4f\ubb50\ubb51\ubb52\ubb53\ubb54\ubb55\ubb56\ubb57\ubb58\ubb59\ubb5a\ubb5b\ubb5c\ubb5d\ubb5e\ubb5f\ubb60\ubb61\ubb62\ubb63\ubb64\ubb65\ubb66\ubb67\ubb68\ubb69\ubb6a\ubb6b\ubb6c\ubb6d\ubb6e\ubb6f\ubb70\ubb71\ubb72\ubb73\ubb74\ubb75\ubb76\ubb77\ubb78\ubb79\ubb7a\ubb7b\ubb7c\ubb7d\ubb7e\ubb7f\ubb80\ubb81\ubb82\ubb83\ubb84\ubb85\ubb86\ubb87\ubb88\ubb89\ubb8a\ubb8b\ubb8c\ubb8d\ubb
8e\ubb8f\ubb90\ubb91\ubb92\ubb93\ubb94\ubb95\ubb96\ubb97\ubb98\ubb99\ubb9a\ubb9b\ubb9c\ubb9d\ubb9e\ubb9f\ubba0\ubba1\ubba2\ubba3\ubba4\ubba5\ubba6\ubba7\ubba8\ubba9\ubbaa\ubbab\ubbac\ubbad\ubbae\ubbaf\ubbb0\ubbb1\ubbb2\ubbb3\ubbb4\ubbb5\ubbb6\ubbb7\ubbb8\ubbb9\ubbba\ubbbb\ubbbc\ubbbd\ubbbe\ubbbf\ubbc0\ubbc1\ubbc2\ubbc3\ubbc4\ubbc5\ubbc6\ubbc7\ubbc8\ubbc9\ubbca\ubbcb\ubbcc\ubbcd\ubbce\ubbcf\ubbd0\ubbd1\ubbd2\ubbd3\ubbd4\ubbd5\ubbd6\ubbd7\ubbd8\ubbd9\ubbda\ubbdb\ubbdc\ubbdd\ubbde\ubbdf\ubbe0\ubbe1\ubbe2\ubbe3\ubbe4\ubbe5\ubbe6\ubbe7\ubbe8\ubbe9\ubbea\ubbeb\ubbec\ubbed\ubbee\ubbef\ubbf0\ubbf1\ubbf2\ubbf3\ubbf4\ubbf5\ubbf6\ubbf7\ubbf8\ubbf9\ubbfa\ubbfb\ubbfc\ubbfd\ubbfe\ubbff\ubc00\ubc01\ubc02\ubc03\ubc04\ubc05\ubc06\ubc07\ubc08\ubc09\ubc0a\ubc0b\ubc0c\ubc0d\ubc0e\ubc0f\ubc10\ubc11\ubc12\ubc13\ubc14\ubc15\ubc16\ubc17\ubc18\ubc19\ubc1a\ubc1b\ubc1c\ubc1d\ubc1e\ubc1f\ubc20\ubc21\ubc22\ubc23\ubc24\ubc25\ubc26\ubc27\ubc28\ubc29\ubc2a\ubc2b\ubc2c\ubc2d\ubc2e\ubc2f\ubc30\ubc31\ubc32\ubc33\ubc34\ubc35\ubc36\ubc37\ubc38\ubc39\ubc3a\ubc3b\ubc3c\ubc3d\ubc3e\ubc3f\ubc40\ubc41\ubc42\ubc43\ubc44\ubc45\ubc46\ubc47\ubc48\ubc49\ubc4a\ubc4b\ubc4c\ubc4d\ubc4e\ubc4f\ubc50\ubc51\ubc52\ubc53\ubc54\ubc55\ubc56\ubc57\ubc58\ubc59\ubc5a\ubc5b\ubc5c\ubc5d\ubc5e\ubc5f\ubc60\ubc61\ubc62\ubc63\ubc64\ubc65\ubc66\ubc67\ubc68\ubc69\ubc6a\ubc6b\ubc6c\ubc6d\ubc6e\ubc6f\ubc70\ubc71\ubc72\ubc73\ubc74\ubc75\ubc76\ubc77\ubc78\ubc79\ubc7a\ubc7b\ubc7c\ubc7d\ubc7e\ubc7f\ubc80\ubc81\ubc82\ubc83\ubc84\ubc85\ubc86\ubc87\ubc88\ubc89\ubc8a\ubc8b\ubc8c\ubc8d\ubc8e\ubc8f\ubc90\ubc91\ubc92\ubc93\ubc94\ubc95\ubc96\ubc97\ubc98\ubc99\ubc9a\ubc9b\ubc9c\ubc9d\ubc9e\ubc9f\ubca0\ubca1\ubca2\ubca3\ubca4\ubca5\ubca6\ubca7\ubca8\ubca9\ubcaa\ubcab\ubcac\ubcad\ubcae\ubcaf\ubcb0\ubcb1\ubcb2\ubcb3\ubcb4\ubcb5\ubcb6\ubcb7\ubcb8\ubcb9\ubcba\ubcbb\ubcbc\ubcbd\ubcbe\ubcbf\ubcc0\ubcc1\ubcc2\ubcc3\ubcc4\ubcc5\ubcc6\ubcc7\ubcc8\ubcc9\ubcca\ubccb\ubccc\ubccd\ubcce\ubccf\ubcd0\ubcd1\ubcd2\ubcd3\ubcd4\ubcd5\ubcd6\ubcd7\ubcd8\ubcd9\ubcda\ubcdb
\ubcdc\ubcdd\ubcde\ubcdf\ubce0\ubce1\ubce2\ubce3\ubce4\ubce5\ubce6\ubce7\ubce8\ubce9\ubcea\ubceb\ubcec\ubced\ubcee\ubcef\ubcf0\ubcf1\ubcf2\ubcf3\ubcf4\ubcf5\ubcf6\ubcf7\ubcf8\ubcf9\ubcfa\ubcfb\ubcfc\ubcfd\ubcfe\ubcff\ubd00\ubd01\ubd02\ubd03\ubd04\ubd05\ubd06\ubd07\ubd08\ubd09\ubd0a\ubd0b\ubd0c\ubd0d\ubd0e\ubd0f\ubd10\ubd11\ubd12\ubd13\ubd14\ubd15\ubd16\ubd17\ubd18\ubd19\ubd1a\ubd1b\ubd1c\ubd1d\ubd1e\ubd1f\ubd20\ubd21\ubd22\ubd23\ubd24\ubd25\ubd26\ubd27\ubd28\ubd29\ubd2a\ubd2b\ubd2c\ubd2d\ubd2e\ubd2f\ubd30\ubd31\ubd32\ubd33\ubd34\ubd35\ubd36\ubd37\ubd38\ubd39\ubd3a\ubd3b\ubd3c\ubd3d\ubd3e\ubd3f\ubd40\ubd41\ubd42\ubd43\ubd44\ubd45\ubd46\ubd47\ubd48\ubd49\ubd4a\ubd4b\ubd4c\ubd4d\ubd4e\ubd4f\ubd50\ubd51\ubd52\ubd53\ubd54\ubd55\ubd56\ubd57\ubd58\ubd59\ubd5a\ubd5b\ubd5c\ubd5d\ubd5e\ubd5f\ubd60\ubd61\ubd62\ubd63\ubd64\ubd65\ubd66\ubd67\ubd68\ubd69\ubd6a\ubd6b\ubd6c\ubd6d\ubd6e\ubd6f\ubd70\ubd71\ubd72\ubd73\ubd74\ubd75\ubd76\ubd77\ubd78\ubd79\ubd7a\ubd7b\ubd7c\ubd7d\ubd7e\ubd7f\ubd80\ubd81\ubd82\ubd83\ubd84\ubd85\ubd86\ubd87\ubd88\ubd89\ubd8a\ubd8b\ubd8c\ubd8d\ubd8e\ubd8f\ubd90\ubd91\ubd92\ubd93\ubd94\ubd95\ubd96\ubd97\ubd98\ubd99\ubd9a\ubd9b\ubd9c\ubd9d\ubd9e\ubd9f\ubda0\ubda1\ubda2\ubda3\ubda4\ubda5\ubda6\ubda7\ubda8\ubda9\ubdaa\ubdab\ubdac\ubdad\ubdae\ubdaf\ubdb0\ubdb1\ubdb2\ubdb3\ubdb4\ubdb5\ubdb6\ubdb7\ubdb8\ubdb9\ubdba\ubdbb\ubdbc\ubdbd\ubdbe\ubdbf\ubdc0\ubdc1\ubdc2\ubdc3\ubdc4\ubdc5\ubdc6\ubdc7\ubdc8\ubdc9\ubdca\ubdcb\ubdcc\ubdcd\ubdce\ubdcf\ubdd0\ubdd1\ubdd2\ubdd3\ubdd4\ubdd5\ubdd6\ubdd7\ubdd8\ubdd9\ubdda\ubddb\ubddc\ubddd\ubdde\ubddf\ubde0\ubde1\ubde2\ubde3\ubde4\ubde5\ubde6\ubde7\ubde8\ubde9\ubdea\ubdeb\ubdec\ubded\ubdee\ubdef\ubdf0\ubdf1\ubdf2\ubdf3\ubdf4\ubdf5\ubdf6\ubdf7\ubdf8\ubdf9\ubdfa\ubdfb\ubdfc\ubdfd\ubdfe\ubdff\ube00\ube01\ube02\ube03\ube04\ube05\ube06\ube07\ube08\ube09\ube0a\ube0b\ube0c\ube0d\ube0e\ube0f\ube10\ube11\ube12\ube13\ube14\ube15\ube16\ube17\ube18\ube19\ube1a\ube1b\ube1c\ube1d\ube1e\ube1f\ube20\ube21\ube22\ube23\ube24\ube25\ube26\ube27\ube28\u
be29\ube2a\ube2b\ube2c\ube2d\ube2e\ube2f\ube30\ube31\ube32\ube33\ube34\ube35\ube36\ube37\ube38\ube39\ube3a\ube3b\ube3c\ube3d\ube3e\ube3f\ube40\ube41\ube42\ube43\ube44\ube45\ube46\ube47\ube48\ube49\ube4a\ube4b\ube4c\ube4d\ube4e\ube4f\ube50\ube51\ube52\ube53\ube54\ube55\ube56\ube57\ube58\ube59\ube5a\ube5b\ube5c\ube5d\ube5e\ube5f\ube60\ube61\ube62\ube63\ube64\ube65\ube66\ube67\ube68\ube69\ube6a\ube6b\ube6c\ube6d\ube6e\ube6f\ube70\ube71\ube72\ube73\ube74\ube75\ube76\ube77\ube78\ube79\ube7a\ube7b\ube7c\ube7d\ube7e\ube7f\ube80\ube81\ube82\ube83\ube84\ube85\ube86\ube87\ube88\ube89\ube8a\ube8b\ube8c\ube8d\ube8e\ube8f\ube90\ube91\ube92\ube93\ube94\ube95\ube96\ube97\ube98\ube99\ube9a\ube9b\ube9c\ube9d\ube9e\ube9f\ubea0\ubea1\ubea2\ubea3\ubea4\ubea5\ubea6\ubea7\ubea8\ubea9\ubeaa\ubeab\ubeac\ubead\ubeae\ubeaf\ubeb0\ubeb1\ubeb2\ubeb3\ubeb4\ubeb5\ubeb6\ubeb7\ubeb8\ubeb9\ubeba\ubebb\ubebc\ubebd\ubebe\ubebf\ubec0\ubec1\ubec2\ubec3\ubec4\ubec5\ubec6\ubec7\ubec8\ubec9\ubeca\ubecb\ubecc\ubecd\ubece\ubecf\ubed0\ubed1\ubed2\ubed3\ubed4\ubed5\ubed6\ubed7\ubed8\ubed9\ubeda\ubedb\ubedc\ubedd\ubede\ubedf\ubee0\ubee1\ubee2\ubee3\ubee4\ubee5\ubee6\ubee7\ubee8\ubee9\ubeea\ubeeb\ubeec\ubeed\ubeee\ubeef\ubef0\ubef1\ubef2\ubef3\ubef4\ubef5\ubef6\ubef7\ubef8\ubef9\ubefa\ubefb\ubefc\ubefd\ubefe\ubeff\ubf00\ubf01\ubf02\ubf03\ubf04\ubf05\ubf06\ubf07\ubf08\ubf09\ubf0a\ubf0b\ubf0c\ubf0d\ubf0e\ubf0f\ubf10\ubf11\ubf12\ubf13\ubf14\ubf15\ubf16\ubf17\ubf18\ubf19\ubf1a\ubf1b\ubf1c\ubf1d\ubf1e\ubf1f\ubf20\ubf21\ubf22\ubf23\ubf24\ubf25\ubf26\ubf27\ubf28\ubf29\ubf2a\ubf2b\ubf2c\ubf2d\ubf2e\ubf2f\ubf30\ubf31\ubf32\ubf33\ubf34\ubf35\ubf36\ubf37\ubf38\ubf39\ubf3a\ubf3b\ubf3c\ubf3d\ubf3e\ubf3f\ubf40\ubf41\ubf42\ubf43\ubf44\ubf45\ubf46\ubf47\ubf48\ubf49\ubf4a\ubf4b\ubf4c\ubf4d\ubf4e\ubf4f\ubf50\ubf51\ubf52\ubf53\ubf54\ubf55\ubf56\ubf57\ubf58\ubf59\ubf5a\ubf5b\ubf5c\ubf5d\ubf5e\ubf5f\ubf60\ubf61\ubf62\ubf63\ubf64\ubf65\ubf66\ubf67\ubf68\ubf69\ubf6a\ubf6b\ubf6c\ubf6d\ubf6e\ubf6f\ubf70\ubf71\ubf72\ubf73\ubf74\ubf75\ubf
76\ubf77\ubf78\ubf79\ubf7a\ubf7b\ubf7c\ubf7d\ubf7e\ubf7f\ubf80\ubf81\ubf82\ubf83\ubf84\ubf85\ubf86\ubf87\ubf88\ubf89\ubf8a\ubf8b\ubf8c\ubf8d\ubf8e\ubf8f\ubf90\ubf91\ubf92\ubf93\ubf94\ubf95\ubf96\ubf97\ubf98\ubf99\ubf9a\ubf9b\ubf9c\ubf9d\ubf9e\ubf9f\ubfa0\ubfa1\ubfa2\ubfa3\ubfa4\ubfa5\ubfa6\ubfa7\ubfa8\ubfa9\ubfaa\ubfab\ubfac\ubfad\ubfae\ubfaf\ubfb0\ubfb1\ubfb2\ubfb3\ubfb4\ubfb5\ubfb6\ubfb7\ubfb8\ubfb9\ubfba\ubfbb\ubfbc\ubfbd\ubfbe\ubfbf\ubfc0\ubfc1\ubfc2\ubfc3\ubfc4\ubfc5\ubfc6\ubfc7\ubfc8\ubfc9\ubfca\ubfcb\ubfcc\ubfcd\ubfce\ubfcf\ubfd0\ubfd1\ubfd2\ubfd3\ubfd4\ubfd5\ubfd6\ubfd7\ubfd8\ubfd9\ubfda\ubfdb\ubfdc\ubfdd\ubfde\ubfdf\ubfe0\ubfe1\ubfe2\ubfe3\ubfe4\ubfe5\ubfe6\ubfe7\ubfe8\ubfe9\ubfea\ubfeb\ubfec\ubfed\ubfee\ubfef\ubff0\ubff1\ubff2\ubff3\ubff4\ubff5\ubff6\ubff7\ubff8\ubff9\ubffa\ubffb\ubffc\ubffd\ubffe\ubfff\uc000\uc001\uc002\uc003\uc004\uc005\uc006\uc007\uc008\uc009\uc00a\uc00b\uc00c\uc00d\uc00e\uc00f\uc010\uc011\uc012\uc013\uc014\uc015\uc016\uc017\uc018\uc019\uc01a\uc01b\uc01c\uc01d\uc01e\uc01f\uc020\uc021\uc022\uc023\uc024\uc025\uc026\uc027\uc028\uc029\uc02a\uc02b\uc02c\uc02d\uc02e\uc02f\uc030\uc031\uc032\uc033\uc034\uc035\uc036\uc037\uc038\uc039\uc03a\uc03b\uc03c\uc03d\uc03e\uc03f\uc040\uc041\uc042\uc043\uc044\uc045\uc046\uc047\uc048\uc049\uc04a\uc04b\uc04c\uc04d\uc04e\uc04f\uc050\uc051\uc052\uc053\uc054\uc055\uc056\uc057\uc058\uc059\uc05a\uc05b\uc05c\uc05d\uc05e\uc05f\uc060\uc061\uc062\uc063\uc064\uc065\uc066\uc067\uc068\uc069\uc06a\uc06b\uc06c\uc06d\uc06e\uc06f\uc070\uc071\uc072\uc073\uc074\uc075\uc076\uc077\uc078\uc079\uc07a\uc07b\uc07c\uc07d\uc07e\uc07f\uc080\uc081\uc082\uc083\uc084\uc085\uc086\uc087\uc088\uc089\uc08a\uc08b\uc08c\uc08d\uc08e\uc08f\uc090\uc091\uc092\uc093\uc094\uc095\uc096\uc097\uc098\uc099\uc09a\uc09b\uc09c\uc09d\uc09e\uc09f\uc0a0\uc0a1\uc0a2\uc0a3\uc0a4\uc0a5\uc0a6\uc0a7\uc0a8\uc0a9\uc0aa\uc0ab\uc0ac\uc0ad\uc0ae\uc0af\uc0b0\uc0b1\uc0b2\uc0b3\uc0b4\uc0b5\uc0b6\uc0b7\uc0b8\uc0b9\uc0ba\uc0bb\uc0bc\uc0bd\uc0be\uc0bf\uc0c0\uc0c1\uc0c2\uc0c3
\uc0c4\uc0c5\uc0c6\uc0c7\uc0c8\uc0c9\uc0ca\uc0cb\uc0cc\uc0cd\uc0ce\uc0cf\uc0d0\uc0d1\uc0d2\uc0d3\uc0d4\uc0d5\uc0d6\uc0d7\uc0d8\uc0d9\uc0da\uc0db\uc0dc\uc0dd\uc0de\uc0df\uc0e0\uc0e1\uc0e2\uc0e3\uc0e4\uc0e5\uc0e6\uc0e7\uc0e8\uc0e9\uc0ea\uc0eb\uc0ec\uc0ed\uc0ee\uc0ef\uc0f0\uc0f1\uc0f2\uc0f3\uc0f4\uc0f5\uc0f6\uc0f7\uc0f8\uc0f9\uc0fa\uc0fb\uc0fc\uc0fd\uc0fe\uc0ff\uc100\uc101\uc102\uc103\uc104\uc105\uc106\uc107\uc108\uc109\uc10a\uc10b\uc10c\uc10d\uc10e\uc10f\uc110\uc111\uc112\uc113\uc114\uc115\uc116\uc117\uc118\uc119\uc11a\uc11b\uc11c\uc11d\uc11e\uc11f\uc120\uc121\uc122\uc123\uc124\uc125\uc126\uc127\uc128\uc129\uc12a\uc12b\uc12c\uc12d\uc12e\uc12f\uc130\uc131\uc132\uc133\uc134\uc135\uc136\uc137\uc138\uc139\uc13a\uc13b\uc13c\uc13d\uc13e\uc13f\uc140\uc141\uc142\uc143\uc144\uc145\uc146\uc147\uc148\uc149\uc14a\uc14b\uc14c\uc14d\uc14e\uc14f\uc150\uc151\uc152\uc153\uc154\uc155\uc156\uc157\uc158\uc159\uc15a\uc15b\uc15c\uc15d\uc15e\uc15f\uc160\uc161\uc162\uc163\uc164\uc165\uc166\uc167\uc168\uc169\uc16a\uc16b\uc16c\uc16d\uc16e\uc16f\uc170\uc171\uc172\uc173\uc174\uc175\uc176\uc177\uc178\uc179\uc17a\uc17b\uc17c\uc17d\uc17e\uc17f\uc180\uc181\uc182\uc183\uc184\uc185\uc186\uc187\uc188\uc189\uc18a\uc18b\uc18c\uc18d\uc18e\uc18f\uc190\uc191\uc192\uc193\uc194\uc195\uc196\uc197\uc198\uc199\uc19a\uc19b\uc19c\uc19d\uc19e\uc19f\uc1a0\uc1a1\uc1a2\uc1a3\uc1a4\uc1a5\uc1a6\uc1a7\uc1a8\uc1a9\uc1aa\uc1ab\uc1ac\uc1ad\uc1ae\uc1af\uc1b0\uc1b1\uc1b2\uc1b3\uc1b4\uc1b5\uc1b6\uc1b7\uc1b8\uc1b9\uc1ba\uc1bb\uc1bc\uc1bd\uc1be\uc1bf\uc1c0\uc1c1\uc1c2\uc1c3\uc1c4\uc1c5\uc1c6\uc1c7\uc1c8\uc1c9\uc1ca\uc1cb\uc1cc\uc1cd\uc1ce\uc1cf\uc1d0\uc1d1\uc1d2\uc1d3\uc1d4\uc1d5\uc1d6\uc1d7\uc1d8\uc1d9\uc1da\uc1db\uc1dc\uc1dd\uc1de\uc1df\uc1e0\uc1e1\uc1e2\uc1e3\uc1e4\uc1e5\uc1e6\uc1e7\uc1e8\uc1e9\uc1ea\uc1eb\uc1ec\uc1ed\uc1ee\uc1ef\uc1f0\uc1f1\uc1f2\uc1f3\uc1f4\uc1f5\uc1f6\uc1f7\uc1f8\uc1f9\uc1fa\uc1fb\uc1fc\uc1fd\uc1fe\uc1ff\uc200\uc201\uc202\uc203\uc204\uc205\uc206\uc207\uc208\uc209\uc20a\uc20b\uc20c\uc20d\uc20e\uc20f\uc210\u
c211\uc212\uc213\uc214\uc215\uc216\uc217\uc218\uc219\uc21a\uc21b\uc21c\uc21d\uc21e\uc21f\uc220\uc221\uc222\uc223\uc224\uc225\uc226\uc227\uc228\uc229\uc22a\uc22b\uc22c\uc22d\uc22e\uc22f\uc230\uc231\uc232\uc233\uc234\uc235\uc236\uc237\uc238\uc239\uc23a\uc23b\uc23c\uc23d\uc23e\uc23f\uc240\uc241\uc242\uc243\uc244\uc245\uc246\uc247\uc248\uc249\uc24a\uc24b\uc24c\uc24d\uc24e\uc24f\uc250\uc251\uc252\uc253\uc254\uc255\uc256\uc257\uc258\uc259\uc25a\uc25b\uc25c\uc25d\uc25e\uc25f\uc260\uc261\uc262\uc263\uc264\uc265\uc266\uc267\uc268\uc269\uc26a\uc26b\uc26c\uc26d\uc26e\uc26f\uc270\uc271\uc272\uc273\uc274\uc275\uc276\uc277\uc278\uc279\uc27a\uc27b\uc27c\uc27d\uc27e\uc27f\uc280\uc281\uc282\uc283\uc284\uc285\uc286\uc287\uc288\uc289\uc28a\uc28b\uc28c\uc28d\uc28e\uc28f\uc290\uc291\uc292\uc293\uc294\uc295\uc296\uc297\uc298\uc299\uc29a\uc29b\uc29c\uc29d\uc29e\uc29f\uc2a0\uc2a1\uc2a2\uc2a3\uc2a4\uc2a5\uc2a6\uc2a7\uc2a8\uc2a9\uc2aa\uc2ab\uc2ac\uc2ad\uc2ae\uc2af\uc2b0\uc2b1\uc2b2\uc2b3\uc2b4\uc2b5\uc2b6\uc2b7\uc2b8\uc2b9\uc2ba\uc2bb\uc2bc\uc2bd\uc2be\uc2bf\uc2c0\uc2c1\uc2c2\uc2c3\uc2c4\uc2c5\uc2c6\uc2c7\uc2c8\uc2c9\uc2ca\uc2cb\uc2cc\uc2cd\uc2ce\uc2cf\uc2d0\uc2d1\uc2d2\uc2d3\uc2d4\uc2d5\uc2d6\uc2d7\uc2d8\uc2d9\uc2da\uc2db\uc2dc\uc2dd\uc2de\uc2df\uc2e0\uc2e1\uc2e2\uc2e3\uc2e4\uc2e5\uc2e6\uc2e7\uc2e8\uc2e9\uc2ea\uc2eb\uc2ec\uc2ed\uc2ee\uc2ef\uc2f0\uc2f1\uc2f2\uc2f3\uc2f4\uc2f5\uc2f6\uc2f7\uc2f8\uc2f9\uc2fa\uc2fb\uc2fc\uc2fd\uc2fe\uc2ff\uc300\uc301\uc302\uc303\uc304\uc305\uc306\uc307\uc308\uc309\uc30a\uc30b\uc30c\uc30d\uc30e\uc30f\uc310\uc311\uc312\uc313\uc314\uc315\uc316\uc317\uc318\uc319\uc31a\uc31b\uc31c\uc31d\uc31e\uc31f\uc320\uc321\uc322\uc323\uc324\uc325\uc326\uc327\uc328\uc329\uc32a\uc32b\uc32c\uc32d\uc32e\uc32f\uc330\uc331\uc332\uc333\uc334\uc335\uc336\uc337\uc338\uc339\uc33a\uc33b\uc33c\uc33d\uc33e\uc33f\uc340\uc341\uc342\uc343\uc344\uc345\uc346\uc347\uc348\uc349\uc34a\uc34b\uc34c\uc34d\uc34e\uc34f\uc350\uc351\uc352\uc353\uc354\uc355\uc356\uc357\uc358\uc359\uc35a\uc35b\uc35c\uc35d\uc3
5e\uc35f\uc360\uc361\uc362\uc363\uc364\uc365\uc366\uc367\uc368\uc369\uc36a\uc36b\uc36c\uc36d\uc36e\uc36f\uc370\uc371\uc372\uc373\uc374\uc375\uc376\uc377\uc378\uc379\uc37a\uc37b\uc37c\uc37d\uc37e\uc37f\uc380\uc381\uc382\uc383\uc384\uc385\uc386\uc387\uc388\uc389\uc38a\uc38b\uc38c\uc38d\uc38e\uc38f\uc390\uc391\uc392\uc393\uc394\uc395\uc396\uc397\uc398\uc399\uc39a\uc39b\uc39c\uc39d\uc39e\uc39f\uc3a0\uc3a1\uc3a2\uc3a3\uc3a4\uc3a5\uc3a6\uc3a7\uc3a8\uc3a9\uc3aa\uc3ab\uc3ac\uc3ad\uc3ae\uc3af\uc3b0\uc3b1\uc3b2\uc3b3\uc3b4\uc3b5\uc3b6\uc3b7\uc3b8\uc3b9\uc3ba\uc3bb\uc3bc\uc3bd\uc3be\uc3bf\uc3c0\uc3c1\uc3c2\uc3c3\uc3c4\uc3c5\uc3c6\uc3c7\uc3c8\uc3c9\uc3ca\uc3cb\uc3cc\uc3cd\uc3ce\uc3cf\uc3d0\uc3d1\uc3d2\uc3d3\uc3d4\uc3d5\uc3d6\uc3d7\uc3d8\uc3d9\uc3da\uc3db\uc3dc\uc3dd\uc3de\uc3df\uc3e0\uc3e1\uc3e2\uc3e3\uc3e4\uc3e5\uc3e6\uc3e7\uc3e8\uc3e9\uc3ea\uc3eb\uc3ec\uc3ed\uc3ee\uc3ef\uc3f0\uc3f1\uc3f2\uc3f3\uc3f4\uc3f5\uc3f6\uc3f7\uc3f8\uc3f9\uc3fa\uc3fb\uc3fc\uc3fd\uc3fe\uc3ff\uc400\uc401\uc402\uc403\uc404\uc405\uc406\uc407\uc408\uc409\uc40a\uc40b\uc40c\uc40d\uc40e\uc40f\uc410\uc411\uc412\uc413\uc414\uc415\uc416\uc417\uc418\uc419\uc41a\uc41b\uc41c\uc41d\uc41e\uc41f\uc420\uc421\uc422\uc423\uc424\uc425\uc426\uc427\uc428\uc429\uc42a\uc42b\uc42c\uc42d\uc42e\uc42f\uc430\uc431\uc432\uc433\uc434\uc435\uc436\uc437\uc438\uc439\uc43a\uc43b\uc43c\uc43d\uc43e\uc43f\uc440\uc441\uc442\uc443\uc444\uc445\uc446\uc447\uc448\uc449\uc44a\uc44b\uc44c\uc44d\uc44e\uc44f\uc450\uc451\uc452\uc453\uc454\uc455\uc456\uc457\uc458\uc459\uc45a\uc45b\uc45c\uc45d\uc45e\uc45f\uc460\uc461\uc462\uc463\uc464\uc465\uc466\uc467\uc468\uc469\uc46a\uc46b\uc46c\uc46d\uc46e\uc46f\uc470\uc471\uc472\uc473\uc474\uc475\uc476\uc477\uc478\uc479\uc47a\uc47b\uc47c\uc47d\uc47e\uc47f\uc480\uc481\uc482\uc483\uc484\uc485\uc486\uc487\uc488\uc489\uc48a\uc48b\uc48c\uc48d\uc48e\uc48f\uc490\uc491\uc492\uc493\uc494\uc495\uc496\uc497\uc498\uc499\uc49a\uc49b\uc49c\uc49d\uc49e\uc49f\uc4a0\uc4a1\uc4a2\uc4a3\uc4a4\uc4a5\uc4a6\uc4a7\uc4a8\uc4a9\uc4aa\uc4ab
\uc4ac\uc4ad\uc4ae\uc4af\uc4b0\uc4b1\uc4b2\uc4b3\uc4b4\uc4b5\uc4b6\uc4b7\uc4b8\uc4b9\uc4ba\uc4bb\uc4bc\uc4bd\uc4be\uc4bf\uc4c0\uc4c1\uc4c2\uc4c3\uc4c4\uc4c5\uc4c6\uc4c7\uc4c8\uc4c9\uc4ca\uc4cb\uc4cc\uc4cd\uc4ce\uc4cf\uc4d0\uc4d1\uc4d2\uc4d3\uc4d4\uc4d5\uc4d6\uc4d7\uc4d8\uc4d9\uc4da\uc4db\uc4dc\uc4dd\uc4de\uc4df\uc4e0\uc4e1\uc4e2\uc4e3\uc4e4\uc4e5\uc4e6\uc4e7\uc4e8\uc4e9\uc4ea\uc4eb\uc4ec\uc4ed\uc4ee\uc4ef\uc4f0\uc4f1\uc4f2\uc4f3\uc4f4\uc4f5\uc4f6\uc4f7\uc4f8\uc4f9\uc4fa\uc4fb\uc4fc\uc4fd\uc4fe\uc4ff\uc500\uc501\uc502\uc503\uc504\uc505\uc506\uc507\uc508\uc509\uc50a\uc50b\uc50c\uc50d\uc50e\uc50f\uc510\uc511\uc512\uc513\uc514\uc515\uc516\uc517\uc518\uc519\uc51a\uc51b\uc51c\uc51d\uc51e\uc51f\uc520\uc521\uc522\uc523\uc524\uc525\uc526\uc527\uc528\uc529\uc52a\uc52b\uc52c\uc52d\uc52e\uc52f\uc530\uc531\uc532\uc533\uc534\uc535\uc536\uc537\uc538\uc539\uc53a\uc53b\uc53c\uc53d\uc53e\uc53f\uc540\uc541\uc542\uc543\uc544\uc545\uc546\uc547\uc548\uc549\uc54a\uc54b\uc54c\uc54d\uc54e\uc54f\uc550\uc551\uc552\uc553\uc554\uc555\uc556\uc557\uc558\uc559\uc55a\uc55b\uc55c\uc55d\uc55e\uc55f\uc560\uc561\uc562\uc563\uc564\uc565\uc566\uc567\uc568\uc569\uc56a\uc56b\uc56c\uc56d\uc56e\uc56f\uc570\uc571\uc572\uc573\uc574\uc575\uc576\uc577\uc578\uc579\uc57a\uc57b\uc57c\uc57d\uc57e\uc57f\uc580\uc581\uc582\uc583\uc584\uc585\uc586\uc587\uc588\uc589\uc58a\uc58b\uc58c\uc58d\uc58e\uc58f\uc590\uc591\uc592\uc593\uc594\uc595\uc596\uc597\uc598\uc599\uc59a\uc59b\uc59c\uc59d\uc59e\uc59f\uc5a0\uc5a1\uc5a2\uc5a3\uc5a4\uc5a5\uc5a6\uc5a7\uc5a8\uc5a9\uc5aa\uc5ab\uc5ac\uc5ad\uc5ae\uc5af\uc5b0\uc5b1\uc5b2\uc5b3\uc5b4\uc5b5\uc5b6\uc5b7\uc5b8\uc5b9\uc5ba\uc5bb\uc5bc\uc5bd\uc5be\uc5bf\uc5c0\uc5c1\uc5c2\uc5c3\uc5c4\uc5c5\uc5c6\uc5c7\uc5c8\uc5c9\uc5ca\uc5cb\uc5cc\uc5cd\uc5ce\uc5cf\uc5d0\uc5d1\uc5d2\uc5d3\uc5d4\uc5d5\uc5d6\uc5d7\uc5d8\uc5d9\uc5da\uc5db\uc5dc\uc5dd\uc5de\uc5df\uc5e0\uc5e1\uc5e2\uc5e3\uc5e4\uc5e5\uc5e6\uc5e7\uc5e8\uc5e9\uc5ea\uc5eb\uc5ec\uc5ed\uc5ee\uc5ef\uc5f0\uc5f1\uc5f2\uc5f3\uc5f4\uc5f5\uc5f6\uc5f7\uc5f8\u
c5f9\uc5fa\uc5fb\uc5fc\uc5fd\uc5fe\uc5ff\uc600\uc601\uc602\uc603\uc604\uc605\uc606\uc607\uc608\uc609\uc60a\uc60b\uc60c\uc60d\uc60e\uc60f\uc610\uc611\uc612\uc613\uc614\uc615\uc616\uc617\uc618\uc619\uc61a\uc61b\uc61c\uc61d\uc61e\uc61f\uc620\uc621\uc622\uc623\uc624\uc625\uc626\uc627\uc628\uc629\uc62a\uc62b\uc62c\uc62d\uc62e\uc62f\uc630\uc631\uc632\uc633\uc634\uc635\uc636\uc637\uc638\uc639\uc63a\uc63b\uc63c\uc63d\uc63e\uc63f\uc640\uc641\uc642\uc643\uc644\uc645\uc646\uc647\uc648\uc649\uc64a\uc64b\uc64c\uc64d\uc64e\uc64f\uc650\uc651\uc652\uc653\uc654\uc655\uc656\uc657\uc658\uc659\uc65a\uc65b\uc65c\uc65d\uc65e\uc65f\uc660\uc661\uc662\uc663\uc664\uc665\uc666\uc667\uc668\uc669\uc66a\uc66b\uc66c\uc66d\uc66e\uc66f\uc670\uc671\uc672\uc673\uc674\uc675\uc676\uc677\uc678\uc679\uc67a\uc67b\uc67c\uc67d\uc67e\uc67f\uc680\uc681\uc682\uc683\uc684\uc685\uc686\uc687\uc688\uc689\uc68a\uc68b\uc68c\uc68d\uc68e\uc68f\uc690\uc691\uc692\uc693\uc694\uc695\uc696\uc697\uc698\uc699\uc69a\uc69b\uc69c\uc69d\uc69e\uc69f\uc6a0\uc6a1\uc6a2\uc6a3\uc6a4\uc6a5\uc6a6\uc6a7\uc6a8\uc6a9\uc6aa\uc6ab\uc6ac\uc6ad\uc6ae\uc6af\uc6b0\uc6b1\uc6b2\uc6b3\uc6b4\uc6b5\uc6b6\uc6b7\uc6b8\uc6b9\uc6ba\uc6bb\uc6bc\uc6bd\uc6be\uc6bf\uc6c0\uc6c1\uc6c2\uc6c3\uc6c4\uc6c5\uc6c6\uc6c7\uc6c8\uc6c9\uc6ca\uc6cb\uc6cc\uc6cd\uc6ce\uc6cf\uc6d0\uc6d1\uc6d2\uc6d3\uc6d4\uc6d5\uc6d6\uc6d7\uc6d8\uc6d9\uc6da\uc6db\uc6dc\uc6dd\uc6de\uc6df\uc6e0\uc6e1\uc6e2\uc6e3\uc6e4\uc6e5\uc6e6\uc6e7\uc6e8\uc6e9\uc6ea\uc6eb\uc6ec\uc6ed\uc6ee\uc6ef\uc6f0\uc6f1\uc6f2\uc6f3\uc6f4\uc6f5\uc6f6\uc6f7\uc6f8\uc6f9\uc6fa\uc6fb\uc6fc\uc6fd\uc6fe\uc6ff\uc700\uc701\uc702\uc703\uc704\uc705\uc706\uc707\uc708\uc709\uc70a\uc70b\uc70c\uc70d\uc70e\uc70f\uc710\uc711\uc712\uc713\uc714\uc715\uc716\uc717\uc718\uc719\uc71a\uc71b\uc71c\uc71d\uc71e\uc71f\uc720\uc721\uc722\uc723\uc724\uc725\uc726\uc727\uc728\uc729\uc72a\uc72b\uc72c\uc72d\uc72e\uc72f\uc730\uc731\uc732\uc733\uc734\uc735\uc736\uc737\uc738\uc739\uc73a\uc73b\uc73c\uc73d\uc73e\uc73f\uc740\uc741\uc742\uc743\uc744\uc745\uc7
46\uc747\uc748\uc749\uc74a\uc74b\uc74c\uc74d\uc74e\uc74f\uc750\uc751\uc752\uc753\uc754\uc755\uc756\uc757\uc758\uc759\uc75a\uc75b\uc75c\uc75d\uc75e\uc75f\uc760\uc761\uc762\uc763\uc764\uc765\uc766\uc767\uc768\uc769\uc76a\uc76b\uc76c\uc76d\uc76e\uc76f\uc770\uc771\uc772\uc773\uc774\uc775\uc776\uc777\uc778\uc779\uc77a\uc77b\uc77c\uc77d\uc77e\uc77f\uc780\uc781\uc782\uc783\uc784\uc785\uc786\uc787\uc788\uc789\uc78a\uc78b\uc78c\uc78d\uc78e\uc78f\uc790\uc791\uc792\uc793\uc794\uc795\uc796\uc797\uc798\uc799\uc79a\uc79b\uc79c\uc79d\uc79e\uc79f\uc7a0\uc7a1\uc7a2\uc7a3\uc7a4\uc7a5\uc7a6\uc7a7\uc7a8\uc7a9\uc7aa\uc7ab\uc7ac\uc7ad\uc7ae\uc7af\uc7b0\uc7b1\uc7b2\uc7b3\uc7b4\uc7b5\uc7b6\uc7b7\uc7b8\uc7b9\uc7ba\uc7bb\uc7bc\uc7bd\uc7be\uc7bf\uc7c0\uc7c1\uc7c2\uc7c3\uc7c4\uc7c5\uc7c6\uc7c7\uc7c8\uc7c9\uc7ca\uc7cb\uc7cc\uc7cd\uc7ce\uc7cf\uc7d0\uc7d1\uc7d2\uc7d3\uc7d4\uc7d5\uc7d6\uc7d7\uc7d8\uc7d9\uc7da\uc7db\uc7dc\uc7dd\uc7de\uc7df\uc7e0\uc7e1\uc7e2\uc7e3\uc7e4\uc7e5\uc7e6\uc7e7\uc7e8\uc7e9\uc7ea\uc7eb\uc7ec\uc7ed\uc7ee\uc7ef\uc7f0\uc7f1\uc7f2\uc7f3\uc7f4\uc7f5\uc7f6\uc7f7\uc7f8\uc7f9\uc7fa\uc7fb\uc7fc\uc7fd\uc7fe\uc7ff\uc800\uc801\uc802\uc803\uc804\uc805\uc806\uc807\uc808\uc809\uc80a\uc80b\uc80c\uc80d\uc80e\uc80f\uc810\uc811\uc812\uc813\uc814\uc815\uc816\uc817\uc818\uc819\uc81a\uc81b\uc81c\uc81d\uc81e\uc81f\uc820\uc821\uc822\uc823\uc824\uc825\uc826\uc827\uc828\uc829\uc82a\uc82b\uc82c\uc82d\uc82e\uc82f\uc830\uc831\uc832\uc833\uc834\uc835\uc836\uc837\uc838\uc839\uc83a\uc83b\uc83c\uc83d\uc83e\uc83f\uc840\uc841\uc842\uc843\uc844\uc845\uc846\uc847\uc848\uc849\uc84a\uc84b\uc84c\uc84d\uc84e\uc84f\uc850\uc851\uc852\uc853\uc854\uc855\uc856\uc857\uc858\uc859\uc85a\uc85b\uc85c\uc85d\uc85e\uc85f\uc860\uc861\uc862\uc863\uc864\uc865\uc866\uc867\uc868\uc869\uc86a\uc86b\uc86c\uc86d\uc86e\uc86f\uc870\uc871\uc872\uc873\uc874\uc875\uc876\uc877\uc878\uc879\uc87a\uc87b\uc87c\uc87d\uc87e\uc87f\uc880\uc881\uc882\uc883\uc884\uc885\uc886\uc887\uc888\uc889\uc88a\uc88b\uc88c\uc88d\uc88e\uc88f\uc890\uc891\uc892\uc893
\uc894\uc895\uc896\uc897\uc898\uc899\uc89a\uc89b\uc89c\uc89d\uc89e\uc89f\uc8a0\uc8a1\uc8a2\uc8a3\uc8a4\uc8a5\uc8a6\uc8a7\uc8a8\uc8a9\uc8aa\uc8ab\uc8ac\uc8ad\uc8ae\uc8af\uc8b0\uc8b1\uc8b2\uc8b3\uc8b4\uc8b5\uc8b6\uc8b7\uc8b8\uc8b9\uc8ba\uc8bb\uc8bc\uc8bd\uc8be\uc8bf\uc8c0\uc8c1\uc8c2\uc8c3\uc8c4\uc8c5\uc8c6\uc8c7\uc8c8\uc8c9\uc8ca\uc8cb\uc8cc\uc8cd\uc8ce\uc8cf\uc8d0\uc8d1\uc8d2\uc8d3\uc8d4\uc8d5\uc8d6\uc8d7\uc8d8\uc8d9\uc8da\uc8db\uc8dc\uc8dd\uc8de\uc8df\uc8e0\uc8e1\uc8e2\uc8e3\uc8e4\uc8e5\uc8e6\uc8e7\uc8e8\uc8e9\uc8ea\uc8eb\uc8ec\uc8ed\uc8ee\uc8ef\uc8f0\uc8f1\uc8f2\uc8f3\uc8f4\uc8f5\uc8f6\uc8f7\uc8f8\uc8f9\uc8fa\uc8fb\uc8fc\uc8fd\uc8fe\uc8ff\uc900\uc901\uc902\uc903\uc904\uc905\uc906\uc907\uc908\uc909\uc90a\uc90b\uc90c\uc90d\uc90e\uc90f\uc910\uc911\uc912\uc913\uc914\uc915\uc916\uc917\uc918\uc919\uc91a\uc91b\uc91c\uc91d\uc91e\uc91f\uc920\uc921\uc922\uc923\uc924\uc925\uc926\uc927\uc928\uc929\uc92a\uc92b\uc92c\uc92d\uc92e\uc92f\uc930\uc931\uc932\uc933\uc934\uc935\uc936\uc937\uc938\uc939\uc93a\uc93b\uc93c\uc93d\uc93e\uc93f\uc940\uc941\uc942\uc943\uc944\uc945\uc946\uc947\uc948\uc949\uc94a\uc94b\uc94c\uc94d\uc94e\uc94f\uc950\uc951\uc952\uc953\uc954\uc955\uc956\uc957\uc958\uc959\uc95a\uc95b\uc95c\uc95d\uc95e\uc95f\uc960\uc961\uc962\uc963\uc964\uc965\uc966\uc967\uc968\uc969\uc96a\uc96b\uc96c\uc96d\uc96e\uc96f\uc970\uc971\uc972\uc973\uc974\uc975\uc976\uc977\uc978\uc979\uc97a\uc97b\uc97c\uc97d\uc97e\uc97f\uc980\uc981\uc982\uc983\uc984\uc985\uc986\uc987\uc988\uc989\uc98a\uc98b\uc98c\uc98d\uc98e\uc98f\uc990\uc991\uc992\uc993\uc994\uc995\uc996\uc997\uc998\uc999\uc99a\uc99b\uc99c\uc99d\uc99e\uc99f\uc9a0\uc9a1\uc9a2\uc9a3\uc9a4\uc9a5\uc9a6\uc9a7\uc9a8\uc9a9\uc9aa\uc9ab\uc9ac\uc9ad\uc9ae\uc9af\uc9b0\uc9b1\uc9b2\uc9b3\uc9b4\uc9b5\uc9b6\uc9b7\uc9b8\uc9b9\uc9ba\uc9bb\uc9bc\uc9bd\uc9be\uc9bf\uc9c0\uc9c1\uc9c2\uc9c3\uc9c4\uc9c5\uc9c6\uc9c7\uc9c8\uc9c9\uc9ca\uc9cb\uc9cc\uc9cd\uc9ce\uc9cf\uc9d0\uc9d1\uc9d2\uc9d3\uc9d4\uc9d5\uc9d6\uc9d7\uc9d8\uc9d9\uc9da\uc9db\uc9dc\uc9dd\uc9de\uc9df\uc9e0\u
c9e1\uc9e2\uc9e3\uc9e4\uc9e5\uc9e6\uc9e7\uc9e8\uc9e9\uc9ea\uc9eb\uc9ec\uc9ed\uc9ee\uc9ef\uc9f0\uc9f1\uc9f2\uc9f3\uc9f4\uc9f5\uc9f6\uc9f7\uc9f8\uc9f9\uc9fa\uc9fb\uc9fc\uc9fd\uc9fe\uc9ff\uca00\uca01\uca02\uca03\uca04\uca05\uca06\uca07\uca08\uca09\uca0a\uca0b\uca0c\uca0d\uca0e\uca0f\uca10\uca11\uca12\uca13\uca14\uca15\uca16\uca17\uca18\uca19\uca1a\uca1b\uca1c\uca1d\uca1e\uca1f\uca20\uca21\uca22\uca23\uca24\uca25\uca26\uca27\uca28\uca29\uca2a\uca2b\uca2c\uca2d\uca2e\uca2f\uca30\uca31\uca32\uca33\uca34\uca35\uca36\uca37\uca38\uca39\uca3a\uca3b\uca3c\uca3d\uca3e\uca3f\uca40\uca41\uca42\uca43\uca44\uca45\uca46\uca47\uca48\uca49\uca4a\uca4b\uca4c\uca4d\uca4e\uca4f\uca50\uca51\uca52\uca53\uca54\uca55\uca56\uca57\uca58\uca59\uca5a\uca5b\uca5c\uca5d\uca5e\uca5f\uca60\uca61\uca62\uca63\uca64\uca65\uca66\uca67\uca68\uca69\uca6a\uca6b\uca6c\uca6d\uca6e\uca6f\uca70\uca71\uca72\uca73\uca74\uca75\uca76\uca77\uca78\uca79\uca7a\uca7b\uca7c\uca7d\uca7e\uca7f\uca80\uca81\uca82\uca83\uca84\uca85\uca86\uca87\uca88\uca89\uca8a\uca8b\uca8c\uca8d\uca8e\uca8f\uca90\uca91\uca92\uca93\uca94\uca95\uca96\uca97\uca98\uca99\uca9a\uca9b\uca9c\uca9d\uca9e\uca9f\ucaa0\ucaa1\ucaa2\ucaa3\ucaa4\ucaa5\ucaa6\ucaa7\ucaa8\ucaa9\ucaaa\ucaab\ucaac\ucaad\ucaae\ucaaf\ucab0\ucab1\ucab2\ucab3\ucab4\ucab5\ucab6\ucab7\ucab8\ucab9\ucaba\ucabb\ucabc\ucabd\ucabe\ucabf\ucac0\ucac1\ucac2\ucac3\ucac4\ucac5\ucac6\ucac7\ucac8\ucac9\ucaca\ucacb\ucacc\ucacd\ucace\ucacf\ucad0\ucad1\ucad2\ucad3\ucad4\ucad5\ucad6\ucad7\ucad8\ucad9\ucada\ucadb\ucadc\ucadd\ucade\ucadf\ucae0\ucae1\ucae2\ucae3\ucae4\ucae5\ucae6\ucae7\ucae8\ucae9\ucaea\ucaeb\ucaec\ucaed\ucaee\ucaef\ucaf0\ucaf1\ucaf2\ucaf3\ucaf4\ucaf5\ucaf6\ucaf7\ucaf8\ucaf9\ucafa\ucafb\ucafc\ucafd\ucafe\ucaff\ucb00\ucb01\ucb02\ucb03\ucb04\ucb05\ucb06\ucb07\ucb08\ucb09\ucb0a\ucb0b\ucb0c\ucb0d\ucb0e\ucb0f\ucb10\ucb11\ucb12\ucb13\ucb14\ucb15\ucb16\ucb17\ucb18\ucb19\ucb1a\ucb1b\ucb1c\ucb1d\ucb1e\ucb1f\ucb20\ucb21\ucb22\ucb23\ucb24\ucb25\ucb26\ucb27\ucb28\ucb29\ucb2a\ucb2b\ucb2c\ucb2d\ucb
2e\ucb2f\ucb30\ucb31\ucb32\ucb33\ucb34\ucb35\ucb36\ucb37\ucb38\ucb39\ucb3a\ucb3b\ucb3c\ucb3d\ucb3e\ucb3f\ucb40\ucb41\ucb42\ucb43\ucb44\ucb45\ucb46\ucb47\ucb48\ucb49\ucb4a\ucb4b\ucb4c\ucb4d\ucb4e\ucb4f\ucb50\ucb51\ucb52\ucb53\ucb54\ucb55\ucb56\ucb57\ucb58\ucb59\ucb5a\ucb5b\ucb5c\ucb5d\ucb5e\ucb5f\ucb60\ucb61\ucb62\ucb63\ucb64\ucb65\ucb66\ucb67\ucb68\ucb69\ucb6a\ucb6b\ucb6c\ucb6d\ucb6e\ucb6f\ucb70\ucb71\ucb72\ucb73\ucb74\ucb75\ucb76\ucb77\ucb78\ucb79\ucb7a\ucb7b\ucb7c\ucb7d\ucb7e\ucb7f\ucb80\ucb81\ucb82\ucb83\ucb84\ucb85\ucb86\ucb87\ucb88\ucb89\ucb8a\ucb8b\ucb8c\ucb8d\ucb8e\ucb8f\ucb90\ucb91\ucb92\ucb93\ucb94\ucb95\ucb96\ucb97\ucb98\ucb99\ucb9a\ucb9b\ucb9c\ucb9d\ucb9e\ucb9f\ucba0\ucba1\ucba2\ucba3\ucba4\ucba5\ucba6\ucba7\ucba8\ucba9\ucbaa\ucbab\ucbac\ucbad\ucbae\ucbaf\ucbb0\ucbb1\ucbb2\ucbb3\ucbb4\ucbb5\ucbb6\ucbb7\ucbb8\ucbb9\ucbba\ucbbb\ucbbc\ucbbd\ucbbe\ucbbf\ucbc0\ucbc1\ucbc2\ucbc3\ucbc4\ucbc5\ucbc6\ucbc7\ucbc8\ucbc9\ucbca\ucbcb\ucbcc\ucbcd\ucbce\ucbcf\ucbd0\ucbd1\ucbd2\ucbd3\ucbd4\ucbd5\ucbd6\ucbd7\ucbd8\ucbd9\ucbda\ucbdb\ucbdc\ucbdd\ucbde\ucbdf\ucbe0\ucbe1\ucbe2\ucbe3\ucbe4\ucbe5\ucbe6\ucbe7\ucbe8\ucbe9\ucbea\ucbeb\ucbec\ucbed\ucbee\ucbef\ucbf0\ucbf1\ucbf2\ucbf3\ucbf4\ucbf5\ucbf6\ucbf7\ucbf8\ucbf9\ucbfa\ucbfb\ucbfc\ucbfd\ucbfe\ucbff\ucc00\ucc01\ucc02\ucc03\ucc04\ucc05\ucc06\ucc07\ucc08\ucc09\ucc0a\ucc0b\ucc0c\ucc0d\ucc0e\ucc0f\ucc10\ucc11\ucc12\ucc13\ucc14\ucc15\ucc16\ucc17\ucc18\ucc19\ucc1a\ucc1b\ucc1c\ucc1d\ucc1e\ucc1f\ucc20\ucc21\ucc22\ucc23\ucc24\ucc25\ucc26\ucc27\ucc28\ucc29\ucc2a\ucc2b\ucc2c\ucc2d\ucc2e\ucc2f\ucc30\ucc31\ucc32\ucc33\ucc34\ucc35\ucc36\ucc37\ucc38\ucc39\ucc3a\ucc3b\ucc3c\ucc3d\ucc3e\ucc3f\ucc40\ucc41\ucc42\ucc43\ucc44\ucc45\ucc46\ucc47\ucc48\ucc49\ucc4a\ucc4b\ucc4c\ucc4d\ucc4e\ucc4f\ucc50\ucc51\ucc52\ucc53\ucc54\ucc55\ucc56\ucc57\ucc58\ucc59\ucc5a\ucc5b\ucc5c\ucc5d\ucc5e\ucc5f\ucc60\ucc61\ucc62\ucc63\ucc64\ucc65\ucc66\ucc67\ucc68\ucc69\ucc6a\ucc6b\ucc6c\ucc6d\ucc6e\ucc6f\ucc70\ucc71\ucc72\ucc73\ucc74\ucc75\ucc76\ucc77\ucc78\ucc79\ucc7a\ucc7b
\ucc7c\ucc7d\ucc7e\ucc7f\ucc80\ucc81\ucc82\ucc83\ucc84\ucc85\ucc86\ucc87\ucc88\ucc89\ucc8a\ucc8b\ucc8c\ucc8d\ucc8e\ucc8f\ucc90\ucc91\ucc92\ucc93\ucc94\ucc95\ucc96\ucc97\ucc98\ucc99\ucc9a\ucc9b\ucc9c\ucc9d\ucc9e\ucc9f\ucca0\ucca1\ucca2\ucca3\ucca4\ucca5\ucca6\ucca7\ucca8\ucca9\uccaa\uccab\uccac\uccad\uccae\uccaf\uccb0\uccb1\uccb2\uccb3\uccb4\uccb5\uccb6\uccb7\uccb8\uccb9\uccba\uccbb\uccbc\uccbd\uccbe\uccbf\uccc0\uccc1\uccc2\uccc3\uccc4\uccc5\uccc6\uccc7\uccc8\uccc9\uccca\ucccb\ucccc\ucccd\uccce\ucccf\uccd0\uccd1\uccd2\uccd3\uccd4\uccd5\uccd6\uccd7\uccd8\uccd9\uccda\uccdb\uccdc\uccdd\uccde\uccdf\ucce0\ucce1\ucce2\ucce3\ucce4\ucce5\ucce6\ucce7\ucce8\ucce9\uccea\ucceb\uccec\ucced\uccee\uccef\uccf0\uccf1\uccf2\uccf3\uccf4\uccf5\uccf6\uccf7\uccf8\uccf9\uccfa\uccfb\uccfc\uccfd\uccfe\uccff\ucd00\ucd01\ucd02\ucd03\ucd04\ucd05\ucd06\ucd07\ucd08\ucd09\ucd0a\ucd0b\ucd0c\ucd0d\ucd0e\ucd0f\ucd10\ucd11\ucd12\ucd13\ucd14\ucd15\ucd16\ucd17\ucd18\ucd19\ucd1a\ucd1b\ucd1c\ucd1d\ucd1e\ucd1f\ucd20\ucd21\ucd22\ucd23\ucd24\ucd25\ucd26\ucd27\ucd28\ucd29\ucd2a\ucd2b\ucd2c\ucd2d\ucd2e\ucd2f\ucd30\ucd31\ucd32\ucd33\ucd34\ucd35\ucd36\ucd37\ucd38\ucd39\ucd3a\ucd3b\ucd3c\ucd3d\ucd3e\ucd3f\ucd40\ucd41\ucd42\ucd43\ucd44\ucd45\ucd46\ucd47\ucd48\ucd49\ucd4a\ucd4b\ucd4c\ucd4d\ucd4e\ucd4f\ucd50\ucd51\ucd52\ucd53\ucd54\ucd55\ucd56\ucd57\ucd58\ucd59\ucd5a\ucd5b\ucd5c\ucd5d\ucd5e\ucd5f\ucd60\ucd61\ucd62\ucd63\ucd64\ucd65\ucd66\ucd67\ucd68\ucd69\ucd6a\ucd6b\ucd6c\ucd6d\ucd6e\ucd6f\ucd70\ucd71\ucd72\ucd73\ucd74\ucd75\ucd76\ucd77\ucd78\ucd79\ucd7a\ucd7b\ucd7c\ucd7d\ucd7e\ucd7f\ucd80\ucd81\ucd82\ucd83\ucd84\ucd85\ucd86\ucd87\ucd88\ucd89\ucd8a\ucd8b\ucd8c\ucd8d\ucd8e\ucd8f\ucd90\ucd91\ucd92\ucd93\ucd94\ucd95\ucd96\ucd97\ucd98\ucd99\ucd9a\ucd9b\ucd9c\ucd9d\ucd9e\ucd9f\ucda0\ucda1\ucda2\ucda3\ucda4\ucda5\ucda6\ucda7\ucda8\ucda9\ucdaa\ucdab\ucdac\ucdad\ucdae\ucdaf\ucdb0\ucdb1\ucdb2\ucdb3\ucdb4\ucdb5\ucdb6\ucdb7\ucdb8\ucdb9\ucdba\ucdbb\ucdbc\ucdbd\ucdbe\ucdbf\ucdc0\ucdc1\ucdc2\ucdc3\ucdc4\ucdc5\ucdc6\ucdc7\ucdc8\u
cdc9\ucdca\ucdcb\ucdcc\ucdcd\ucdce\ucdcf\ucdd0\ucdd1\ucdd2\ucdd3\ucdd4\ucdd5\ucdd6\ucdd7\ucdd8\ucdd9\ucdda\ucddb\ucddc\ucddd\ucdde\ucddf\ucde0\ucde1\ucde2\ucde3\ucde4\ucde5\ucde6\ucde7\ucde8\ucde9\ucdea\ucdeb\ucdec\ucded\ucdee\ucdef\ucdf0\ucdf1\ucdf2\ucdf3\ucdf4\ucdf5\ucdf6\ucdf7\ucdf8\ucdf9\ucdfa\ucdfb\ucdfc\ucdfd\ucdfe\ucdff\uce00\uce01\uce02\uce03\uce04\uce05\uce06\uce07\uce08\uce09\uce0a\uce0b\uce0c\uce0d\uce0e\uce0f\uce10\uce11\uce12\uce13\uce14\uce15\uce16\uce17\uce18\uce19\uce1a\uce1b\uce1c\uce1d\uce1e\uce1f\uce20\uce21\uce22\uce23\uce24\uce25\uce26\uce27\uce28\uce29\uce2a\uce2b\uce2c\uce2d\uce2e\uce2f\uce30\uce31\uce32\uce33\uce34\uce35\uce36\uce37\uce38\uce39\uce3a\uce3b\uce3c\uce3d\uce3e\uce3f\uce40\uce41\uce42\uce43\uce44\uce45\uce46\uce47\uce48\uce49\uce4a\uce4b\uce4c\uce4d\uce4e\uce4f\uce50\uce51\uce52\uce53\uce54\uce55\uce56\uce57\uce58\uce59\uce5a\uce5b\uce5c\uce5d\uce5e\uce5f\uce60\uce61\uce62\uce63\uce64\uce65\uce66\uce67\uce68\uce69\uce6a\uce6b\uce6c\uce6d\uce6e\uce6f\uce70\uce71\uce72\uce73\uce74\uce75\uce76\uce77\uce78\uce79\uce7a\uce7b\uce7c\uce7d\uce7e\uce7f\uce80\uce81\uce82\uce83\uce84\uce85\uce86\uce87\uce88\uce89\uce8a\uce8b\uce8c\uce8d\uce8e\uce8f\uce90\uce91\uce92\uce93\uce94\uce95\uce96\uce97\uce98\uce99\uce9a\uce9b\uce9c\uce9d\uce9e\uce9f\ucea0\ucea1\ucea2\ucea3\ucea4\ucea5\ucea6\ucea7\ucea8\ucea9\uceaa\uceab\uceac\ucead\uceae\uceaf\uceb0\uceb1\uceb2\uceb3\uceb4\uceb5\uceb6\uceb7\uceb8\uceb9\uceba\ucebb\ucebc\ucebd\ucebe\ucebf\ucec0\ucec1\ucec2\ucec3\ucec4\ucec5\ucec6\ucec7\ucec8\ucec9\uceca\ucecb\ucecc\ucecd\ucece\ucecf\uced0\uced1\uced2\uced3\uced4\uced5\uced6\uced7\uced8\uced9\uceda\ucedb\ucedc\ucedd\ucede\ucedf\ucee0\ucee1\ucee2\ucee3\ucee4\ucee5\ucee6\ucee7\ucee8\ucee9\uceea\uceeb\uceec\uceed\uceee\uceef\ucef0\ucef1\ucef2\ucef3\ucef4\ucef5\ucef6\ucef7\ucef8\ucef9\ucefa\ucefb\ucefc\ucefd\ucefe\uceff\ucf00\ucf01\ucf02\ucf03\ucf04\ucf05\ucf06\ucf07\ucf08\ucf09\ucf0a\ucf0b\ucf0c\ucf0d\ucf0e\ucf0f\ucf10\ucf11\ucf12\ucf13\ucf14\ucf15\ucf
16\ucf17\ucf18\ucf19\ucf1a\ucf1b\ucf1c\ucf1d\ucf1e\ucf1f\ucf20\ucf21\ucf22\ucf23\ucf24\ucf25\ucf26\ucf27\ucf28\ucf29\ucf2a\ucf2b\ucf2c\ucf2d\ucf2e\ucf2f\ucf30\ucf31\ucf32\ucf33\ucf34\ucf35\ucf36\ucf37\ucf38\ucf39\ucf3a\ucf3b\ucf3c\ucf3d\ucf3e\ucf3f\ucf40\ucf41\ucf42\ucf43\ucf44\ucf45\ucf46\ucf47\ucf48\ucf49\ucf4a\ucf4b\ucf4c\ucf4d\ucf4e\ucf4f\ucf50\ucf51\ucf52\ucf53\ucf54\ucf55\ucf56\ucf57\ucf58\ucf59\ucf5a\ucf5b\ucf5c\ucf5d\ucf5e\ucf5f\ucf60\ucf61\ucf62\ucf63\ucf64\ucf65\ucf66\ucf67\ucf68\ucf69\ucf6a\ucf6b\ucf6c\ucf6d\ucf6e\ucf6f\ucf70\ucf71\ucf72\ucf73\ucf74\ucf75\ucf76\ucf77\ucf78\ucf79\ucf7a\ucf7b\ucf7c\ucf7d\ucf7e\ucf7f\ucf80\ucf81\ucf82\ucf83\ucf84\ucf85\ucf86\ucf87\ucf88\ucf89\ucf8a\ucf8b\ucf8c\ucf8d\ucf8e\ucf8f\ucf90\ucf91\ucf92\ucf93\ucf94\ucf95\ucf96\ucf97\ucf98\ucf99\ucf9a\ucf9b\ucf9c\ucf9d\ucf9e\ucf9f\ucfa0\ucfa1\ucfa2\ucfa3\ucfa4\ucfa5\ucfa6\ucfa7\ucfa8\ucfa9\ucfaa\ucfab\ucfac\ucfad\ucfae\ucfaf\ucfb0\ucfb1\ucfb2\ucfb3\ucfb4\ucfb5\ucfb6\ucfb7\ucfb8\ucfb9\ucfba\ucfbb\ucfbc\ucfbd\ucfbe\ucfbf\ucfc0\ucfc1\ucfc2\ucfc3\ucfc4\ucfc5\ucfc6\ucfc7\ucfc8\ucfc9\ucfca\ucfcb\ucfcc\ucfcd\ucfce\ucfcf\ucfd0\ucfd1\ucfd2\ucfd3\ucfd4\ucfd5\ucfd6\ucfd7\ucfd8\ucfd9\ucfda\ucfdb\ucfdc\ucfdd\ucfde\ucfdf\ucfe0\ucfe1\ucfe2\ucfe3\ucfe4\ucfe5\ucfe6\ucfe7\ucfe8\ucfe9\ucfea\ucfeb\ucfec\ucfed\ucfee\ucfef\ucff0\ucff1\ucff2\ucff3\ucff4\ucff5\ucff6\ucff7\ucff8\ucff9\ucffa\ucffb\ucffc\ucffd\ucffe\ucfff\ud000\ud001\ud002\ud003\ud004\ud005\ud006\ud007\ud008\ud009\ud00a\ud00b\ud00c\ud00d\ud00e\ud00f\ud010\ud011\ud012\ud013\ud014\ud015\ud016\ud017\ud018\ud019\ud01a\ud01b\ud01c\ud01d\ud01e\ud01f\ud020\ud021\ud022\ud023\ud024\ud025\ud026\ud027\ud028\ud029\ud02a\ud02b\ud02c\ud02d\ud02e\ud02f\ud030\ud031\ud032\ud033\ud034\ud035\ud036\ud037\ud038\ud039\ud03a\ud03b\ud03c\ud03d\ud03e\ud03f\ud040\ud041\ud042\ud043\ud044\ud045\ud046\ud047\ud048\ud049\ud04a\ud04b\ud04c\ud04d\ud04e\ud04f\ud050\ud051\ud052\ud053\ud054\ud055\ud056\ud057\ud058\ud059\ud05a\ud05b\ud05c\ud05d\ud05e\ud05f\ud060\ud061\ud062\ud063
\ud064\ud065\ud066\ud067\ud068\ud069\ud06a\ud06b\ud06c\ud06d\ud06e\ud06f\ud070\ud071\ud072\ud073\ud074\ud075\ud076\ud077\ud078\ud079\ud07a\ud07b\ud07c\ud07d\ud07e\ud07f\ud080\ud081\ud082\ud083\ud084\ud085\ud086\ud087\ud088\ud089\ud08a\ud08b\ud08c\ud08d\ud08e\ud08f\ud090\ud091\ud092\ud093\ud094\ud095\ud096\ud097\ud098\ud099\ud09a\ud09b\ud09c\ud09d\ud09e\ud09f\ud0a0\ud0a1\ud0a2\ud0a3\ud0a4\ud0a5\ud0a6\ud0a7\ud0a8\ud0a9\ud0aa\ud0ab\ud0ac\ud0ad\ud0ae\ud0af\ud0b0\ud0b1\ud0b2\ud0b3\ud0b4\ud0b5\ud0b6\ud0b7\ud0b8\ud0b9\ud0ba\ud0bb\ud0bc\ud0bd\ud0be\ud0bf\ud0c0\ud0c1\ud0c2\ud0c3\ud0c4\ud0c5\ud0c6\ud0c7\ud0c8\ud0c9\ud0ca\ud0cb\ud0cc\ud0cd\ud0ce\ud0cf\ud0d0\ud0d1\ud0d2\ud0d3\ud0d4\ud0d5\ud0d6\ud0d7\ud0d8\ud0d9\ud0da\ud0db\ud0dc\ud0dd\ud0de\ud0df\ud0e0\ud0e1\ud0e2\ud0e3\ud0e4\ud0e5\ud0e6\ud0e7\ud0e8\ud0e9\ud0ea\ud0eb\ud0ec\ud0ed\ud0ee\ud0ef\ud0f0\ud0f1\ud0f2\ud0f3\ud0f4\ud0f5\ud0f6\ud0f7\ud0f8\ud0f9\ud0fa\ud0fb\ud0fc\ud0fd\ud0fe\ud0ff\ud100\ud101\ud102\ud103\ud104\ud105\ud106\ud107\ud108\ud109\ud10a\ud10b\ud10c\ud10d\ud10e\ud10f\ud110\ud111\ud112\ud113\ud114\ud115\ud116\ud117\ud118\ud119\ud11a\ud11b\ud11c\ud11d\ud11e\ud11f\ud120\ud121\ud122\ud123\ud124\ud125\ud126\ud127\ud128\ud129\ud12a\ud12b\ud12c\ud12d\ud12e\ud12f\ud130\ud131\ud132\ud133\ud134\ud135\ud136\ud137\ud138\ud139\ud13a\ud13b\ud13c\ud13d\ud13e\ud13f\ud140\ud141\ud142\ud143\ud144\ud145\ud146\ud147\ud148\ud149\ud14a\ud14b\ud14c\ud14d\ud14e\ud14f\ud150\ud151\ud152\ud153\ud154\ud155\ud156\ud157\ud158\ud159\ud15a\ud15b\ud15c\ud15d\ud15e\ud15f\ud160\ud161\ud162\ud163\ud164\ud165\ud166\ud167\ud168\ud169\ud16a\ud16b\ud16c\ud16d\ud16e\ud16f\ud170\ud171\ud172\ud173\ud174\ud175\ud176\ud177\ud178\ud179\ud17a\ud17b\ud17c\ud17d\ud17e\ud17f\ud180\ud181\ud182\ud183\ud184\ud185\ud186\ud187\ud188\ud189\ud18a\ud18b\ud18c\ud18d\ud18e\ud18f\ud190\ud191\ud192\ud193\ud194\ud195\ud196\ud197\ud198\ud199\ud19a\ud19b\ud19c\ud19d\ud19e\ud19f\ud1a0\ud1a1\ud1a2\ud1a3\ud1a4\ud1a5\ud1a6\ud1a7\ud1a8\ud1a9\ud1aa\ud1ab\ud1ac\ud1ad\ud1ae\ud1af\ud1b0\u
d1b1\ud1b2\ud1b3\ud1b4\ud1b5\ud1b6\ud1b7\ud1b8\ud1b9\ud1ba\ud1bb\ud1bc\ud1bd\ud1be\ud1bf\ud1c0\ud1c1\ud1c2\ud1c3\ud1c4\ud1c5\ud1c6\ud1c7\ud1c8\ud1c9\ud1ca\ud1cb\ud1cc\ud1cd\ud1ce\ud1cf\ud1d0\ud1d1\ud1d2\ud1d3\ud1d4\ud1d5\ud1d6\ud1d7\ud1d8\ud1d9\ud1da\ud1db\ud1dc\ud1dd\ud1de\ud1df\ud1e0\ud1e1\ud1e2\ud1e3\ud1e4\ud1e5\ud1e6\ud1e7\ud1e8\ud1e9\ud1ea\ud1eb\ud1ec\ud1ed\ud1ee\ud1ef\ud1f0\ud1f1\ud1f2\ud1f3\ud1f4\ud1f5\ud1f6\ud1f7\ud1f8\ud1f9\ud1fa\ud1fb\ud1fc\ud1fd\ud1fe\ud1ff\ud200\ud201\ud202\ud203\ud204\ud205\ud206\ud207\ud208\ud209\ud20a\ud20b\ud20c\ud20d\ud20e\ud20f\ud210\ud211\ud212\ud213\ud214\ud215\ud216\ud217\ud218\ud219\ud21a\ud21b\ud21c\ud21d\ud21e\ud21f\ud220\ud221\ud222\ud223\ud224\ud225\ud226\ud227\ud228\ud229\ud22a\ud22b\ud22c\ud22d\ud22e\ud22f\ud230\ud231\ud232\ud233\ud234\ud235\ud236\ud237\ud238\ud239\ud23a\ud23b\ud23c\ud23d\ud23e\ud23f\ud240\ud241\ud242\ud243\ud244\ud245\ud246\ud247\ud248\ud249\ud24a\ud24b\ud24c\ud24d\ud24e\ud24f\ud250\ud251\ud252\ud253\ud254\ud255\ud256\ud257\ud258\ud259\ud25a\ud25b\ud25c\ud25d\ud25e\ud25f\ud260\ud261\ud262\ud263\ud264\ud265\ud266\ud267\ud268\ud269\ud26a\ud26b\ud26c\ud26d\ud26e\ud26f\ud270\ud271\ud272\ud273\ud274\ud275\ud276\ud277\ud278\ud279\ud27a\ud27b\ud27c\ud27d\ud27e\ud27f\ud280\ud281\ud282\ud283\ud284\ud285\ud286\ud287\ud288\ud289\ud28a\ud28b\ud28c\ud28d\ud28e\ud28f\ud290\ud291\ud292\ud293\ud294\ud295\ud296\ud297\ud298\ud299\ud29a\ud29b\ud29c\ud29d\ud29e\ud29f\ud2a0\ud2a1\ud2a2\ud2a3\ud2a4\ud2a5\ud2a6\ud2a7\ud2a8\ud2a9\ud2aa\ud2ab\ud2ac\ud2ad\ud2ae\ud2af\ud2b0\ud2b1\ud2b2\ud2b3\ud2b4\ud2b5\ud2b6\ud2b7\ud2b8\ud2b9\ud2ba\ud2bb\ud2bc\ud2bd\ud2be\ud2bf\ud2c0\ud2c1\ud2c2\ud2c3\ud2c4\ud2c5\ud2c6\ud2c7\ud2c8\ud2c9\ud2ca\ud2cb\ud2cc\ud2cd\ud2ce\ud2cf\ud2d0\ud2d1\ud2d2\ud2d3\ud2d4\ud2d5\ud2d6\ud2d7\ud2d8\ud2d9\ud2da\ud2db\ud2dc\ud2dd\ud2de\ud2df\ud2e0\ud2e1\ud2e2\ud2e3\ud2e4\ud2e5\ud2e6\ud2e7\ud2e8\ud2e9\ud2ea\ud2eb\ud2ec\ud2ed\ud2ee\ud2ef\ud2f0\ud2f1\ud2f2\ud2f3\ud2f4\ud2f5\ud2f6\ud2f7\ud2f8\ud2f9\ud2fa\ud2fb\ud2fc\ud2fd\ud2
fe\ud2ff\ud300\ud301\ud302\ud303\ud304\ud305\ud306\ud307\ud308\ud309\ud30a\ud30b\ud30c\ud30d\ud30e\ud30f\ud310\ud311\ud312\ud313\ud314\ud315\ud316\ud317\ud318\ud319\ud31a\ud31b\ud31c\ud31d\ud31e\ud31f\ud320\ud321\ud322\ud323\ud324\ud325\ud326\ud327\ud328\ud329\ud32a\ud32b\ud32c\ud32d\ud32e\ud32f\ud330\ud331\ud332\ud333\ud334\ud335\ud336\ud337\ud338\ud339\ud33a\ud33b\ud33c\ud33d\ud33e\ud33f\ud340\ud341\ud342\ud343\ud344\ud345\ud346\ud347\ud348\ud349\ud34a\ud34b\ud34c\ud34d\ud34e\ud34f\ud350\ud351\ud352\ud353\ud354\ud355\ud356\ud357\ud358\ud359\ud35a\ud35b\ud35c\ud35d\ud35e\ud35f\ud360\ud361\ud362\ud363\ud364\ud365\ud366\ud367\ud368\ud369\ud36a\ud36b\ud36c\ud36d\ud36e\ud36f\ud370\ud371\ud372\ud373\ud374\ud375\ud376\ud377\ud378\ud379\ud37a\ud37b\ud37c\ud37d\ud37e\ud37f\ud380\ud381\ud382\ud383\ud384\ud385\ud386\ud387\ud388\ud389\ud38a\ud38b\ud38c\ud38d\ud38e\ud38f\ud390\ud391\ud392\ud393\ud394\ud395\ud396\ud397\ud398\ud399\ud39a\ud39b\ud39c\ud39d\ud39e\ud39f\ud3a0\ud3a1\ud3a2\ud3a3\ud3a4\ud3a5\ud3a6\ud3a7\ud3a8\ud3a9\ud3aa\ud3ab\ud3ac\ud3ad\ud3ae\ud3af\ud3b0\ud3b1\ud3b2\ud3b3\ud3b4\ud3b5\ud3b6\ud3b7\ud3b8\ud3b9\ud3ba\ud3bb\ud3bc\ud3bd\ud3be\ud3bf\ud3c0\ud3c1\ud3c2\ud3c3\ud3c4\ud3c5\ud3c6\ud3c7\ud3c8\ud3c9\ud3ca\ud3cb\ud3cc\ud3cd\ud3ce\ud3cf\ud3d0\ud3d1\ud3d2\ud3d3\ud3d4\ud3d5\ud3d6\ud3d7\ud3d8\ud3d9\ud3da\ud3db\ud3dc\ud3dd\ud3de\ud3df\ud3e0\ud3e1\ud3e2\ud3e3\ud3e4\ud3e5\ud3e6\ud3e7\ud3e8\ud3e9\ud3ea\ud3eb\ud3ec\ud3ed\ud3ee\ud3ef\ud3f0\ud3f1\ud3f2\ud3f3\ud3f4\ud3f5\ud3f6\ud3f7\ud3f8\ud3f9\ud3fa\ud3fb\ud3fc\ud3fd\ud3fe\ud3ff\ud400\ud401\ud402\ud403\ud404\ud405\ud406\ud407\ud408\ud409\ud40a\ud40b\ud40c\ud40d\ud40e\ud40f\ud410\ud411\ud412\ud413\ud414\ud415\ud416\ud417\ud418\ud419\ud41a\ud41b\ud41c\ud41d\ud41e\ud41f\ud420\ud421\ud422\ud423\ud424\ud425\ud426\ud427\ud428\ud429\ud42a\ud42b\ud42c\ud42d\ud42e\ud42f\ud430\ud431\ud432\ud433\ud434\ud435\ud436\ud437\ud438\ud439\ud43a\ud43b\ud43c\ud43d\ud43e\ud43f\ud440\ud441\ud442\ud443\ud444\ud445\ud446\ud447\ud448\ud449\ud44a\ud44b
\ud44c\ud44d\ud44e\ud44f\ud450\ud451\ud452\ud453\ud454\ud455\ud456\ud457\ud458\ud459\ud45a\ud45b\ud45c\ud45d\ud45e\ud45f\ud460\ud461\ud462\ud463\ud464\ud465\ud466\ud467\ud468\ud469\ud46a\ud46b\ud46c\ud46d\ud46e\ud46f\ud470\ud471\ud472\ud473\ud474\ud475\ud476\ud477\ud478\ud479\ud47a\ud47b\ud47c\ud47d\ud47e\ud47f\ud480\ud481\ud482\ud483\ud484\ud485\ud486\ud487\ud488\ud489\ud48a\ud48b\ud48c\ud48d\ud48e\ud48f\ud490\ud491\ud492\ud493\ud494\ud495\ud496\ud497\ud498\ud499\ud49a\ud49b\ud49c\ud49d\ud49e\ud49f\ud4a0\ud4a1\ud4a2\ud4a3\ud4a4\ud4a5\ud4a6\ud4a7\ud4a8\ud4a9\ud4aa\ud4ab\ud4ac\ud4ad\ud4ae\ud4af\ud4b0\ud4b1\ud4b2\ud4b3\ud4b4\ud4b5\ud4b6\ud4b7\ud4b8\ud4b9\ud4ba\ud4bb\ud4bc\ud4bd\ud4be\ud4bf\ud4c0\ud4c1\ud4c2\ud4c3\ud4c4\ud4c5\ud4c6\ud4c7\ud4c8\ud4c9\ud4ca\ud4cb\ud4cc\ud4cd\ud4ce\ud4cf\ud4d0\ud4d1\ud4d2\ud4d3\ud4d4\ud4d5\ud4d6\ud4d7\ud4d8\ud4d9\ud4da\ud4db\ud4dc\ud4dd\ud4de\ud4df\ud4e0\ud4e1\ud4e2\ud4e3\ud4e4\ud4e5\ud4e6\ud4e7\ud4e8\ud4e9\ud4ea\ud4eb\ud4ec\ud4ed\ud4ee\ud4ef\ud4f0\ud4f1\ud4f2\ud4f3\ud4f4\ud4f5\ud4f6\ud4f7\ud4f8\ud4f9\ud4fa\ud4fb\ud4fc\ud4fd\ud4fe\ud4ff\ud500\ud501\ud502\ud503\ud504\ud505\ud506\ud507\ud508\ud509\ud50a\ud50b\ud50c\ud50d\ud50e\ud50f\ud510\ud511\ud512\ud513\ud514\ud515\ud516\ud517\ud518\ud519\ud51a\ud51b\ud51c\ud51d\ud51e\ud51f\ud520\ud521\ud522\ud523\ud524\ud525\ud526\ud527\ud528\ud529\ud52a\ud52b\ud52c\ud52d\ud52e\ud52f\ud530\ud531\ud532\ud533\ud534\ud535\ud536\ud537\ud538\ud539\ud53a\ud53b\ud53c\ud53d\ud53e\ud53f\ud540\ud541\ud542\ud543\ud544\ud545\ud546\ud547\ud548\ud549\ud54a\ud54b\ud54c\ud54d\ud54e\ud54f\ud550\ud551\ud552\ud553\ud554\ud555\ud556\ud557\ud558\ud559\ud55a\ud55b\ud55c\ud55d\ud55e\ud55f\ud560\ud561\ud562\ud563\ud564\ud565\ud566\ud567\ud568\ud569\ud56a\ud56b\ud56c\ud56d\ud56e\ud56f\ud570\ud571\ud572\ud573\ud574\ud575\ud576\ud577\ud578\ud579\ud57a\ud57b\ud57c\ud57d\ud57e\ud57f\ud580\ud581\ud582\ud583\ud584\ud585\ud586\ud587\ud588\ud589\ud58a\ud58b\ud58c\ud58d\ud58e\ud58f\ud590\ud591\ud592\ud593\ud594\ud595\ud596\ud597\ud598\u
d599\ud59a\ud59b\ud59c\ud59d\ud59e\ud59f\ud5a0\ud5a1\ud5a2\ud5a3\ud5a4\ud5a5\ud5a6\ud5a7\ud5a8\ud5a9\ud5aa\ud5ab\ud5ac\ud5ad\ud5ae\ud5af\ud5b0\ud5b1\ud5b2\ud5b3\ud5b4\ud5b5\ud5b6\ud5b7\ud5b8\ud5b9\ud5ba\ud5bb\ud5bc\ud5bd\ud5be\ud5bf\ud5c0\ud5c1\ud5c2\ud5c3\ud5c4\ud5c5\ud5c6\ud5c7\ud5c8\ud5c9\ud5ca\ud5cb\ud5cc\ud5cd\ud5ce\ud5cf\ud5d0\ud5d1\ud5d2\ud5d3\ud5d4\ud5d5\ud5d6\ud5d7\ud5d8\ud5d9\ud5da\ud5db\ud5dc\ud5dd\ud5de\ud5df\ud5e0\ud5e1\ud5e2\ud5e3\ud5e4\ud5e5\ud5e6\ud5e7\ud5e8\ud5e9\ud5ea\ud5eb\ud5ec\ud5ed\ud5ee\ud5ef\ud5f0\ud5f1\ud5f2\ud5f3\ud5f4\ud5f5\ud5f6\ud5f7\ud5f8\ud5f9\ud5fa\ud5fb\ud5fc\ud5fd\ud5fe\ud5ff\ud600\ud601\ud602\ud603\ud604\ud605\ud606\ud607\ud608\ud609\ud60a\ud60b\ud60c\ud60d\ud60e\ud60f\ud610\ud611\ud612\ud613\ud614\ud615\ud616\ud617\ud618\ud619\ud61a\ud61b\ud61c\ud61d\ud61e\ud61f\ud620\ud621\ud622\ud623\ud624\ud625\ud626\ud627\ud628\ud629\ud62a\ud62b\ud62c\ud62d\ud62e\ud62f\ud630\ud631\ud632\ud633\ud634\ud635\ud636\ud637\ud638\ud639\ud63a\ud63b\ud63c\ud63d\ud63e\ud63f\ud640\ud641\ud642\ud643\ud644\ud645\ud646\ud647\ud648\ud649\ud64a\ud64b\ud64c\ud64d\ud64e\ud64f\ud650\ud651\ud652\ud653\ud654\ud655\ud656\ud657\ud658\ud659\ud65a\ud65b\ud65c\ud65d\ud65e\ud65f\ud660\ud661\ud662\ud663\ud664\ud665\ud666\ud667\ud668\ud669\ud66a\ud66b\ud66c\ud66d\ud66e\ud66f\ud670\ud671\ud672\ud673\ud674\ud675\ud676\ud677\ud678\ud679\ud67a\ud67b\ud67c\ud67d\ud67e\ud67f\ud680\ud681\ud682\ud683\ud684\ud685\ud686\ud687\ud688\ud689\ud68a\ud68b\ud68c\ud68d\ud68e\ud68f\ud690\ud691\ud692\ud693\ud694\ud695\ud696\ud697\ud698\ud699\ud69a\ud69b\ud69c\ud69d\ud69e\ud69f\ud6a0\ud6a1\ud6a2\ud6a3\ud6a4\ud6a5\ud6a6\ud6a7\ud6a8\ud6a9\ud6aa\ud6ab\ud6ac\ud6ad\ud6ae\ud6af\ud6b0\ud6b1\ud6b2\ud6b3\ud6b4\ud6b5\ud6b6\ud6b7\ud6b8\ud6b9\ud6ba\ud6bb\ud6bc\ud6bd\ud6be\ud6bf\ud6c0\ud6c1\ud6c2\ud6c3\ud6c4\ud6c5\ud6c6\ud6c7\ud6c8\ud6c9\ud6ca\ud6cb\ud6cc\ud6cd\ud6ce\ud6cf\ud6d0\ud6d1\ud6d2\ud6d3\ud6d4\ud6d5\ud6d6\ud6d7\ud6d8\ud6d9\ud6da\ud6db\ud6dc\ud6dd\ud6de\ud6df\ud6e0\ud6e1\ud6e2\ud6e3\ud6e4\ud6e5\ud6
e6\ud6e7\ud6e8\ud6e9\ud6ea\ud6eb\ud6ec\ud6ed\ud6ee\ud6ef\ud6f0\ud6f1\ud6f2\ud6f3\ud6f4\ud6f5\ud6f6\ud6f7\ud6f8\ud6f9\ud6fa\ud6fb\ud6fc\ud6fd\ud6fe\ud6ff\ud700\ud701\ud702\ud703\ud704\ud705\ud706\ud707\ud708\ud709\ud70a\ud70b\ud70c\ud70d\ud70e\ud70f\ud710\ud711\ud712\ud713\ud714\ud715\ud716\ud717\ud718\ud719\ud71a\ud71b\ud71c\ud71d\ud71e\ud71f\ud720\ud721\ud722\ud723\ud724\ud725\ud726\ud727\ud728\ud729\ud72a\ud72b\ud72c\ud72d\ud72e\ud72f\ud730\ud731\ud732\ud733\ud734\ud735\ud736\ud737\ud738\ud739\ud73a\ud73b\ud73c\ud73d\ud73e\ud73f\ud740\ud741\ud742\ud743\ud744\ud745\ud746\ud747\ud748\ud749\ud74a\ud74b\ud74c\ud74d\ud74e\ud74f\ud750\ud751\ud752\ud753\ud754\ud755\ud756\ud757\ud758\ud759\ud75a\ud75b\ud75c\ud75d\ud75e\ud75f\ud760\ud761\ud762\ud763\ud764\ud765\ud766\ud767\ud768\ud769\ud76a\ud76b\ud76c\ud76d\ud76e\ud76f\ud770\ud771\ud772\ud773\ud774\ud775\ud776\ud777\ud778\ud779\ud77a\ud77b\ud77c\ud77d\ud77e\ud77f\ud780\ud781\ud782\ud783\ud784\ud785\ud786\ud787\ud788\ud789\ud78a\ud78b\ud78c\ud78d\ud78e\ud78f\ud790\ud791\ud792\ud793\ud794\ud795\ud796\ud797\ud798\ud799\ud79a\ud79b\ud79c\ud79d\ud79e\ud79f\ud7a0\ud7a1\ud7a2\ud7a3\uf900\uf901\uf902\uf903\uf904\uf905\uf906\uf907\uf908\uf909\uf90a\uf90b\uf90c\uf90d\uf90e\uf90f\uf910\uf911\uf912\uf913\uf914\uf915\uf916\uf917\uf918\uf919\uf91a\uf91b\uf91c\uf91d\uf91e\uf91f\uf920\uf921\uf922\uf923\uf924\uf925\uf926\uf927\uf928\uf929\uf92a\uf92b\uf92c\uf92d\uf92e\uf92f\uf930\uf931\uf932\uf933\uf934\uf935\uf936\uf937\uf938\uf939\uf93a\uf93b\uf93c\uf93d\uf93e\uf93f\uf940\uf941\uf942\uf943\uf944\uf945\uf946\uf947\uf948\uf949\uf94a\uf94b\uf94c\uf94d\uf94e\uf94f\uf950\uf951\uf952\uf953\uf954\uf955\uf956\uf957\uf958\uf959\uf95a\uf95b\uf95c\uf95d\uf95e\uf95f\uf960\uf961\uf962\uf963\uf964\uf965\uf966\uf967\uf968\uf969\uf96a\uf96b\uf96c\uf96d\uf96e\uf96f\uf970\uf971\uf972\uf973\uf974\uf975\uf976\uf977\uf978\uf979\uf97a\uf97b\uf97c\uf97d\uf97e\uf97f\uf980\uf981\uf982\uf983\uf984\uf985\uf986\uf987\uf988\uf989\uf98a\uf98b\uf98c\uf98d\uf98e\uf98f
\uf990\uf991\uf992\uf993\uf994\uf995\uf996\uf997\uf998\uf999\uf99a\uf99b\uf99c\uf99d\uf99e\uf99f\uf9a0\uf9a1\uf9a2\uf9a3\uf9a4\uf9a5\uf9a6\uf9a7\uf9a8\uf9a9\uf9aa\uf9ab\uf9ac\uf9ad\uf9ae\uf9af\uf9b0\uf9b1\uf9b2\uf9b3\uf9b4\uf9b5\uf9b6\uf9b7\uf9b8\uf9b9\uf9ba\uf9bb\uf9bc\uf9bd\uf9be\uf9bf\uf9c0\uf9c1\uf9c2\uf9c3\uf9c4\uf9c5\uf9c6\uf9c7\uf9c8\uf9c9\uf9ca\uf9cb\uf9cc\uf9cd\uf9ce\uf9cf\uf9d0\uf9d1\uf9d2\uf9d3\uf9d4\uf9d5\uf9d6\uf9d7\uf9d8\uf9d9\uf9da\uf9db\uf9dc\uf9dd\uf9de\uf9df\uf9e0\uf9e1\uf9e2\uf9e3\uf9e4\uf9e5\uf9e6\uf9e7\uf9e8\uf9e9\uf9ea\uf9eb\uf9ec\uf9ed\uf9ee\uf9ef\uf9f0\uf9f1\uf9f2\uf9f3\uf9f4\uf9f5\uf9f6\uf9f7\uf9f8\uf9f9\uf9fa\uf9fb\uf9fc\uf9fd\uf9fe\uf9ff\ufa00\ufa01\ufa02\ufa03\ufa04\ufa05\ufa06\ufa07\ufa08\ufa09\ufa0a\ufa0b\ufa0c\ufa0d\ufa0e\ufa0f\ufa10\ufa11\ufa12\ufa13\ufa14\ufa15\ufa16\ufa17\ufa18\ufa19\ufa1a\ufa1b\ufa1c\ufa1d\ufa1e\ufa1f\ufa20\ufa21\ufa22\ufa23\ufa24\ufa25\ufa26\ufa27\ufa28\ufa29\ufa2a\ufa2b\ufa2c\ufa2d\ufa30\ufa31\ufa32\ufa33\ufa34\ufa35\ufa36\ufa37\ufa38\ufa39\ufa3a\ufa3b\ufa3c\ufa3d\ufa3e\ufa3f\ufa40\ufa41\ufa42\ufa43\ufa44\ufa45\ufa46\ufa47\ufa48\ufa49\ufa4a\ufa4b\ufa4c\ufa4d\ufa4e\ufa4f\ufa50\ufa51\ufa52\ufa53\ufa54\ufa55\ufa56\ufa57\ufa58\ufa59\ufa5a\ufa5b\ufa5c\ufa5d\ufa5e\ufa5f\ufa60\ufa61\ufa62\ufa63\ufa64\ufa65\ufa66\ufa67\ufa68\ufa69\ufa6a\ufa70\ufa71\ufa72\ufa73\ufa74\ufa75\ufa76\ufa77\ufa78\ufa79\ufa7a\ufa7b\ufa7c\ufa7d\ufa7e\ufa7f\ufa80\ufa81\ufa82\ufa83\ufa84\ufa85\ufa86\ufa87\ufa88\ufa89\ufa8a\ufa8b\ufa8c\ufa8d\ufa8e\ufa8f\ufa90\ufa91\ufa92\ufa93\ufa94\ufa95\ufa96\ufa97\ufa98\ufa99\ufa9a\ufa9b\ufa9c\ufa9d\ufa9e\ufa9f\ufaa0\ufaa1\ufaa2\ufaa3\ufaa4\ufaa5\ufaa6\ufaa7\ufaa8\ufaa9\ufaaa\ufaab\ufaac\ufaad\ufaae\ufaaf\ufab0\ufab1\ufab2\ufab3\ufab4\ufab5\ufab6\ufab7\ufab8\ufab9\ufaba\ufabb\ufabc\ufabd\ufabe\ufabf\ufac0\ufac1\ufac2\ufac3\ufac4\ufac5\ufac6\ufac7\ufac8\ufac9\ufaca\ufacb\ufacc\ufacd\uface\ufacf\ufad0\ufad1\ufad2\ufad3\ufad4\ufad5\ufad6\ufad7\ufad8\ufad9\ufb1d\ufb1f\ufb20\ufb21\ufb22\ufb23\ufb24\ufb25\ufb26\ufb27\u
fb28\ufb2a\ufb2b\ufb2c\ufb2d\ufb2e\ufb2f\ufb30\ufb31\ufb32\ufb33\ufb34\ufb35\ufb36\ufb38\ufb39\ufb3a\ufb3b\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46\ufb47\ufb48\ufb49\ufb4a\ufb4b\ufb4c\ufb4d\ufb4e\ufb4f\ufb50\ufb51\ufb52\ufb53\ufb54\ufb55\ufb56\ufb57\ufb58\ufb59\ufb5a\ufb5b\ufb5c\ufb5d\ufb5e\ufb5f\ufb60\ufb61\ufb62\ufb63\ufb64\ufb65\ufb66\ufb67\ufb68\ufb69\ufb6a\ufb6b\ufb6c\ufb6d\ufb6e\ufb6f\ufb70\ufb71\ufb72\ufb73\ufb74\ufb75\ufb76\ufb77\ufb78\ufb79\ufb7a\ufb7b\ufb7c\ufb7d\ufb7e\ufb7f\ufb80\ufb81\ufb82\ufb83\ufb84\ufb85\ufb86\ufb87\ufb88\ufb89\ufb8a\ufb8b\ufb8c\ufb8d\ufb8e\ufb8f\ufb90\ufb91\ufb92\ufb93\ufb94\ufb95\ufb96\ufb97\ufb98\ufb99\ufb9a\ufb9b\ufb9c\ufb9d\ufb9e\ufb9f\ufba0\ufba1\ufba2\ufba3\ufba4\ufba5\ufba6\ufba7\ufba8\ufba9\ufbaa\ufbab\ufbac\ufbad\ufbae\ufbaf\ufbb0\ufbb1\ufbd3\ufbd4\ufbd5\ufbd6\ufbd7\ufbd8\ufbd9\ufbda\ufbdb\ufbdc\ufbdd\ufbde\ufbdf\ufbe0\ufbe1\ufbe2\ufbe3\ufbe4\ufbe5\ufbe6\ufbe7\ufbe8\ufbe9\ufbea\ufbeb\ufbec\ufbed\ufbee\ufbef\ufbf0\ufbf1\ufbf2\ufbf3\ufbf4\ufbf5\ufbf6\ufbf7\ufbf8\ufbf9\ufbfa\ufbfb\ufbfc\ufbfd\ufbfe\ufbff\ufc00\ufc01\ufc02\ufc03\ufc04\ufc05\ufc06\ufc07\ufc08\ufc09\ufc0a\ufc0b\ufc0c\ufc0d\ufc0e\ufc0f\ufc10\ufc11\ufc12\ufc13\ufc14\ufc15\ufc16\ufc17\ufc18\ufc19\ufc1a\ufc1b\ufc1c\ufc1d\ufc1e\ufc1f\ufc20\ufc21\ufc22\ufc23\ufc24\ufc25\ufc26\ufc27\ufc28\ufc29\ufc2a\ufc2b\ufc2c\ufc2d\ufc2e\ufc2f\ufc30\ufc31\ufc32\ufc33\ufc34\ufc35\ufc36\ufc37\ufc38\ufc39\ufc3a\ufc3b\ufc3c\ufc3d\ufc3e\ufc3f\ufc40\ufc41\ufc42\ufc43\ufc44\ufc45\ufc46\ufc47\ufc48\ufc49\ufc4a\ufc4b\ufc4c\ufc4d\ufc4e\ufc4f\ufc50\ufc51\ufc52\ufc53\ufc54\ufc55\ufc56\ufc57\ufc58\ufc59\ufc5a\ufc5b\ufc5c\ufc5d\ufc5e\ufc5f\ufc60\ufc61\ufc62\ufc63\ufc64\ufc65\ufc66\ufc67\ufc68\ufc69\ufc6a\ufc6b\ufc6c\ufc6d\ufc6e\ufc6f\ufc70\ufc71\ufc72\ufc73\ufc74\ufc75\ufc76\ufc77\ufc78\ufc79\ufc7a\ufc7b\ufc7c\ufc7d\ufc7e\ufc7f\ufc80\ufc81\ufc82\ufc83\ufc84\ufc85\ufc86\ufc87\ufc88\ufc89\ufc8a\ufc8b\ufc8c\ufc8d\ufc8e\ufc8f\ufc90\ufc91\ufc92\ufc93\ufc94\ufc95\ufc96\ufc97\ufc98\ufc99\ufc9a\ufc9b\ufc
9c\ufc9d\ufc9e\ufc9f\ufca0\ufca1\ufca2\ufca3\ufca4\ufca5\ufca6\ufca7\ufca8\ufca9\ufcaa\ufcab\ufcac\ufcad\ufcae\ufcaf\ufcb0\ufcb1\ufcb2\ufcb3\ufcb4\ufcb5\ufcb6\ufcb7\ufcb8\ufcb9\ufcba\ufcbb\ufcbc\ufcbd\ufcbe\ufcbf\ufcc0\ufcc1\ufcc2\ufcc3\ufcc4\ufcc5\ufcc6\ufcc7\ufcc8\ufcc9\ufcca\ufccb\ufccc\ufccd\ufcce\ufccf\ufcd0\ufcd1\ufcd2\ufcd3\ufcd4\ufcd5\ufcd6\ufcd7\ufcd8\ufcd9\ufcda\ufcdb\ufcdc\ufcdd\ufcde\ufcdf\ufce0\ufce1\ufce2\ufce3\ufce4\ufce5\ufce6\ufce7\ufce8\ufce9\ufcea\ufceb\ufcec\ufced\ufcee\ufcef\ufcf0\ufcf1\ufcf2\ufcf3\ufcf4\ufcf5\ufcf6\ufcf7\ufcf8\ufcf9\ufcfa\ufcfb\ufcfc\ufcfd\ufcfe\ufcff\ufd00\ufd01\ufd02\ufd03\ufd04\ufd05\ufd06\ufd07\ufd08\ufd09\ufd0a\ufd0b\ufd0c\ufd0d\ufd0e\ufd0f\ufd10\ufd11\ufd12\ufd13\ufd14\ufd15\ufd16\ufd17\ufd18\ufd19\ufd1a\ufd1b\ufd1c\ufd1d\ufd1e\ufd1f\ufd20\ufd21\ufd22\ufd23\ufd24\ufd25\ufd26\ufd27\ufd28\ufd29\ufd2a\ufd2b\ufd2c\ufd2d\ufd2e\ufd2f\ufd30\ufd31\ufd32\ufd33\ufd34\ufd35\ufd36\ufd37\ufd38\ufd39\ufd3a\ufd3b\ufd3c\ufd3d\ufd50\ufd51\ufd52\ufd53\ufd54\ufd55\ufd56\ufd57\ufd58\ufd59\ufd5a\ufd5b\ufd5c\ufd5d\ufd5e\ufd5f\ufd60\ufd61\ufd62\ufd63\ufd64\ufd65\ufd66\ufd67\ufd68\ufd69\ufd6a\ufd6b\ufd6c\ufd6d\ufd6e\ufd6f\ufd70\ufd71\ufd72\ufd73\ufd74\ufd75\ufd76\ufd77\ufd78\ufd79\ufd7a\ufd7b\ufd7c\ufd7d\ufd7e\ufd7f\ufd80\ufd81\ufd82\ufd83\ufd84\ufd85\ufd86\ufd87\ufd88\ufd89\ufd8a\ufd8b\ufd8c\ufd8d\ufd8e\ufd8f\ufd92\ufd93\ufd94\ufd95\ufd96\ufd97\ufd98\ufd99\ufd9a\ufd9b\ufd9c\ufd9d\ufd9e\ufd9f\ufda0\ufda1\ufda2\ufda3\ufda4\ufda5\ufda6\ufda7\ufda8\ufda9\ufdaa\ufdab\ufdac\ufdad\ufdae\ufdaf\ufdb0\ufdb1\ufdb2\ufdb3\ufdb4\ufdb5\ufdb6\ufdb7\ufdb8\ufdb9\ufdba\ufdbb\ufdbc\ufdbd\ufdbe\ufdbf\ufdc0\ufdc1\ufdc2\ufdc3\ufdc4\ufdc5\ufdc6\ufdc7\ufdf0\ufdf1\ufdf2\ufdf3\ufdf4\ufdf5\ufdf6\ufdf7\ufdf8\ufdf9\ufdfa\ufdfb\ufe70\ufe71\ufe72\ufe73\ufe74\ufe76\ufe77\ufe78\ufe79\ufe7a\ufe7b\ufe7c\ufe7d\ufe7e\ufe7f\ufe80\ufe81\ufe82\ufe83\ufe84\ufe85\ufe86\ufe87\ufe88\ufe89\ufe8a\ufe8b\ufe8c\ufe8d\ufe8e\ufe8f\ufe90\ufe91\ufe92\ufe93\ufe94\ufe95\ufe96\ufe97\ufe98\ufe99\ufe9a
\ufe9b\ufe9c\ufe9d\ufe9e\ufe9f\ufea0\ufea1\ufea2\ufea3\ufea4\ufea5\ufea6\ufea7\ufea8\ufea9\ufeaa\ufeab\ufeac\ufead\ufeae\ufeaf\ufeb0\ufeb1\ufeb2\ufeb3\ufeb4\ufeb5\ufeb6\ufeb7\ufeb8\ufeb9\ufeba\ufebb\ufebc\ufebd\ufebe\ufebf\ufec0\ufec1\ufec2\ufec3\ufec4\ufec5\ufec6\ufec7\ufec8\ufec9\ufeca\ufecb\ufecc\ufecd\ufece\ufecf\ufed0\ufed1\ufed2\ufed3\ufed4\ufed5\ufed6\ufed7\ufed8\ufed9\ufeda\ufedb\ufedc\ufedd\ufede\ufedf\ufee0\ufee1\ufee2\ufee3\ufee4\ufee5\ufee6\ufee7\ufee8\ufee9\ufeea\ufeeb\ufeec\ufeed\ufeee\ufeef\ufef0\ufef1\ufef2\ufef3\ufef4\ufef5\ufef6\ufef7\ufef8\ufef9\ufefa\ufefb\ufefc\uff66\uff67\uff68\uff69\uff6a\uff6b\uff6c\uff6d\uff6e\uff6f\uff71\uff72\uff73\uff74\uff75\uff76\uff77\uff78\uff79\uff7a\uff7b\uff7c\uff7d\uff7e\uff7f\uff80\uff81\uff82\uff83\uff84\uff85\uff86\uff87\uff88\uff89\uff8a\uff8b\uff8c\uff8d\uff8e\uff8f\uff90\uff91\uff92\uff93\uff94\uff95\uff96\uff97\uff98\uff99\uff9a\uff9b\uff9c\uff9d\uffa0\uffa1\uffa2\uffa3\uffa4\uffa5\uffa6\uffa7\uffa8\uffa9\uffaa\uffab\uffac\uffad\uffae\uffaf\uffb0\uffb1\uffb2\uffb3\uffb4\uffb5\uffb6\uffb7\uffb8\uffb9\uffba\uffbb\uffbc\uffbd\uffbe\uffc2\uffc3\uffc4\uffc5\uffc6\uffc7\uffca\uffcb\uffcc\uffcd\uffce\uffcf\uffd2\uffd3\uffd4\uffd5\uffd6\uffd7\uffda\uffdb\uffdc'
+
+Lt = u'\u01c5\u01c8\u01cb\u01f2\u1f88\u1f89\u1f8a\u1f8b\u1f8c\u1f8d\u1f8e\u1f8f\u1f98\u1f99\u1f9a\u1f9b\u1f9c\u1f9d\u1f9e\u1f9f\u1fa8\u1fa9\u1faa\u1fab\u1fac\u1fad\u1fae\u1faf\u1fbc\u1fcc\u1ffc'
+
+Lu = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8\xd9\xda\xdb\xdc\xdd\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178\u0179\u017b\u017d\u0181\u0182\u0184\u0186\u0187\u0189\u018a\u018b\u018e\u018f\u0190\u0191\u0193\u0194\u0196\u0197\u0198\u019c\u019d\u019f\u01a0\u01a2\u01a4\u01a6\u01a7\u01a9\u01ac\u01ae\u01af\u01b1\u01b2\u01b3\u01b5\u01b7\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6\u01f7\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a\u023b\u023d\u023e\u0241\u0386\u0388\u0389\u038a\u038c\u038e\u038f\u0391\u0392\u0393\u0394\u0395\u0396\u0397\u0398\u0399\u039a\u039b\u039c\u039d\u039e\u039f\u03a0\u03a1\u03a3\u03a4\u03a5\u03a6\u03a7\u03a8\u03a9\u03aa\u03ab\u03d2\u03d3\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9\u03fa\u03fd\u03fe\u03ff\u0400\u0401\u0402\u0403\u0404\u0405\u0406\u0407\u0408\u0409\u040a\u040b\u040c\u040d\u040e\u040f\u0410\u0411\u0412\u0413\u0414\u0415\u0416\u0417\u0418\u0419\u041a\u041b\u041c\u041d\u041e\u041f\u0420\u0421\u0422\u0423\u0424\u0425\u0426\u0427\u0428\u0429\u042a\u042b\u042c\u042d\u042e\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0\u04c1\u04
c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0531\u0532\u0533\u0534\u0535\u0536\u0537\u0538\u0539\u053a\u053b\u053c\u053d\u053e\u053f\u0540\u0541\u0542\u0543\u0544\u0545\u0546\u0547\u0548\u0549\u054a\u054b\u054c\u054d\u054e\u054f\u0550\u0551\u0552\u0553\u0554\u0555\u0556\u10a0\u10a1\u10a2\u10a3\u10a4\u10a5\u10a6\u10a7\u10a8\u10a9\u10aa\u10ab\u10ac\u10ad\u10ae\u10af\u10b0\u10b1\u10b2\u10b3\u10b4\u10b5\u10b6\u10b7\u10b8\u10b9\u10ba\u10bb\u10bc\u10bd\u10be\u10bf\u10c0\u10c1\u10c2\u10c3\u10c4\u10c5\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1f08\u1f09\u1f0a\u1f0b\u1f0c\u1f0d\u1f0e\u1f0f\u1f18\u1f19\u1f1a\u1f1b\u1f1c\u1f1d\u1f28\u1f29\u1f2a\u1f2b\u1f2c\u1f2d\u1f2e\u1f2f\u1f38\u1f39\u1f3a\u1f3b\u1f3c\u1f3d\u1f3e\u1f3f\u1f48\u1f49\u1f4a\u1f4b\u1f4c\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68\u1f69\u1f6a\u1f6b\u1f6c\u1f6d\u1f6e\u1f6f\u1fb8\u1fb9\u1fba\u1fbb\u1fc8\u1fc9\u1fca\u1fcb\u1fd8\u1fd9\u1fda\u1fdb\u1fe8\u1fe9\u1fea\u1feb\u1fec\u1ff8\u1ff9\u1ffa\u1ffb\u2102\u2107\u210b\u210c\u210d\u2110\u2111\u2112\u2115\u2119\u211a\u211b\u211c\u211d\u2124\u2126\u2128\u212a\u212b\u212c\u212d\u2130\u2131\u2133\u213e\u213f\u2145\u2c00\u2c01\u2c02\u2c03\u2c04\u2c05\u2c06
\u2c07\u2c08\u2c09\u2c0a\u2c0b\u2c0c\u2c0d\u2c0e\u2c0f\u2c10\u2c11\u2c12\u2c13\u2c14\u2c15\u2c16\u2c17\u2c18\u2c19\u2c1a\u2c1b\u2c1c\u2c1d\u2c1e\u2c1f\u2c20\u2c21\u2c22\u2c23\u2c24\u2c25\u2c26\u2c27\u2c28\u2c29\u2c2a\u2c2b\u2c2c\u2c2d\u2c2e\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\uff21\uff22\uff23\uff24\uff25\uff26\uff27\uff28\uff29\uff2a\uff2b\uff2c\uff2d\uff2e\uff2f\uff30\uff31\uff32\uff33\uff34\uff35\uff36\uff37\uff38\uff39\uff3a'
+
+Mc = u'\u0903\u093e\u093f\u0940\u0949\u094a\u094b\u094c\u0982\u0983\u09be\u09bf\u09c0\u09c7\u09c8\u09cb\u09cc\u09d7\u0a03\u0a3e\u0a3f\u0a40\u0a83\u0abe\u0abf\u0ac0\u0ac9\u0acb\u0acc\u0b02\u0b03\u0b3e\u0b40\u0b47\u0b48\u0b4b\u0b4c\u0b57\u0bbe\u0bbf\u0bc1\u0bc2\u0bc6\u0bc7\u0bc8\u0bca\u0bcb\u0bcc\u0bd7\u0c01\u0c02\u0c03\u0c41\u0c42\u0c43\u0c44\u0c82\u0c83\u0cbe\u0cc0\u0cc1\u0cc2\u0cc3\u0cc4\u0cc7\u0cc8\u0cca\u0ccb\u0cd5\u0cd6\u0d02\u0d03\u0d3e\u0d3f\u0d40\u0d46\u0d47\u0d48\u0d4a\u0d4b\u0d4c\u0d57\u0d82\u0d83\u0dcf\u0dd0\u0dd1\u0dd8\u0dd9\u0dda\u0ddb\u0ddc\u0ddd\u0dde\u0ddf\u0df2\u0df3\u0f3e\u0f3f\u0f7f\u102c\u1031\u1038\u1056\u1057\u17b6\u17be\u17bf\u17c0\u17c1\u17c2\u17c3\u17c4\u17c5\u17c7\u17c8\u1923\u1924\u1925\u1926\u1929\u192a\u192b\u1930\u1931\u1933\u1934\u1935\u1936\u1937\u1938\u19b0\u19b1\u19b2\u19b3\u19b4\u19b5\u19b6\u19b7\u19b8\u19b9\u19ba\u19bb\u19bc\u19bd\u19be\u19bf\u19c0\u19c8\u19c9\u1a19\u1a1a\u1a1b\ua802\ua823\ua824\ua827'
+
+Me = u'\u0488\u0489\u06de\u20dd\u20de\u20df\u20e0\u20e2\u20e3\u20e4'
+
+Mn = u'\u0300\u0301\u0302\u0303\u0304\u0305\u0306\u0307\u0308\u0309\u030a\u030b\u030c\u030d\u030e\u030f\u0310\u0311\u0312\u0313\u0314\u0315\u0316\u0317\u0318\u0319\u031a\u031b\u031c\u031d\u031e\u031f\u0320\u0321\u0322\u0323\u0324\u0325\u0326\u0327\u0328\u0329\u032a\u032b\u032c\u032d\u032e\u032f\u0330\u0331\u0332\u0333\u0334\u0335\u0336\u0337\u0338\u0339\u033a\u033b\u033c\u033d\u033e\u033f\u0340\u0341\u0342\u0343\u0344\u0345\u0346\u0347\u0348\u0349\u034a\u034b\u034c\u034d\u034e\u034f\u0350\u0351\u0352\u0353\u0354\u0355\u0356\u0357\u0358\u0359\u035a\u035b\u035c\u035d\u035e\u035f\u0360\u0361\u0362\u0363\u0364\u0365\u0366\u0367\u0368\u0369\u036a\u036b\u036c\u036d\u036e\u036f\u0483\u0484\u0485\u0486\u0591\u0592\u0593\u0594\u0595\u0596\u0597\u0598\u0599\u059a\u059b\u059c\u059d\u059e\u059f\u05a0\u05a1\u05a2\u05a3\u05a4\u05a5\u05a6\u05a7\u05a8\u05a9\u05aa\u05ab\u05ac\u05ad\u05ae\u05af\u05b0\u05b1\u05b2\u05b3\u05b4\u05b5\u05b6\u05b7\u05b8\u05b9\u05bb\u05bc\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610\u0611\u0612\u0613\u0614\u0615\u064b\u064c\u064d\u064e\u064f\u0650\u0651\u0652\u0653\u0654\u0655\u0656\u0657\u0658\u0659\u065a\u065b\u065c\u065d\u065e\u0670\u06d6\u06d7\u06d8\u06d9\u06da\u06db\u06dc\u06df\u06e0\u06e1\u06e2\u06e3\u06e4\u06e7\u06e8\u06ea\u06eb\u06ec\u06ed\u0711\u0730\u0731\u0732\u0733\u0734\u0735\u0736\u0737\u0738\u0739\u073a\u073b\u073c\u073d\u073e\u073f\u0740\u0741\u0742\u0743\u0744\u0745\u0746\u0747\u0748\u0749\u074a\u07a6\u07a7\u07a8\u07a9\u07aa\u07ab\u07ac\u07ad\u07ae\u07af\u07b0\u0901\u0902\u093c\u0941\u0942\u0943\u0944\u0945\u0946\u0947\u0948\u094d\u0951\u0952\u0953\u0954\u0962\u0963\u0981\u09bc\u09c1\u09c2\u09c3\u09c4\u09cd\u09e2\u09e3\u0a01\u0a02\u0a3c\u0a41\u0a42\u0a47\u0a48\u0a4b\u0a4c\u0a4d\u0a70\u0a71\u0a81\u0a82\u0abc\u0ac1\u0ac2\u0ac3\u0ac4\u0ac5\u0ac7\u0ac8\u0acd\u0ae2\u0ae3\u0b01\u0b3c\u0b3f\u0b41\u0b42\u0b43\u0b4d\u0b56\u0b82\u0bc0\u0bcd\u0c3e\u0c3f\u0c40\u0c46\u0c47\u0c48\u0c4a\u0c4b\u0c4c\u0c4d\u0c55\u0c56\u0cbc\u0cbf\u0cc6\u0ccc\u0ccd\u0d41
\u0d42\u0d43\u0d4d\u0dca\u0dd2\u0dd3\u0dd4\u0dd6\u0e31\u0e34\u0e35\u0e36\u0e37\u0e38\u0e39\u0e3a\u0e47\u0e48\u0e49\u0e4a\u0e4b\u0e4c\u0e4d\u0e4e\u0eb1\u0eb4\u0eb5\u0eb6\u0eb7\u0eb8\u0eb9\u0ebb\u0ebc\u0ec8\u0ec9\u0eca\u0ecb\u0ecc\u0ecd\u0f18\u0f19\u0f35\u0f37\u0f39\u0f71\u0f72\u0f73\u0f74\u0f75\u0f76\u0f77\u0f78\u0f79\u0f7a\u0f7b\u0f7c\u0f7d\u0f7e\u0f80\u0f81\u0f82\u0f83\u0f84\u0f86\u0f87\u0f90\u0f91\u0f92\u0f93\u0f94\u0f95\u0f96\u0f97\u0f99\u0f9a\u0f9b\u0f9c\u0f9d\u0f9e\u0f9f\u0fa0\u0fa1\u0fa2\u0fa3\u0fa4\u0fa5\u0fa6\u0fa7\u0fa8\u0fa9\u0faa\u0fab\u0fac\u0fad\u0fae\u0faf\u0fb0\u0fb1\u0fb2\u0fb3\u0fb4\u0fb5\u0fb6\u0fb7\u0fb8\u0fb9\u0fba\u0fbb\u0fbc\u0fc6\u102d\u102e\u102f\u1030\u1032\u1036\u1037\u1039\u1058\u1059\u135f\u1712\u1713\u1714\u1732\u1733\u1734\u1752\u1753\u1772\u1773\u17b7\u17b8\u17b9\u17ba\u17bb\u17bc\u17bd\u17c6\u17c9\u17ca\u17cb\u17cc\u17cd\u17ce\u17cf\u17d0\u17d1\u17d2\u17d3\u17dd\u180b\u180c\u180d\u18a9\u1920\u1921\u1922\u1927\u1928\u1932\u1939\u193a\u193b\u1a17\u1a18\u1dc0\u1dc1\u1dc2\u1dc3\u20d0\u20d1\u20d2\u20d3\u20d4\u20d5\u20d6\u20d7\u20d8\u20d9\u20da\u20db\u20dc\u20e1\u20e5\u20e6\u20e7\u20e8\u20e9\u20ea\u20eb\u302a\u302b\u302c\u302d\u302e\u302f\u3099\u309a\ua806\ua80b\ua825\ua826\ufb1e\ufe00\ufe01\ufe02\ufe03\ufe04\ufe05\ufe06\ufe07\ufe08\ufe09\ufe0a\ufe0b\ufe0c\ufe0d\ufe0e\ufe0f\ufe20\ufe21\ufe22\ufe23'
+
+Nd = u'0123456789\u0660\u0661\u0662\u0663\u0664\u0665\u0666\u0667\u0668\u0669\u06f0\u06f1\u06f2\u06f3\u06f4\u06f5\u06f6\u06f7\u06f8\u06f9\u0966\u0967\u0968\u0969\u096a\u096b\u096c\u096d\u096e\u096f\u09e6\u09e7\u09e8\u09e9\u09ea\u09eb\u09ec\u09ed\u09ee\u09ef\u0a66\u0a67\u0a68\u0a69\u0a6a\u0a6b\u0a6c\u0a6d\u0a6e\u0a6f\u0ae6\u0ae7\u0ae8\u0ae9\u0aea\u0aeb\u0aec\u0aed\u0aee\u0aef\u0b66\u0b67\u0b68\u0b69\u0b6a\u0b6b\u0b6c\u0b6d\u0b6e\u0b6f\u0be6\u0be7\u0be8\u0be9\u0bea\u0beb\u0bec\u0bed\u0bee\u0bef\u0c66\u0c67\u0c68\u0c69\u0c6a\u0c6b\u0c6c\u0c6d\u0c6e\u0c6f\u0ce6\u0ce7\u0ce8\u0ce9\u0cea\u0ceb\u0cec\u0ced\u0cee\u0cef\u0d66\u0d67\u0d68\u0d69\u0d6a\u0d6b\u0d6c\u0d6d\u0d6e\u0d6f\u0e50\u0e51\u0e52\u0e53\u0e54\u0e55\u0e56\u0e57\u0e58\u0e59\u0ed0\u0ed1\u0ed2\u0ed3\u0ed4\u0ed5\u0ed6\u0ed7\u0ed8\u0ed9\u0f20\u0f21\u0f22\u0f23\u0f24\u0f25\u0f26\u0f27\u0f28\u0f29\u1040\u1041\u1042\u1043\u1044\u1045\u1046\u1047\u1048\u1049\u17e0\u17e1\u17e2\u17e3\u17e4\u17e5\u17e6\u17e7\u17e8\u17e9\u1810\u1811\u1812\u1813\u1814\u1815\u1816\u1817\u1818\u1819\u1946\u1947\u1948\u1949\u194a\u194b\u194c\u194d\u194e\u194f\u19d0\u19d1\u19d2\u19d3\u19d4\u19d5\u19d6\u19d7\u19d8\u19d9\uff10\uff11\uff12\uff13\uff14\uff15\uff16\uff17\uff18\uff19'
+
+Nl = u'\u16ee\u16ef\u16f0\u2160\u2161\u2162\u2163\u2164\u2165\u2166\u2167\u2168\u2169\u216a\u216b\u216c\u216d\u216e\u216f\u2170\u2171\u2172\u2173\u2174\u2175\u2176\u2177\u2178\u2179\u217a\u217b\u217c\u217d\u217e\u217f\u2180\u2181\u2182\u2183\u3007\u3021\u3022\u3023\u3024\u3025\u3026\u3027\u3028\u3029\u3038\u3039\u303a'
+
+No = u'\xb2\xb3\xb9\xbc\xbd\xbe\u09f4\u09f5\u09f6\u09f7\u09f8\u09f9\u0bf0\u0bf1\u0bf2\u0f2a\u0f2b\u0f2c\u0f2d\u0f2e\u0f2f\u0f30\u0f31\u0f32\u0f33\u1369\u136a\u136b\u136c\u136d\u136e\u136f\u1370\u1371\u1372\u1373\u1374\u1375\u1376\u1377\u1378\u1379\u137a\u137b\u137c\u17f0\u17f1\u17f2\u17f3\u17f4\u17f5\u17f6\u17f7\u17f8\u17f9\u2070\u2074\u2075\u2076\u2077\u2078\u2079\u2080\u2081\u2082\u2083\u2084\u2085\u2086\u2087\u2088\u2089\u2153\u2154\u2155\u2156\u2157\u2158\u2159\u215a\u215b\u215c\u215d\u215e\u215f\u2460\u2461\u2462\u2463\u2464\u2465\u2466\u2467\u2468\u2469\u246a\u246b\u246c\u246d\u246e\u246f\u2470\u2471\u2472\u2473\u2474\u2475\u2476\u2477\u2478\u2479\u247a\u247b\u247c\u247d\u247e\u247f\u2480\u2481\u2482\u2483\u2484\u2485\u2486\u2487\u2488\u2489\u248a\u248b\u248c\u248d\u248e\u248f\u2490\u2491\u2492\u2493\u2494\u2495\u2496\u2497\u2498\u2499\u249a\u249b\u24ea\u24eb\u24ec\u24ed\u24ee\u24ef\u24f0\u24f1\u24f2\u24f3\u24f4\u24f5\u24f6\u24f7\u24f8\u24f9\u24fa\u24fb\u24fc\u24fd\u24fe\u24ff\u2776\u2777\u2778\u2779\u277a\u277b\u277c\u277d\u277e\u277f\u2780\u2781\u2782\u2783\u2784\u2785\u2786\u2787\u2788\u2789\u278a\u278b\u278c\u278d\u278e\u278f\u2790\u2791\u2792\u2793\u2cfd\u3192\u3193\u3194\u3195\u3220\u3221\u3222\u3223\u3224\u3225\u3226\u3227\u3228\u3229\u3251\u3252\u3253\u3254\u3255\u3256\u3257\u3258\u3259\u325a\u325b\u325c\u325d\u325e\u325f\u3280\u3281\u3282\u3283\u3284\u3285\u3286\u3287\u3288\u3289\u32b1\u32b2\u32b3\u32b4\u32b5\u32b6\u32b7\u32b8\u32b9\u32ba\u32bb\u32bc\u32bd\u32be\u32bf'
+
+Pc = u'_\u203f\u2040\u2054\ufe33\ufe34\ufe4d\ufe4e\ufe4f\uff3f'
+
+Pd = u'-\u058a\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2e17\u301c\u3030\u30a0\ufe31\ufe32\ufe58\ufe63\uff0d'
+
+Pe = u')]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u232a\u23b5\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e\u301f\ufd3f\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
+
+Pf = u'\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d'
+
+Pi = u'\xab\u2018\u201b\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c'
+
+Po = u'!"#%&\'*,./:;?@\\\xa1\xb7\xbf\u037e\u0387\u055a\u055b\u055c\u055d\u055e\u055f\u0589\u05be\u05c0\u05c3\u05c6\u05f3\u05f4\u060c\u060d\u061b\u061e\u061f\u066a\u066b\u066c\u066d\u06d4\u0700\u0701\u0702\u0703\u0704\u0705\u0706\u0707\u0708\u0709\u070a\u070b\u070c\u070d\u0964\u0965\u0970\u0df4\u0e4f\u0e5a\u0e5b\u0f04\u0f05\u0f06\u0f07\u0f08\u0f09\u0f0a\u0f0b\u0f0c\u0f0d\u0f0e\u0f0f\u0f10\u0f11\u0f12\u0f85\u0fd0\u0fd1\u104a\u104b\u104c\u104d\u104e\u104f\u10fb\u1361\u1362\u1363\u1364\u1365\u1366\u1367\u1368\u166d\u166e\u16eb\u16ec\u16ed\u1735\u1736\u17d4\u17d5\u17d6\u17d8\u17d9\u17da\u1800\u1801\u1802\u1803\u1804\u1805\u1807\u1808\u1809\u180a\u1944\u1945\u19de\u19df\u1a1e\u1a1f\u2016\u2017\u2020\u2021\u2022\u2023\u2024\u2025\u2026\u2027\u2030\u2031\u2032\u2033\u2034\u2035\u2036\u2037\u2038\u203b\u203c\u203d\u203e\u2041\u2042\u2043\u2047\u2048\u2049\u204a\u204b\u204c\u204d\u204e\u204f\u2050\u2051\u2053\u2055\u2056\u2057\u2058\u2059\u205a\u205b\u205c\u205d\u205e\u23b6\u2cf9\u2cfa\u2cfb\u2cfc\u2cfe\u2cff\u2e00\u2e01\u2e06\u2e07\u2e08\u2e0b\u2e0e\u2e0f\u2e10\u2e11\u2e12\u2e13\u2e14\u2e15\u2e16\u3001\u3002\u3003\u303d\u30fb\ufe10\ufe11\ufe12\ufe13\ufe14\ufe15\ufe16\ufe19\ufe30\ufe45\ufe46\ufe49\ufe4a\ufe4b\ufe4c\ufe50\ufe51\ufe52\ufe54\ufe55\ufe56\ufe57\ufe5f\ufe60\ufe61\ufe68\ufe6a\ufe6b\uff01\uff02\uff03\uff05\uff06\uff07\uff0a\uff0c\uff0e\uff0f\uff1a\uff1b\uff1f\uff20\uff3c\uff61\uff64\uff65'
+
+Ps = u'([{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2329\u23b4\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3e\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
+
+Sc = u'$\xa2\xa3\xa4\xa5\u060b\u09f2\u09f3\u0af1\u0bf9\u0e3f\u17db\u20a0\u20a1\u20a2\u20a3\u20a4\u20a5\u20a6\u20a7\u20a8\u20a9\u20aa\u20ab\u20ac\u20ad\u20ae\u20af\u20b0\u20b1\u20b2\u20b3\u20b4\u20b5\ufdfc\ufe69\uff04\uffe0\uffe1\uffe5\uffe6'
+
+Sk = u'^`\xa8\xaf\xb4\xb8\u02c2\u02c3\u02c4\u02c5\u02d2\u02d3\u02d4\u02d5\u02d6\u02d7\u02d8\u02d9\u02da\u02db\u02dc\u02dd\u02de\u02df\u02e5\u02e6\u02e7\u02e8\u02e9\u02ea\u02eb\u02ec\u02ed\u02ef\u02f0\u02f1\u02f2\u02f3\u02f4\u02f5\u02f6\u02f7\u02f8\u02f9\u02fa\u02fb\u02fc\u02fd\u02fe\u02ff\u0374\u0375\u0384\u0385\u1fbd\u1fbf\u1fc0\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed\u1fee\u1fef\u1ffd\u1ffe\u309b\u309c\ua700\ua701\ua702\ua703\ua704\ua705\ua706\ua707\ua708\ua709\ua70a\ua70b\ua70c\ua70d\ua70e\ua70f\ua710\ua711\ua712\ua713\ua714\ua715\ua716\uff3e\uff40\uffe3'
+
+Sm = u'+<=>|~\xac\xb1\xd7\xf7\u03f6\u2044\u2052\u207a\u207b\u207c\u208a\u208b\u208c\u2140\u2141\u2142\u2143\u2144\u214b\u2190\u2191\u2192\u2193\u2194\u219a\u219b\u21a0\u21a3\u21a6\u21ae\u21ce\u21cf\u21d2\u21d4\u21f4\u21f5\u21f6\u21f7\u21f8\u21f9\u21fa\u21fb\u21fc\u21fd\u21fe\u21ff\u2200\u2201\u2202\u2203\u2204\u2205\u2206\u2207\u2208\u2209\u220a\u220b\u220c\u220d\u220e\u220f\u2210\u2211\u2212\u2213\u2214\u2215\u2216\u2217\u2218\u2219\u221a\u221b\u221c\u221d\u221e\u221f\u2220\u2221\u2222\u2223\u2224\u2225\u2226\u2227\u2228\u2229\u222a\u222b\u222c\u222d\u222e\u222f\u2230\u2231\u2232\u2233\u2234\u2235\u2236\u2237\u2238\u2239\u223a\u223b\u223c\u223d\u223e\u223f\u2240\u2241\u2242\u2243\u2244\u2245\u2246\u2247\u2248\u2249\u224a\u224b\u224c\u224d\u224e\u224f\u2250\u2251\u2252\u2253\u2254\u2255\u2256\u2257\u2258\u2259\u225a\u225b\u225c\u225d\u225e\u225f\u2260\u2261\u2262\u2263\u2264\u2265\u2266\u2267\u2268\u2269\u226a\u226b\u226c\u226d\u226e\u226f\u2270\u2271\u2272\u2273\u2274\u2275\u2276\u2277\u2278\u2279\u227a\u227b\u227c\u227d\u227e\u227f\u2280\u2281\u2282\u2283\u2284\u2285\u2286\u2287\u2288\u2289\u228a\u228b\u228c\u228d\u228e\u228f\u2290\u2291\u2292\u2293\u2294\u2295\u2296\u2297\u2298\u2299\u229a\u229b\u229c\u229d\u229e\u229f\u22a0\u22a1\u22a2\u22a3\u22a4\u22a5\u22a6\u22a7\u22a8\u22a9\u22aa\u22ab\u22ac\u22ad\u22ae\u22af\u22b0\u22b1\u22b2\u22b3\u22b4\u22b5\u22b6\u22b7\u22b8\u22b9\u22ba\u22bb\u22bc\u22bd\u22be\u22bf\u22c0\u22c1\u22c2\u22c3\u22c4\u22c5\u22c6\u22c7\u22c8\u22c9\u22ca\u22cb\u22cc\u22cd\u22ce\u22cf\u22d0\u22d1\u22d2\u22d3\u22d4\u22d5\u22d6\u22d7\u22d8\u22d9\u22da\u22db\u22dc\u22dd\u22de\u22df\u22e0\u22e1\u22e2\u22e3\u22e4\u22e5\u22e6\u22e7\u22e8\u22e9\u22ea\u22eb\u22ec\u22ed\u22ee\u22ef\u22f0\u22f1\u22f2\u22f3\u22f4\u22f5\u22f6\u22f7\u22f8\u22f9\u22fa\u22fb\u22fc\u22fd\u22fe\u22ff\u2308\u2309\u230a\u230b\u2320\u2321\u237c\u239b\u239c\u239d\u239e\u239f\u23a0\u23a1\u23a2\u23a3\u23a4\u23a5\u23a6\u23a7\u23a8\u23a9\u23aa\u23ab\u23ac\u23ad\u23ae\u23af\u23b0\u23b1\u
23b2\u23b3\u25b7\u25c1\u25f8\u25f9\u25fa\u25fb\u25fc\u25fd\u25fe\u25ff\u266f\u27c0\u27c1\u27c2\u27c3\u27c4\u27d0\u27d1\u27d2\u27d3\u27d4\u27d5\u27d6\u27d7\u27d8\u27d9\u27da\u27db\u27dc\u27dd\u27de\u27df\u27e0\u27e1\u27e2\u27e3\u27e4\u27e5\u27f0\u27f1\u27f2\u27f3\u27f4\u27f5\u27f6\u27f7\u27f8\u27f9\u27fa\u27fb\u27fc\u27fd\u27fe\u27ff\u2900\u2901\u2902\u2903\u2904\u2905\u2906\u2907\u2908\u2909\u290a\u290b\u290c\u290d\u290e\u290f\u2910\u2911\u2912\u2913\u2914\u2915\u2916\u2917\u2918\u2919\u291a\u291b\u291c\u291d\u291e\u291f\u2920\u2921\u2922\u2923\u2924\u2925\u2926\u2927\u2928\u2929\u292a\u292b\u292c\u292d\u292e\u292f\u2930\u2931\u2932\u2933\u2934\u2935\u2936\u2937\u2938\u2939\u293a\u293b\u293c\u293d\u293e\u293f\u2940\u2941\u2942\u2943\u2944\u2945\u2946\u2947\u2948\u2949\u294a\u294b\u294c\u294d\u294e\u294f\u2950\u2951\u2952\u2953\u2954\u2955\u2956\u2957\u2958\u2959\u295a\u295b\u295c\u295d\u295e\u295f\u2960\u2961\u2962\u2963\u2964\u2965\u2966\u2967\u2968\u2969\u296a\u296b\u296c\u296d\u296e\u296f\u2970\u2971\u2972\u2973\u2974\u2975\u2976\u2977\u2978\u2979\u297a\u297b\u297c\u297d\u297e\u297f\u2980\u2981\u2982\u2999\u299a\u299b\u299c\u299d\u299e\u299f\u29a0\u29a1\u29a2\u29a3\u29a4\u29a5\u29a6\u29a7\u29a8\u29a9\u29aa\u29ab\u29ac\u29ad\u29ae\u29af\u29b0\u29b1\u29b2\u29b3\u29b4\u29b5\u29b6\u29b7\u29b8\u29b9\u29ba\u29bb\u29bc\u29bd\u29be\u29bf\u29c0\u29c1\u29c2\u29c3\u29c4\u29c5\u29c6\u29c7\u29c8\u29c9\u29ca\u29cb\u29cc\u29cd\u29ce\u29cf\u29d0\u29d1\u29d2\u29d3\u29d4\u29d5\u29d6\u29d7\u29dc\u29dd\u29de\u29df\u29e0\u29e1\u29e2\u29e3\u29e4\u29e5\u29e6\u29e7\u29e8\u29e9\u29ea\u29eb\u29ec\u29ed\u29ee\u29ef\u29f0\u29f1\u29f2\u29f3\u29f4\u29f5\u29f6\u29f7\u29f8\u29f9\u29fa\u29fb\u29fe\u29ff\u2a00\u2a01\u2a02\u2a03\u2a04\u2a05\u2a06\u2a07\u2a08\u2a09\u2a0a\u2a0b\u2a0c\u2a0d\u2a0e\u2a0f\u2a10\u2a11\u2a12\u2a13\u2a14\u2a15\u2a16\u2a17\u2a18\u2a19\u2a1a\u2a1b\u2a1c\u2a1d\u2a1e\u2a1f\u2a20\u2a21\u2a22\u2a23\u2a24\u2a25\u2a26\u2a27\u2a28\u2a29\u2a2a\u2a2b\u2a2c\u2a2d\u2a2e\u2a2f\u2a30\u2a
31\u2a32\u2a33\u2a34\u2a35\u2a36\u2a37\u2a38\u2a39\u2a3a\u2a3b\u2a3c\u2a3d\u2a3e\u2a3f\u2a40\u2a41\u2a42\u2a43\u2a44\u2a45\u2a46\u2a47\u2a48\u2a49\u2a4a\u2a4b\u2a4c\u2a4d\u2a4e\u2a4f\u2a50\u2a51\u2a52\u2a53\u2a54\u2a55\u2a56\u2a57\u2a58\u2a59\u2a5a\u2a5b\u2a5c\u2a5d\u2a5e\u2a5f\u2a60\u2a61\u2a62\u2a63\u2a64\u2a65\u2a66\u2a67\u2a68\u2a69\u2a6a\u2a6b\u2a6c\u2a6d\u2a6e\u2a6f\u2a70\u2a71\u2a72\u2a73\u2a74\u2a75\u2a76\u2a77\u2a78\u2a79\u2a7a\u2a7b\u2a7c\u2a7d\u2a7e\u2a7f\u2a80\u2a81\u2a82\u2a83\u2a84\u2a85\u2a86\u2a87\u2a88\u2a89\u2a8a\u2a8b\u2a8c\u2a8d\u2a8e\u2a8f\u2a90\u2a91\u2a92\u2a93\u2a94\u2a95\u2a96\u2a97\u2a98\u2a99\u2a9a\u2a9b\u2a9c\u2a9d\u2a9e\u2a9f\u2aa0\u2aa1\u2aa2\u2aa3\u2aa4\u2aa5\u2aa6\u2aa7\u2aa8\u2aa9\u2aaa\u2aab\u2aac\u2aad\u2aae\u2aaf\u2ab0\u2ab1\u2ab2\u2ab3\u2ab4\u2ab5\u2ab6\u2ab7\u2ab8\u2ab9\u2aba\u2abb\u2abc\u2abd\u2abe\u2abf\u2ac0\u2ac1\u2ac2\u2ac3\u2ac4\u2ac5\u2ac6\u2ac7\u2ac8\u2ac9\u2aca\u2acb\u2acc\u2acd\u2ace\u2acf\u2ad0\u2ad1\u2ad2\u2ad3\u2ad4\u2ad5\u2ad6\u2ad7\u2ad8\u2ad9\u2ada\u2adb\u2adc\u2add\u2ade\u2adf\u2ae0\u2ae1\u2ae2\u2ae3\u2ae4\u2ae5\u2ae6\u2ae7\u2ae8\u2ae9\u2aea\u2aeb\u2aec\u2aed\u2aee\u2aef\u2af0\u2af1\u2af2\u2af3\u2af4\u2af5\u2af6\u2af7\u2af8\u2af9\u2afa\u2afb\u2afc\u2afd\u2afe\u2aff\ufb29\ufe62\ufe64\ufe65\ufe66\uff0b\uff1c\uff1d\uff1e\uff5c\uff5e\uffe2\uffe9\uffea\uffeb\uffec'
+
+So = u'\xa6\xa7\xa9\xae\xb0\xb6\u0482\u060e\u060f\u06e9\u06fd\u06fe\u09fa\u0b70\u0bf3\u0bf4\u0bf5\u0bf6\u0bf7\u0bf8\u0bfa\u0f01\u0f02\u0f03\u0f13\u0f14\u0f15\u0f16\u0f17\u0f1a\u0f1b\u0f1c\u0f1d\u0f1e\u0f1f\u0f34\u0f36\u0f38\u0fbe\u0fbf\u0fc0\u0fc1\u0fc2\u0fc3\u0fc4\u0fc5\u0fc7\u0fc8\u0fc9\u0fca\u0fcb\u0fcc\u0fcf\u1360\u1390\u1391\u1392\u1393\u1394\u1395\u1396\u1397\u1398\u1399\u1940\u19e0\u19e1\u19e2\u19e3\u19e4\u19e5\u19e6\u19e7\u19e8\u19e9\u19ea\u19eb\u19ec\u19ed\u19ee\u19ef\u19f0\u19f1\u19f2\u19f3\u19f4\u19f5\u19f6\u19f7\u19f8\u19f9\u19fa\u19fb\u19fc\u19fd\u19fe\u19ff\u2100\u2101\u2103\u2104\u2105\u2106\u2108\u2109\u2114\u2116\u2117\u2118\u211e\u211f\u2120\u2121\u2122\u2123\u2125\u2127\u2129\u212e\u2132\u213a\u213b\u214a\u214c\u2195\u2196\u2197\u2198\u2199\u219c\u219d\u219e\u219f\u21a1\u21a2\u21a4\u21a5\u21a7\u21a8\u21a9\u21aa\u21ab\u21ac\u21ad\u21af\u21b0\u21b1\u21b2\u21b3\u21b4\u21b5\u21b6\u21b7\u21b8\u21b9\u21ba\u21bb\u21bc\u21bd\u21be\u21bf\u21c0\u21c1\u21c2\u21c3\u21c4\u21c5\u21c6\u21c7\u21c8\u21c9\u21ca\u21cb\u21cc\u21cd\u21d0\u21d1\u21d3\u21d5\u21d6\u21d7\u21d8\u21d9\u21da\u21db\u21dc\u21dd\u21de\u21df\u21e0\u21e1\u21e2\u21e3\u21e4\u21e5\u21e6\u21e7\u21e8\u21e9\u21ea\u21eb\u21ec\u21ed\u21ee\u21ef\u21f0\u21f1\u21f2\u21f3\u2300\u2301\u2302\u2303\u2304\u2305\u2306\u2307\u230c\u230d\u230e\u230f\u2310\u2311\u2312\u2313\u2314\u2315\u2316\u2317\u2318\u2319\u231a\u231b\u231c\u231d\u231e\u231f\u2322\u2323\u2324\u2325\u2326\u2327\u2328\u232b\u232c\u232d\u232e\u232f\u2330\u2331\u2332\u2333\u2334\u2335\u2336\u2337\u2338\u2339\u233a\u233b\u233c\u233d\u233e\u233f\u2340\u2341\u2342\u2343\u2344\u2345\u2346\u2347\u2348\u2349\u234a\u234b\u234c\u234d\u234e\u234f\u2350\u2351\u2352\u2353\u2354\u2355\u2356\u2357\u2358\u2359\u235a\u235b\u235c\u235d\u235e\u235f\u2360\u2361\u2362\u2363\u2364\u2365\u2366\u2367\u2368\u2369\u236a\u236b\u236c\u236d\u236e\u236f\u2370\u2371\u2372\u2373\u2374\u2375\u2376\u2377\u2378\u2379\u237a\u237b\u237d\u237e\u237f\u2380\u2381\u2382\u2383\u2384\u2385
\u2386\u2387\u2388\u2389\u238a\u238b\u238c\u238d\u238e\u238f\u2390\u2391\u2392\u2393\u2394\u2395\u2396\u2397\u2398\u2399\u239a\u23b7\u23b8\u23b9\u23ba\u23bb\u23bc\u23bd\u23be\u23bf\u23c0\u23c1\u23c2\u23c3\u23c4\u23c5\u23c6\u23c7\u23c8\u23c9\u23ca\u23cb\u23cc\u23cd\u23ce\u23cf\u23d0\u23d1\u23d2\u23d3\u23d4\u23d5\u23d6\u23d7\u23d8\u23d9\u23da\u23db\u2400\u2401\u2402\u2403\u2404\u2405\u2406\u2407\u2408\u2409\u240a\u240b\u240c\u240d\u240e\u240f\u2410\u2411\u2412\u2413\u2414\u2415\u2416\u2417\u2418\u2419\u241a\u241b\u241c\u241d\u241e\u241f\u2420\u2421\u2422\u2423\u2424\u2425\u2426\u2440\u2441\u2442\u2443\u2444\u2445\u2446\u2447\u2448\u2449\u244a\u249c\u249d\u249e\u249f\u24a0\u24a1\u24a2\u24a3\u24a4\u24a5\u24a6\u24a7\u24a8\u24a9\u24aa\u24ab\u24ac\u24ad\u24ae\u24af\u24b0\u24b1\u24b2\u24b3\u24b4\u24b5\u24b6\u24b7\u24b8\u24b9\u24ba\u24bb\u24bc\u24bd\u24be\u24bf\u24c0\u24c1\u24c2\u24c3\u24c4\u24c5\u24c6\u24c7\u24c8\u24c9\u24ca\u24cb\u24cc\u24cd\u24ce\u24cf\u24d0\u24d1\u24d2\u24d3\u24d4\u24d5\u24d6\u24d7\u24d8\u24d9\u24da\u24db\u24dc\u24dd\u24de\u24df\u24e0\u24e1\u24e2\u24e3\u24e4\u24e5\u24e6\u24e7\u24e8\u24e9\u2500\u2501\u2502\u2503\u2504\u2505\u2506\u2507\u2508\u2509\u250a\u250b\u250c\u250d\u250e\u250f\u2510\u2511\u2512\u2513\u2514\u2515\u2516\u2517\u2518\u2519\u251a\u251b\u251c\u251d\u251e\u251f\u2520\u2521\u2522\u2523\u2524\u2525\u2526\u2527\u2528\u2529\u252a\u252b\u252c\u252d\u252e\u252f\u2530\u2531\u2532\u2533\u2534\u2535\u2536\u2537\u2538\u2539\u253a\u253b\u253c\u253d\u253e\u253f\u2540\u2541\u2542\u2543\u2544\u2545\u2546\u2547\u2548\u2549\u254a\u254b\u254c\u254d\u254e\u254f\u2550\u2551\u2552\u2553\u2554\u2555\u2556\u2557\u2558\u2559\u255a\u255b\u255c\u255d\u255e\u255f\u2560\u2561\u2562\u2563\u2564\u2565\u2566\u2567\u2568\u2569\u256a\u256b\u256c\u256d\u256e\u256f\u2570\u2571\u2572\u2573\u2574\u2575\u2576\u2577\u2578\u2579\u257a\u257b\u257c\u257d\u257e\u257f\u2580\u2581\u2582\u2583\u2584\u2585\u2586\u2587\u2588\u2589\u258a\u258b\u258c\u258d\u258e\u258f\u2590\u2591\u2592\u
2593\u2594\u2595\u2596\u2597\u2598\u2599\u259a\u259b\u259c\u259d\u259e\u259f\u25a0\u25a1\u25a2\u25a3\u25a4\u25a5\u25a6\u25a7\u25a8\u25a9\u25aa\u25ab\u25ac\u25ad\u25ae\u25af\u25b0\u25b1\u25b2\u25b3\u25b4\u25b5\u25b6\u25b8\u25b9\u25ba\u25bb\u25bc\u25bd\u25be\u25bf\u25c0\u25c2\u25c3\u25c4\u25c5\u25c6\u25c7\u25c8\u25c9\u25ca\u25cb\u25cc\u25cd\u25ce\u25cf\u25d0\u25d1\u25d2\u25d3\u25d4\u25d5\u25d6\u25d7\u25d8\u25d9\u25da\u25db\u25dc\u25dd\u25de\u25df\u25e0\u25e1\u25e2\u25e3\u25e4\u25e5\u25e6\u25e7\u25e8\u25e9\u25ea\u25eb\u25ec\u25ed\u25ee\u25ef\u25f0\u25f1\u25f2\u25f3\u25f4\u25f5\u25f6\u25f7\u2600\u2601\u2602\u2603\u2604\u2605\u2606\u2607\u2608\u2609\u260a\u260b\u260c\u260d\u260e\u260f\u2610\u2611\u2612\u2613\u2614\u2615\u2616\u2617\u2618\u2619\u261a\u261b\u261c\u261d\u261e\u261f\u2620\u2621\u2622\u2623\u2624\u2625\u2626\u2627\u2628\u2629\u262a\u262b\u262c\u262d\u262e\u262f\u2630\u2631\u2632\u2633\u2634\u2635\u2636\u2637\u2638\u2639\u263a\u263b\u263c\u263d\u263e\u263f\u2640\u2641\u2642\u2643\u2644\u2645\u2646\u2647\u2648\u2649\u264a\u264b\u264c\u264d\u264e\u264f\u2650\u2651\u2652\u2653\u2654\u2655\u2656\u2657\u2658\u2659\u265a\u265b\u265c\u265d\u265e\u265f\u2660\u2661\u2662\u2663\u2664\u2665\u2666\u2667\u2668\u2669\u266a\u266b\u266c\u266d\u266e\u2670\u2671\u2672\u2673\u2674\u2675\u2676\u2677\u2678\u2679\u267a\u267b\u267c\u267d\u267e\u267f\u2680\u2681\u2682\u2683\u2684\u2685\u2686\u2687\u2688\u2689\u268a\u268b\u268c\u268d\u268e\u268f\u2690\u2691\u2692\u2693\u2694\u2695\u2696\u2697\u2698\u2699\u269a\u269b\u269c\u26a0\u26a1\u26a2\u26a3\u26a4\u26a5\u26a6\u26a7\u26a8\u26a9\u26aa\u26ab\u26ac\u26ad\u26ae\u26af\u26b0\u26b1\u2701\u2702\u2703\u2704\u2706\u2707\u2708\u2709\u270c\u270d\u270e\u270f\u2710\u2711\u2712\u2713\u2714\u2715\u2716\u2717\u2718\u2719\u271a\u271b\u271c\u271d\u271e\u271f\u2720\u2721\u2722\u2723\u2724\u2725\u2726\u2727\u2729\u272a\u272b\u272c\u272d\u272e\u272f\u2730\u2731\u2732\u2733\u2734\u2735\u2736\u2737\u2738\u2739\u273a\u273b\u273c\u273d\u273e\u273f\u2740\u27
41\u2742\u2743\u2744\u2745\u2746\u2747\u2748\u2749\u274a\u274b\u274d\u274f\u2750\u2751\u2752\u2756\u2758\u2759\u275a\u275b\u275c\u275d\u275e\u2761\u2762\u2763\u2764\u2765\u2766\u2767\u2794\u2798\u2799\u279a\u279b\u279c\u279d\u279e\u279f\u27a0\u27a1\u27a2\u27a3\u27a4\u27a5\u27a6\u27a7\u27a8\u27a9\u27aa\u27ab\u27ac\u27ad\u27ae\u27af\u27b1\u27b2\u27b3\u27b4\u27b5\u27b6\u27b7\u27b8\u27b9\u27ba\u27bb\u27bc\u27bd\u27be\u2800\u2801\u2802\u2803\u2804\u2805\u2806\u2807\u2808\u2809\u280a\u280b\u280c\u280d\u280e\u280f\u2810\u2811\u2812\u2813\u2814\u2815\u2816\u2817\u2818\u2819\u281a\u281b\u281c\u281d\u281e\u281f\u2820\u2821\u2822\u2823\u2824\u2825\u2826\u2827\u2828\u2829\u282a\u282b\u282c\u282d\u282e\u282f\u2830\u2831\u2832\u2833\u2834\u2835\u2836\u2837\u2838\u2839\u283a\u283b\u283c\u283d\u283e\u283f\u2840\u2841\u2842\u2843\u2844\u2845\u2846\u2847\u2848\u2849\u284a\u284b\u284c\u284d\u284e\u284f\u2850\u2851\u2852\u2853\u2854\u2855\u2856\u2857\u2858\u2859\u285a\u285b\u285c\u285d\u285e\u285f\u2860\u2861\u2862\u2863\u2864\u2865\u2866\u2867\u2868\u2869\u286a\u286b\u286c\u286d\u286e\u286f\u2870\u2871\u2872\u2873\u2874\u2875\u2876\u2877\u2878\u2879\u287a\u287b\u287c\u287d\u287e\u287f\u2880\u2881\u2882\u2883\u2884\u2885\u2886\u2887\u2888\u2889\u288a\u288b\u288c\u288d\u288e\u288f\u2890\u2891\u2892\u2893\u2894\u2895\u2896\u2897\u2898\u2899\u289a\u289b\u289c\u289d\u289e\u289f\u28a0\u28a1\u28a2\u28a3\u28a4\u28a5\u28a6\u28a7\u28a8\u28a9\u28aa\u28ab\u28ac\u28ad\u28ae\u28af\u28b0\u28b1\u28b2\u28b3\u28b4\u28b5\u28b6\u28b7\u28b8\u28b9\u28ba\u28bb\u28bc\u28bd\u28be\u28bf\u28c0\u28c1\u28c2\u28c3\u28c4\u28c5\u28c6\u28c7\u28c8\u28c9\u28ca\u28cb\u28cc\u28cd\u28ce\u28cf\u28d0\u28d1\u28d2\u28d3\u28d4\u28d5\u28d6\u28d7\u28d8\u28d9\u28da\u28db\u28dc\u28dd\u28de\u28df\u28e0\u28e1\u28e2\u28e3\u28e4\u28e5\u28e6\u28e7\u28e8\u28e9\u28ea\u28eb\u28ec\u28ed\u28ee\u28ef\u28f0\u28f1\u28f2\u28f3\u28f4\u28f5\u28f6\u28f7\u28f8\u28f9\u28fa\u28fb\u28fc\u28fd\u28fe\u28ff\u2b00\u2b01\u2b02\u2b03\u2b04\u2b05\u2b06\u2b07
\u2b08\u2b09\u2b0a\u2b0b\u2b0c\u2b0d\u2b0e\u2b0f\u2b10\u2b11\u2b12\u2b13\u2ce5\u2ce6\u2ce7\u2ce8\u2ce9\u2cea\u2e80\u2e81\u2e82\u2e83\u2e84\u2e85\u2e86\u2e87\u2e88\u2e89\u2e8a\u2e8b\u2e8c\u2e8d\u2e8e\u2e8f\u2e90\u2e91\u2e92\u2e93\u2e94\u2e95\u2e96\u2e97\u2e98\u2e99\u2e9b\u2e9c\u2e9d\u2e9e\u2e9f\u2ea0\u2ea1\u2ea2\u2ea3\u2ea4\u2ea5\u2ea6\u2ea7\u2ea8\u2ea9\u2eaa\u2eab\u2eac\u2ead\u2eae\u2eaf\u2eb0\u2eb1\u2eb2\u2eb3\u2eb4\u2eb5\u2eb6\u2eb7\u2eb8\u2eb9\u2eba\u2ebb\u2ebc\u2ebd\u2ebe\u2ebf\u2ec0\u2ec1\u2ec2\u2ec3\u2ec4\u2ec5\u2ec6\u2ec7\u2ec8\u2ec9\u2eca\u2ecb\u2ecc\u2ecd\u2ece\u2ecf\u2ed0\u2ed1\u2ed2\u2ed3\u2ed4\u2ed5\u2ed6\u2ed7\u2ed8\u2ed9\u2eda\u2edb\u2edc\u2edd\u2ede\u2edf\u2ee0\u2ee1\u2ee2\u2ee3\u2ee4\u2ee5\u2ee6\u2ee7\u2ee8\u2ee9\u2eea\u2eeb\u2eec\u2eed\u2eee\u2eef\u2ef0\u2ef1\u2ef2\u2ef3\u2f00\u2f01\u2f02\u2f03\u2f04\u2f05\u2f06\u2f07\u2f08\u2f09\u2f0a\u2f0b\u2f0c\u2f0d\u2f0e\u2f0f\u2f10\u2f11\u2f12\u2f13\u2f14\u2f15\u2f16\u2f17\u2f18\u2f19\u2f1a\u2f1b\u2f1c\u2f1d\u2f1e\u2f1f\u2f20\u2f21\u2f22\u2f23\u2f24\u2f25\u2f26\u2f27\u2f28\u2f29\u2f2a\u2f2b\u2f2c\u2f2d\u2f2e\u2f2f\u2f30\u2f31\u2f32\u2f33\u2f34\u2f35\u2f36\u2f37\u2f38\u2f39\u2f3a\u2f3b\u2f3c\u2f3d\u2f3e\u2f3f\u2f40\u2f41\u2f42\u2f43\u2f44\u2f45\u2f46\u2f47\u2f48\u2f49\u2f4a\u2f4b\u2f4c\u2f4d\u2f4e\u2f4f\u2f50\u2f51\u2f52\u2f53\u2f54\u2f55\u2f56\u2f57\u2f58\u2f59\u2f5a\u2f5b\u2f5c\u2f5d\u2f5e\u2f5f\u2f60\u2f61\u2f62\u2f63\u2f64\u2f65\u2f66\u2f67\u2f68\u2f69\u2f6a\u2f6b\u2f6c\u2f6d\u2f6e\u2f6f\u2f70\u2f71\u2f72\u2f73\u2f74\u2f75\u2f76\u2f77\u2f78\u2f79\u2f7a\u2f7b\u2f7c\u2f7d\u2f7e\u2f7f\u2f80\u2f81\u2f82\u2f83\u2f84\u2f85\u2f86\u2f87\u2f88\u2f89\u2f8a\u2f8b\u2f8c\u2f8d\u2f8e\u2f8f\u2f90\u2f91\u2f92\u2f93\u2f94\u2f95\u2f96\u2f97\u2f98\u2f99\u2f9a\u2f9b\u2f9c\u2f9d\u2f9e\u2f9f\u2fa0\u2fa1\u2fa2\u2fa3\u2fa4\u2fa5\u2fa6\u2fa7\u2fa8\u2fa9\u2faa\u2fab\u2fac\u2fad\u2fae\u2faf\u2fb0\u2fb1\u2fb2\u2fb3\u2fb4\u2fb5\u2fb6\u2fb7\u2fb8\u2fb9\u2fba\u2fbb\u2fbc\u2fbd\u2fbe\u2fbf\u2fc0\u2fc1\u2fc2\u2fc3\u2fc4\u2fc5\u2fc6\u2fc7\u
2fc8\u2fc9\u2fca\u2fcb\u2fcc\u2fcd\u2fce\u2fcf\u2fd0\u2fd1\u2fd2\u2fd3\u2fd4\u2fd5\u2ff0\u2ff1\u2ff2\u2ff3\u2ff4\u2ff5\u2ff6\u2ff7\u2ff8\u2ff9\u2ffa\u2ffb\u3004\u3012\u3013\u3020\u3036\u3037\u303e\u303f\u3190\u3191\u3196\u3197\u3198\u3199\u319a\u319b\u319c\u319d\u319e\u319f\u31c0\u31c1\u31c2\u31c3\u31c4\u31c5\u31c6\u31c7\u31c8\u31c9\u31ca\u31cb\u31cc\u31cd\u31ce\u31cf\u3200\u3201\u3202\u3203\u3204\u3205\u3206\u3207\u3208\u3209\u320a\u320b\u320c\u320d\u320e\u320f\u3210\u3211\u3212\u3213\u3214\u3215\u3216\u3217\u3218\u3219\u321a\u321b\u321c\u321d\u321e\u322a\u322b\u322c\u322d\u322e\u322f\u3230\u3231\u3232\u3233\u3234\u3235\u3236\u3237\u3238\u3239\u323a\u323b\u323c\u323d\u323e\u323f\u3240\u3241\u3242\u3243\u3250\u3260\u3261\u3262\u3263\u3264\u3265\u3266\u3267\u3268\u3269\u326a\u326b\u326c\u326d\u326e\u326f\u3270\u3271\u3272\u3273\u3274\u3275\u3276\u3277\u3278\u3279\u327a\u327b\u327c\u327d\u327e\u327f\u328a\u328b\u328c\u328d\u328e\u328f\u3290\u3291\u3292\u3293\u3294\u3295\u3296\u3297\u3298\u3299\u329a\u329b\u329c\u329d\u329e\u329f\u32a0\u32a1\u32a2\u32a3\u32a4\u32a5\u32a6\u32a7\u32a8\u32a9\u32aa\u32ab\u32ac\u32ad\u32ae\u32af\u32b0\u32c0\u32c1\u32c2\u32c3\u32c4\u32c5\u32c6\u32c7\u32c8\u32c9\u32ca\u32cb\u32cc\u32cd\u32ce\u32cf\u32d0\u32d1\u32d2\u32d3\u32d4\u32d5\u32d6\u32d7\u32d8\u32d9\u32da\u32db\u32dc\u32dd\u32de\u32df\u32e0\u32e1\u32e2\u32e3\u32e4\u32e5\u32e6\u32e7\u32e8\u32e9\u32ea\u32eb\u32ec\u32ed\u32ee\u32ef\u32f0\u32f1\u32f2\u32f3\u32f4\u32f5\u32f6\u32f7\u32f8\u32f9\u32fa\u32fb\u32fc\u32fd\u32fe\u3300\u3301\u3302\u3303\u3304\u3305\u3306\u3307\u3308\u3309\u330a\u330b\u330c\u330d\u330e\u330f\u3310\u3311\u3312\u3313\u3314\u3315\u3316\u3317\u3318\u3319\u331a\u331b\u331c\u331d\u331e\u331f\u3320\u3321\u3322\u3323\u3324\u3325\u3326\u3327\u3328\u3329\u332a\u332b\u332c\u332d\u332e\u332f\u3330\u3331\u3332\u3333\u3334\u3335\u3336\u3337\u3338\u3339\u333a\u333b\u333c\u333d\u333e\u333f\u3340\u3341\u3342\u3343\u3344\u3345\u3346\u3347\u3348\u3349\u334a\u334b\u334c\u334d\u334e\u33
4f\u3350\u3351\u3352\u3353\u3354\u3355\u3356\u3357\u3358\u3359\u335a\u335b\u335c\u335d\u335e\u335f\u3360\u3361\u3362\u3363\u3364\u3365\u3366\u3367\u3368\u3369\u336a\u336b\u336c\u336d\u336e\u336f\u3370\u3371\u3372\u3373\u3374\u3375\u3376\u3377\u3378\u3379\u337a\u337b\u337c\u337d\u337e\u337f\u3380\u3381\u3382\u3383\u3384\u3385\u3386\u3387\u3388\u3389\u338a\u338b\u338c\u338d\u338e\u338f\u3390\u3391\u3392\u3393\u3394\u3395\u3396\u3397\u3398\u3399\u339a\u339b\u339c\u339d\u339e\u339f\u33a0\u33a1\u33a2\u33a3\u33a4\u33a5\u33a6\u33a7\u33a8\u33a9\u33aa\u33ab\u33ac\u33ad\u33ae\u33af\u33b0\u33b1\u33b2\u33b3\u33b4\u33b5\u33b6\u33b7\u33b8\u33b9\u33ba\u33bb\u33bc\u33bd\u33be\u33bf\u33c0\u33c1\u33c2\u33c3\u33c4\u33c5\u33c6\u33c7\u33c8\u33c9\u33ca\u33cb\u33cc\u33cd\u33ce\u33cf\u33d0\u33d1\u33d2\u33d3\u33d4\u33d5\u33d6\u33d7\u33d8\u33d9\u33da\u33db\u33dc\u33dd\u33de\u33df\u33e0\u33e1\u33e2\u33e3\u33e4\u33e5\u33e6\u33e7\u33e8\u33e9\u33ea\u33eb\u33ec\u33ed\u33ee\u33ef\u33f0\u33f1\u33f2\u33f3\u33f4\u33f5\u33f6\u33f7\u33f8\u33f9\u33fa\u33fb\u33fc\u33fd\u33fe\u33ff\u4dc0\u4dc1\u4dc2\u4dc3\u4dc4\u4dc5\u4dc6\u4dc7\u4dc8\u4dc9\u4dca\u4dcb\u4dcc\u4dcd\u4dce\u4dcf\u4dd0\u4dd1\u4dd2\u4dd3\u4dd4\u4dd5\u4dd6\u4dd7\u4dd8\u4dd9\u4dda\u4ddb\u4ddc\u4ddd\u4dde\u4ddf\u4de0\u4de1\u4de2\u4de3\u4de4\u4de5\u4de6\u4de7\u4de8\u4de9\u4dea\u4deb\u4dec\u4ded\u4dee\u4def\u4df0\u4df1\u4df2\u4df3\u4df4\u4df5\u4df6\u4df7\u4df8\u4df9\u4dfa\u4dfb\u4dfc\u4dfd\u4dfe\u4dff\ua490\ua491\ua492\ua493\ua494\ua495\ua496\ua497\ua498\ua499\ua49a\ua49b\ua49c\ua49d\ua49e\ua49f\ua4a0\ua4a1\ua4a2\ua4a3\ua4a4\ua4a5\ua4a6\ua4a7\ua4a8\ua4a9\ua4aa\ua4ab\ua4ac\ua4ad\ua4ae\ua4af\ua4b0\ua4b1\ua4b2\ua4b3\ua4b4\ua4b5\ua4b6\ua4b7\ua4b8\ua4b9\ua4ba\ua4bb\ua4bc\ua4bd\ua4be\ua4bf\ua4c0\ua4c1\ua4c2\ua4c3\ua4c4\ua4c5\ua4c6\ua828\ua829\ua82a\ua82b\ufdfd\uffe4\uffe8\uffed\uffee\ufffc\ufffd'
+
+Zl = u'\u2028'
+
+Zp = u'\u2029'
+
+Zs = u' \xa0\u1680\u180e\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a\u202f\u205f\u3000'
+
+cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
+
+def combine(*args):
+ return u''.join([globals()[cat] for cat in args])
+
+xid_start = u'\u0041-\u005A\u005F\u0061-\u007A\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BB\u01BC-\u01BF\u01C0-\u01C3\u01C4-\u0241\u0250-\u02AF\u02B0-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EE\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03F5\u03F7-\u0481\u048A-\u04CE\u04D0-\u04F9\u0500-\u050F\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA\u05F0-\u05F2\u0621-\u063A\u0640\u0641-\u064A\u066E-\u066F\u0671-\u06D3\u06D5\u06E5-\u06E6\u06EE-\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u076D\u0780-\u07A5\u07B1\u0904-\u0939\u093D\u0950\u0958-\u0961\u097D\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BD\u09CE\u09DC-\u09DD\u09DF-\u09E1\u09F0-\u09F1\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0-\u0AE1\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B35-\u0B39\u0B3D\u0B5C-\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C60-\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0-\u0CE1\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D60-\u0D61\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E40-\u0E45\u0E46\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EB0\u0EB2\u0EBD\u0EC0-\u0EC4\u0EC6\u0EDC-\u0EDD\u0F00\u0F40-\u0F47\u0F49-\u0F6A\u0F88-\u0F8B\u1000-\u1021\u1023-\u1027\u1029-\u102A\u1050-\u1055\u10A0-\u10C5\u10D0-\u10FA\u10FC\u1100-\u1159\u115F-\u11A2\u11A8-\u11F9\u1200-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u131
2-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u1676\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F0\u1700-\u170C\u170E-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7\u17DC\u1820-\u1842\u1843\u1844-\u1877\u1880-\u18A8\u1900-\u191C\u1950-\u196D\u1970-\u1974\u1980-\u19A9\u19C1-\u19C7\u1A00-\u1A16\u1D00-\u1D2B\u1D2C-\u1D61\u1D62-\u1D77\u1D78\u1D79-\u1D9A\u1D9B-\u1DBF\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F\u2090-\u2094\u2102\u2107\u210A-\u2113\u2115\u2118\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212E\u212F-\u2131\u2133-\u2134\u2135-\u2138\u2139\u213C-\u213F\u2145-\u2149\u2160-\u2183\u2C00-\u2C2E\u2C30-\u2C5E\u2C80-\u2CE4\u2D00-\u2D25\u2D30-\u2D65\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005\u3006\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303A\u303B\u303C\u3041-\u3096\u309D-\u309E\u309F\u30A1-\u30FA\u30FC-\u30FE\u30FF\u3105-\u312C\u3131-\u318E\u31A0-\u31B7\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FBB\uA000-\uA014\uA015\uA016-\uA48C\uA800-\uA801\uA803-\uA805\uA807-\uA80A\uA80C-\uA822\uAC00-\uD7A3\uF900-\uFA2D\uFA30-\uFA6A\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40-\uFB41\uFB43-\uFB44\uFB46-\uFBB1\uFBD3-\uFC5D\uFC64-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDF9\uFE71\uFE73\uFE77\uFE79\uFE7B\uFE7D\uFE7F-\uFEFC\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFF6F\uFF70\uFF71-\uFF9D\uFFA0-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC'
+
+xid_continue = u'\u0030-\u0039\u0041-\u005A\u005F\u0061-\u007A\u00AA\u00B5\u00B7\u00BA\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u01BA\u01BB\u01BC-\u01BF\u01C0-\u01C3\u01C4-\u0241\u0250-\u02AF\u02B0-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EE\u0300-\u036F\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03CE\u03D0-\u03F5\u03F7-\u0481\u0483-\u0486\u048A-\u04CE\u04D0-\u04F9\u0500-\u050F\u0531-\u0556\u0559\u0561-\u0587\u0591-\u05B9\u05BB-\u05BD\u05BF\u05C1-\u05C2\u05C4-\u05C5\u05C7\u05D0-\u05EA\u05F0-\u05F2\u0610-\u0615\u0621-\u063A\u0640\u0641-\u064A\u064B-\u065E\u0660-\u0669\u066E-\u066F\u0670\u0671-\u06D3\u06D5\u06D6-\u06DC\u06DF-\u06E4\u06E5-\u06E6\u06E7-\u06E8\u06EA-\u06ED\u06EE-\u06EF\u06F0-\u06F9\u06FA-\u06FC\u06FF\u0710\u0711\u0712-\u072F\u0730-\u074A\u074D-\u076D\u0780-\u07A5\u07A6-\u07B0\u07B1\u0901-\u0902\u0903\u0904-\u0939\u093C\u093D\u093E-\u0940\u0941-\u0948\u0949-\u094C\u094D\u0950\u0951-\u0954\u0958-\u0961\u0962-\u0963\u0966-\u096F\u097D\u0981\u0982-\u0983\u0985-\u098C\u098F-\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2\u09B6-\u09B9\u09BC\u09BD\u09BE-\u09C0\u09C1-\u09C4\u09C7-\u09C8\u09CB-\u09CC\u09CD\u09CE\u09D7\u09DC-\u09DD\u09DF-\u09E1\u09E2-\u09E3\u09E6-\u09EF\u09F0-\u09F1\u0A01-\u0A02\u0A03\u0A05-\u0A0A\u0A0F-\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32-\u0A33\u0A35-\u0A36\u0A38-\u0A39\u0A3C\u0A3E-\u0A40\u0A41-\u0A42\u0A47-\u0A48\u0A4B-\u0A4D\u0A59-\u0A5C\u0A5E\u0A66-\u0A6F\u0A70-\u0A71\u0A72-\u0A74\u0A81-\u0A82\u0A83\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8\u0AAA-\u0AB0\u0AB2-\u0AB3\u0AB5-\u0AB9\u0ABC\u0ABD\u0ABE-\u0AC0\u0AC1-\u0AC5\u0AC7-\u0AC8\u0AC9\u0ACB-\u0ACC\u0ACD\u0AD0\u0AE0-\u0AE1\u0AE2-\u0AE3\u0AE6-\u0AEF\u0B01\u0B02-\u0B03\u0B05-\u0B0C\u0B0F-\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32-\u0B33\u0B35-\u0B39\u0B3C\u0B3D\u0B3E\u0B3F\u0B40\u0B41-\u0B43\u0B47-\u0B48\u0B4B-\u0B4C\u0B4D\u0B56\u0B57\u0B5C-\u0B5D\u0B5F-\u0B61\u0B66-\u0B6F\u0B71\u0B82\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99-\u0B9A\u0B9C\u0B9E-\u0B9F\u0BA3-\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BBE-\u0BBF\u0BC0\
u0BC1-\u0BC2\u0BC6-\u0BC8\u0BCA-\u0BCC\u0BCD\u0BD7\u0BE6-\u0BEF\u0C01-\u0C03\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3E-\u0C40\u0C41-\u0C44\u0C46-\u0C48\u0C4A-\u0C4D\u0C55-\u0C56\u0C60-\u0C61\u0C66-\u0C6F\u0C82-\u0C83\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3\u0CB5-\u0CB9\u0CBC\u0CBD\u0CBE\u0CBF\u0CC0-\u0CC4\u0CC6\u0CC7-\u0CC8\u0CCA-\u0CCB\u0CCC-\u0CCD\u0CD5-\u0CD6\u0CDE\u0CE0-\u0CE1\u0CE6-\u0CEF\u0D02-\u0D03\u0D05-\u0D0C\u0D0E-\u0D10\u0D12-\u0D28\u0D2A-\u0D39\u0D3E-\u0D40\u0D41-\u0D43\u0D46-\u0D48\u0D4A-\u0D4C\u0D4D\u0D57\u0D60-\u0D61\u0D66-\u0D6F\u0D82-\u0D83\u0D85-\u0D96\u0D9A-\u0DB1\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0DCA\u0DCF-\u0DD1\u0DD2-\u0DD4\u0DD6\u0DD8-\u0DDF\u0DF2-\u0DF3\u0E01-\u0E30\u0E31\u0E32-\u0E33\u0E34-\u0E3A\u0E40-\u0E45\u0E46\u0E47-\u0E4E\u0E50-\u0E59\u0E81-\u0E82\u0E84\u0E87-\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3\u0EA5\u0EA7\u0EAA-\u0EAB\u0EAD-\u0EB0\u0EB1\u0EB2-\u0EB3\u0EB4-\u0EB9\u0EBB-\u0EBC\u0EBD\u0EC0-\u0EC4\u0EC6\u0EC8-\u0ECD\u0ED0-\u0ED9\u0EDC-\u0EDD\u0F00\u0F18-\u0F19\u0F20-\u0F29\u0F35\u0F37\u0F39\u0F3E-\u0F3F\u0F40-\u0F47\u0F49-\u0F6A\u0F71-\u0F7E\u0F7F\u0F80-\u0F84\u0F86-\u0F87\u0F88-\u0F8B\u0F90-\u0F97\u0F99-\u0FBC\u0FC6\u1000-\u1021\u1023-\u1027\u1029-\u102A\u102C\u102D-\u1030\u1031\u1032\u1036-\u1037\u1038\u1039\u1040-\u1049\u1050-\u1055\u1056-\u1057\u1058-\u1059\u10A0-\u10C5\u10D0-\u10FA\u10FC\u1100-\u1159\u115F-\u11A2\u11A8-\u11F9\u1200-\u1248\u124A-\u124D\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310\u1312-\u1315\u1318-\u135A\u135F\u1369-\u1371\u1380-\u138F\u13A0-\u13F4\u1401-\u166C\u166F-\u1676\u1681-\u169A\u16A0-\u16EA\u16EE-\u16F0\u1700-\u170C\u170E-\u1711\u1712-\u1714\u1720-\u1731\u1732-\u1734\u1740-\u1751\u1752-\u1753\u1760-\u176C\u176E-\u1770\u1772-\u1773\u1780-\u17B3\u17B6\u17B7-\u17BD\u17BE-\u17C5\u17C6\u17C7-\u17C8\u17C9-\u17D3\u17D7\u17DC\u17DD\u17E0-\u17E9\u180B-\u180D\
u1810-\u1819\u1820-\u1842\u1843\u1844-\u1877\u1880-\u18A8\u18A9\u1900-\u191C\u1920-\u1922\u1923-\u1926\u1927-\u1928\u1929-\u192B\u1930-\u1931\u1932\u1933-\u1938\u1939-\u193B\u1946-\u194F\u1950-\u196D\u1970-\u1974\u1980-\u19A9\u19B0-\u19C0\u19C1-\u19C7\u19C8-\u19C9\u19D0-\u19D9\u1A00-\u1A16\u1A17-\u1A18\u1A19-\u1A1B\u1D00-\u1D2B\u1D2C-\u1D61\u1D62-\u1D77\u1D78\u1D79-\u1D9A\u1D9B-\u1DBF\u1DC0-\u1DC3\u1E00-\u1E9B\u1EA0-\u1EF9\u1F00-\u1F15\u1F18-\u1F1D\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u203F-\u2040\u2054\u2071\u207F\u2090-\u2094\u20D0-\u20DC\u20E1\u20E5-\u20EB\u2102\u2107\u210A-\u2113\u2115\u2118\u2119-\u211D\u2124\u2126\u2128\u212A-\u212D\u212E\u212F-\u2131\u2133-\u2134\u2135-\u2138\u2139\u213C-\u213F\u2145-\u2149\u2160-\u2183\u2C00-\u2C2E\u2C30-\u2C5E\u2C80-\u2CE4\u2D00-\u2D25\u2D30-\u2D65\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE\u2DD0-\u2DD6\u2DD8-\u2DDE\u3005\u3006\u3007\u3021-\u3029\u302A-\u302F\u3031-\u3035\u3038-\u303A\u303B\u303C\u3041-\u3096\u3099-\u309A\u309D-\u309E\u309F\u30A1-\u30FA\u30FC-\u30FE\u30FF\u3105-\u312C\u3131-\u318E\u31A0-\u31B7\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FBB\uA000-\uA014\uA015\uA016-\uA48C\uA800-\uA801\uA802\uA803-\uA805\uA806\uA807-\uA80A\uA80B\uA80C-\uA822\uA823-\uA824\uA825-\uA826\uA827\uAC00-\uD7A3\uF900-\uFA2D\uFA30-\uFA6A\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1E\uFB1F-\uFB28\uFB2A-\uFB36\uFB38-\uFB3C\uFB3E\uFB40-\uFB41\uFB43-\uFB44\uFB46-\uFBB1\uFBD3-\uFC5D\uFC64-\uFD3D\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDF9\uFE00-\uFE0F\uFE20-\uFE23\uFE33-\uFE34\uFE4D-\uFE4F\uFE71\uFE73\uFE77\uFE79\uFE7B\uFE7D\uFE7F-\uFEFC\uFF10-\uFF19\uFF21-\uFF3A\uFF3F\uFF41-\uFF5A\uFF66-\uFF6F\uFF70\uFF71-\uFF9D\uFF9E-\uFF9F\uFFA0-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC'
+
+def allexcept(*args):
+ newcats = cats[:]
+ for arg in args:
+ newcats.remove(arg)
+ return u''.join([globals()[cat] for cat in newcats])
+
+if __name__ == '__main__':
+ import unicodedata
+
+ categories = {}
+
+ f = open(__file__.rstrip('co'))
+ try:
+ content = f.read()
+ finally:
+ f.close()
+
+ header = content[:content.find('Cc =')]
+ footer = content[content.find("def combine("):]
+
+ for code in range(65535):
+ c = unichr(code)
+ cat = unicodedata.category(c)
+ categories.setdefault(cat, []).append(c)
+
+ f = open(__file__, 'w')
+ f.write(header)
+
+ for cat in sorted(categories):
+ val = u''.join(categories[cat])
+ if cat == 'Cs':
+ # Jython can't handle isolated surrogates
+ f.write("""\
+try:
+ Cs = eval(r"%r")
+except UnicodeDecodeError:
+ Cs = '' # Jython can't handle isolated surrogates\n\n""" % val)
+ else:
+ f.write('%s = %r\n\n' % (cat, val))
+ f.write('cats = %r\n\n' % sorted(categories.keys()))
+
+ f.write(footer)
+ f.close()
diff --git a/deps/v8/third_party/jinja2/bccache.py b/deps/v8/third_party/jinja2/bccache.py
new file mode 100644
index 0000000000..f5bd3145f6
--- /dev/null
+++ b/deps/v8/third_party/jinja2/bccache.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.bccache
+ ~~~~~~~~~~~~~~
+
+ This module implements the bytecode cache system Jinja is optionally
+ using. This is useful if you have very complex template situations and
+ the compiliation of all those templates slow down your application too
+ much.
+
+ Situations where this is useful are often forking web applications that
+ are initialized on the first request.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD.
+"""
+from os import path, listdir
+import os
+import sys
+import stat
+import errno
+import marshal
+import tempfile
+import fnmatch
+from hashlib import sha1
+from jinja2.utils import open_if_exists
+from jinja2._compat import BytesIO, pickle, PY2, text_type
+
+
+# marshal works better on 3.x, one hack less required
+if not PY2:
+ marshal_dump = marshal.dump
+ marshal_load = marshal.load
+else:
+
+ def marshal_dump(code, f):
+ if isinstance(f, file):
+ marshal.dump(code, f)
+ else:
+ f.write(marshal.dumps(code))
+
+ def marshal_load(f):
+ if isinstance(f, file):
+ return marshal.load(f)
+ return marshal.loads(f.read())
+
+
+bc_version = 2
+
+# magic version used to only change with new jinja versions. With 2.6
+# we change this to also take Python version changes into account. The
+# reason for this is that Python tends to segfault if fed earlier bytecode
+# versions because someone thought it would be a good idea to reuse opcodes
+# or make Python incompatible with earlier versions.
+bc_magic = 'j2'.encode('ascii') + \
+ pickle.dumps(bc_version, 2) + \
+ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
+
+
+class Bucket(object):
+ """Buckets are used to store the bytecode for one template. It's created
+ and initialized by the bytecode cache and passed to the loading functions.
+
+ The buckets get an internal checksum from the cache assigned and use this
+ to automatically reject outdated cache material. Individual bytecode
+ cache subclasses don't have to care about cache invalidation.
+ """
+
+ def __init__(self, environment, key, checksum):
+ self.environment = environment
+ self.key = key
+ self.checksum = checksum
+ self.reset()
+
+ def reset(self):
+ """Resets the bucket (unloads the bytecode)."""
+ self.code = None
+
+ def load_bytecode(self, f):
+ """Loads bytecode from a file or file like object."""
+ # make sure the magic header is correct
+ magic = f.read(len(bc_magic))
+ if magic != bc_magic:
+ self.reset()
+ return
+ # the source code of the file changed, we need to reload
+ checksum = pickle.load(f)
+ if self.checksum != checksum:
+ self.reset()
+ return
+ # if marshal_load fails then we need to reload
+ try:
+ self.code = marshal_load(f)
+ except (EOFError, ValueError, TypeError):
+ self.reset()
+ return
+
+ def write_bytecode(self, f):
+ """Dump the bytecode into the file or file like object passed."""
+ if self.code is None:
+ raise TypeError('can\'t write empty bucket')
+ f.write(bc_magic)
+ pickle.dump(self.checksum, f, 2)
+ marshal_dump(self.code, f)
+
+ def bytecode_from_string(self, string):
+ """Load bytecode from a string."""
+ self.load_bytecode(BytesIO(string))
+
+ def bytecode_to_string(self):
+ """Return the bytecode as string."""
+ out = BytesIO()
+ self.write_bytecode(out)
+ return out.getvalue()
+
+
+class BytecodeCache(object):
+ """To implement your own bytecode cache you have to subclass this class
+ and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
+ these methods are passed a :class:`~jinja2.bccache.Bucket`.
+
+ A very basic bytecode cache that saves the bytecode on the file system::
+
+ from os import path
+
+ class MyCache(BytecodeCache):
+
+ def __init__(self, directory):
+ self.directory = directory
+
+ def load_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ if path.exists(filename):
+ with open(filename, 'rb') as f:
+ bucket.load_bytecode(f)
+
+ def dump_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ with open(filename, 'wb') as f:
+ bucket.write_bytecode(f)
+
+ A more advanced version of a filesystem based bytecode cache is part of
+ Jinja2.
+ """
+
+ def load_bytecode(self, bucket):
+ """Subclasses have to override this method to load bytecode into a
+ bucket. If they are not able to find code in the cache for the
+ bucket, it must not do anything.
+ """
+ raise NotImplementedError()
+
+ def dump_bytecode(self, bucket):
+ """Subclasses have to override this method to write the bytecode
+ from a bucket back to the cache. If it unable to do so it must not
+ fail silently but raise an exception.
+ """
+ raise NotImplementedError()
+
+ def clear(self):
+ """Clears the cache. This method is not used by Jinja2 but should be
+ implemented to allow applications to clear the bytecode cache used
+ by a particular environment.
+ """
+
+ def get_cache_key(self, name, filename=None):
+ """Returns the unique hash key for this template name."""
+ hash = sha1(name.encode('utf-8'))
+ if filename is not None:
+ filename = '|' + filename
+ if isinstance(filename, text_type):
+ filename = filename.encode('utf-8')
+ hash.update(filename)
+ return hash.hexdigest()
+
+ def get_source_checksum(self, source):
+ """Returns a checksum for the source."""
+ return sha1(source.encode('utf-8')).hexdigest()
+
+ def get_bucket(self, environment, name, filename, source):
+ """Return a cache bucket for the given template. All arguments are
+ mandatory but filename may be `None`.
+ """
+ key = self.get_cache_key(name, filename)
+ checksum = self.get_source_checksum(source)
+ bucket = Bucket(environment, key, checksum)
+ self.load_bytecode(bucket)
+ return bucket
+
+ def set_bucket(self, bucket):
+ """Put the bucket into the cache."""
+ self.dump_bytecode(bucket)
+
+
+class FileSystemBytecodeCache(BytecodeCache):
+ """A bytecode cache that stores bytecode on the filesystem. It accepts
+ two arguments: The directory where the cache items are stored and a
+ pattern string that is used to build the filename.
+
+ If no directory is specified a default cache directory is selected. On
+ Windows the user's temp directory is used, on UNIX systems a directory
+ is created for the user in the system temp directory.
+
+ The pattern can be used to have multiple separate caches operate on the
+ same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
+ is replaced with the cache key.
+
+ >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
+
+ This bytecode cache supports clearing of the cache using the clear method.
+ """
+
+ def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
+ if directory is None:
+ directory = self._get_default_cache_dir()
+ self.directory = directory
+ self.pattern = pattern
+
+ def _get_default_cache_dir(self):
+ def _unsafe_dir():
+ raise RuntimeError('Cannot determine safe temp directory. You '
+ 'need to explicitly provide one.')
+
+ tmpdir = tempfile.gettempdir()
+
+ # On windows the temporary directory is used specific unless
+ # explicitly forced otherwise. We can just use that.
+ if os.name == 'nt':
+ return tmpdir
+ if not hasattr(os, 'getuid'):
+ _unsafe_dir()
+
+ dirname = '_jinja2-cache-%d' % os.getuid()
+ actual_dir = os.path.join(tmpdir, dirname)
+
+ try:
+ os.mkdir(actual_dir, stat.S_IRWXU)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ try:
+ os.chmod(actual_dir, stat.S_IRWXU)
+ actual_dir_stat = os.lstat(actual_dir)
+ if actual_dir_stat.st_uid != os.getuid() \
+ or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ _unsafe_dir()
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ actual_dir_stat = os.lstat(actual_dir)
+ if actual_dir_stat.st_uid != os.getuid() \
+ or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ _unsafe_dir()
+
+ return actual_dir
+
+ def _get_cache_filename(self, bucket):
+ return path.join(self.directory, self.pattern % bucket.key)
+
+ def load_bytecode(self, bucket):
+ f = open_if_exists(self._get_cache_filename(bucket), 'rb')
+ if f is not None:
+ try:
+ bucket.load_bytecode(f)
+ finally:
+ f.close()
+
+ def dump_bytecode(self, bucket):
+ f = open(self._get_cache_filename(bucket), 'wb')
+ try:
+ bucket.write_bytecode(f)
+ finally:
+ f.close()
+
+ def clear(self):
+ # imported lazily here because google app-engine doesn't support
+ # write access on the file system and the function does not exist
+ # normally.
+ from os import remove
+ files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
+ for filename in files:
+ try:
+ remove(path.join(self.directory, filename))
+ except OSError:
+ pass
+
+
+class MemcachedBytecodeCache(BytecodeCache):
+ """This class implements a bytecode cache that uses a memcache cache for
+ storing the information. It does not enforce a specific memcache library
+ (tummy's memcache or cmemcache) but will accept any class that provides
+ the minimal interface required.
+
+ Libraries compatible with this class:
+
+ - `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
+ - `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
+ - `cmemcache <http://gijsbert.org/cmemcache/>`_
+
+ (Unfortunately the django cache interface is not compatible because it
+ does not support storing binary data, only unicode. You can however pass
+ the underlying cache client to the bytecode cache which is available
+ as `django.core.cache.cache._client`.)
+
+ The minimal interface for the client passed to the constructor is this:
+
+ .. class:: MinimalClientInterface
+
+ .. method:: set(key, value[, timeout])
+
+ Stores the bytecode in the cache. `value` is a string and
+ `timeout` the timeout of the key. If timeout is not provided
+ a default timeout or no timeout should be assumed, if it's
+ provided it's an integer with the number of seconds the cache
+ item should exist.
+
+ .. method:: get(key)
+
+ Returns the value for the cache key. If the item does not
+ exist in the cache the return value must be `None`.
+
+ The other arguments to the constructor are the prefix for all keys that
+ is added before the actual cache key and the timeout for the bytecode in
+ the cache system. We recommend a high (or no) timeout.
+
+ This bytecode cache does not support clearing of used items in the cache.
+ The clear method is a no-operation function.
+
+ .. versionadded:: 2.7
+ Added support for ignoring memcache errors through the
+ `ignore_memcache_errors` parameter.
+ """
+
+ def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
+ ignore_memcache_errors=True):
+ self.client = client
+ self.prefix = prefix
+ self.timeout = timeout
+ self.ignore_memcache_errors = ignore_memcache_errors
+
+ def load_bytecode(self, bucket):
+ try:
+ code = self.client.get(self.prefix + bucket.key)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
+ code = None
+ if code is not None:
+ bucket.bytecode_from_string(code)
+
+ def dump_bytecode(self, bucket):
+ args = (self.prefix + bucket.key, bucket.bytecode_to_string())
+ if self.timeout is not None:
+ args += (self.timeout,)
+ try:
+ self.client.set(*args)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
diff --git a/deps/v8/third_party/jinja2/compiler.py b/deps/v8/third_party/jinja2/compiler.py
new file mode 100644
index 0000000000..fad007b596
--- /dev/null
+++ b/deps/v8/third_party/jinja2/compiler.py
@@ -0,0 +1,1686 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.compiler
+ ~~~~~~~~~~~~~~~
+
+ Compiles nodes into python code.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from itertools import chain
+from copy import deepcopy
+from keyword import iskeyword as is_python_keyword
+from jinja2 import nodes
+from jinja2.nodes import EvalContext
+from jinja2.visitor import NodeVisitor
+from jinja2.exceptions import TemplateAssertionError
+from jinja2.utils import Markup, concat, escape
+from jinja2._compat import range_type, text_type, string_types, \
+ iteritems, NativeStringIO, imap
+
+
+operators = {
+ 'eq': '==',
+ 'ne': '!=',
+ 'gt': '>',
+ 'gteq': '>=',
+ 'lt': '<',
+ 'lteq': '<=',
+ 'in': 'in',
+ 'notin': 'not in'
+}
+
+# what method to iterate over items do we want to use for dict iteration
+# in generated code? on 2.x let's go with iteritems, on 3.x with items
+if hasattr(dict, 'iteritems'):
+ dict_item_iter = 'iteritems'
+else:
+ dict_item_iter = 'items'
+
+
+# does if 0: dummy(x) get us x into the scope?
+def unoptimize_before_dead_code():
+ x = 42
+ def f():
+ if 0: dummy(x)
+ return f
+
+# The getattr is necessary for pypy which does not set this attribute if
+# no closure is on the function
+unoptimize_before_dead_code = bool(
+ getattr(unoptimize_before_dead_code(), '__closure__', None))
+
+
+def generate(node, environment, name, filename, stream=None,
+ defer_init=False):
+ """Generate the python source for a node tree."""
+ if not isinstance(node, nodes.Template):
+ raise TypeError('Can\'t compile non template nodes')
+ generator = environment.code_generator_class(environment, name, filename,
+ stream, defer_init)
+ generator.visit(node)
+ if stream is None:
+ return generator.stream.getvalue()
+
+
+def has_safe_repr(value):
+ """Does the node have a safe representation?"""
+ if value is None or value is NotImplemented or value is Ellipsis:
+ return True
+ if isinstance(value, (bool, int, float, complex, range_type,
+ Markup) + string_types):
+ return True
+ if isinstance(value, (tuple, list, set, frozenset)):
+ for item in value:
+ if not has_safe_repr(item):
+ return False
+ return True
+ elif isinstance(value, dict):
+ for key, value in iteritems(value):
+ if not has_safe_repr(key):
+ return False
+ if not has_safe_repr(value):
+ return False
+ return True
+ return False
+
+
+def find_undeclared(nodes, names):
+ """Check if the names passed are accessed undeclared. The return value
+ is a set of all the undeclared names from the sequence of names found.
+ """
+ visitor = UndeclaredNameVisitor(names)
+ try:
+ for node in nodes:
+ visitor.visit(node)
+ except VisitorExit:
+ pass
+ return visitor.undeclared
+
+
+class Identifiers(object):
+ """Tracks the status of identifiers in frames."""
+
+ def __init__(self):
+ # variables that are known to be declared (probably from outer
+ # frames or because they are special for the frame)
+ self.declared = set()
+
+ # undeclared variables from outer scopes
+ self.outer_undeclared = set()
+
+ # names that are accessed without being explicitly declared by
+ # this one or any of the outer scopes. Names can appear both in
+ # declared and undeclared.
+ self.undeclared = set()
+
+ # names that are declared locally
+ self.declared_locally = set()
+
+ # names that are declared by parameters
+ self.declared_parameter = set()
+
+ def add_special(self, name):
+ """Register a special name like `loop`."""
+ self.undeclared.discard(name)
+ self.declared.add(name)
+
+ def is_declared(self, name):
+ """Check if a name is declared in this or an outer scope."""
+ if name in self.declared_locally or name in self.declared_parameter:
+ return True
+ return name in self.declared
+
+ def copy(self):
+ return deepcopy(self)
+
+
+class Frame(object):
+ """Holds compile time information for us."""
+
+ def __init__(self, eval_ctx, parent=None):
+ self.eval_ctx = eval_ctx
+ self.identifiers = Identifiers()
+
+ # a toplevel frame is the root + soft frames such as if conditions.
+ self.toplevel = False
+
+ # the root frame is basically just the outermost frame, so no if
+ # conditions. This information is used to optimize inheritance
+ # situations.
+ self.rootlevel = False
+
+ # in some dynamic inheritance situations the compiler needs to add
+ # write tests around output statements.
+ self.require_output_check = parent and parent.require_output_check
+
+ # inside some tags we are using a buffer rather than yield statements.
+ # this for example affects {% filter %} or {% macro %}. If a frame
+ # is buffered this variable points to the name of the list used as
+ # buffer.
+ self.buffer = None
+
+ # the name of the block we're in, otherwise None.
+ self.block = parent and parent.block or None
+
+ # a set of actually assigned names
+ self.assigned_names = set()
+
+ # the parent of this frame
+ self.parent = parent
+
+ if parent is not None:
+ self.identifiers.declared.update(
+ parent.identifiers.declared |
+ parent.identifiers.declared_parameter |
+ parent.assigned_names
+ )
+ self.identifiers.outer_undeclared.update(
+ parent.identifiers.undeclared -
+ self.identifiers.declared
+ )
+ self.buffer = parent.buffer
+
+ def copy(self):
+ """Create a copy of the current one."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.identifiers = object.__new__(self.identifiers.__class__)
+ rv.identifiers.__dict__.update(self.identifiers.__dict__)
+ return rv
+
+ def inspect(self, nodes):
+ """Walk the node and check for identifiers. If the scope is hard (eg:
+ enforce on a python level) overrides from outer scopes are tracked
+ differently.
+ """
+ visitor = FrameIdentifierVisitor(self.identifiers)
+ for node in nodes:
+ visitor.visit(node)
+
+ def find_shadowed(self, extra=()):
+ """Find all the shadowed names. extra is an iterable of variables
+ that may be defined with `add_special` which may occour scoped.
+ """
+ i = self.identifiers
+ return (i.declared | i.outer_undeclared) & \
+ (i.declared_locally | i.declared_parameter) | \
+ set(x for x in extra if i.is_declared(x))
+
+ def inner(self):
+ """Return an inner frame."""
+ return Frame(self.eval_ctx, self)
+
+ def soft(self):
+ """Return a soft frame. A soft frame may not be modified as
+ standalone thing as it shares the resources with the frame it
+ was created of, but it's not a rootlevel frame any longer.
+ """
+ rv = self.copy()
+ rv.rootlevel = False
+ return rv
+
+ __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+ """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+ """A visitor that collects filter and test calls."""
+
+ def __init__(self):
+ self.filters = set()
+ self.tests = set()
+
+ def visit_Filter(self, node):
+ self.generic_visit(node)
+ self.filters.add(node.name)
+
+ def visit_Test(self, node):
+ self.generic_visit(node)
+ self.tests.add(node.name)
+
+ def visit_Block(self, node):
+ """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+ """A visitor that checks if a name is accessed without being
+ declared. This is different from the frame visitor as it will
+ not stop at closure frames.
+ """
+
+ def __init__(self, names):
+ self.names = set(names)
+ self.undeclared = set()
+
+ def visit_Name(self, node):
+ if node.ctx == 'load' and node.name in self.names:
+ self.undeclared.add(node.name)
+ if self.undeclared == self.names:
+ raise VisitorExit()
+ else:
+ self.names.discard(node.name)
+
+ def visit_Block(self, node):
+ """Stop visiting a blocks."""
+
+
+class FrameIdentifierVisitor(NodeVisitor):
+ """A visitor for `Frame.inspect`."""
+
+ def __init__(self, identifiers):
+ self.identifiers = identifiers
+
+ def visit_Name(self, node):
+ """All assignments to names go through this function."""
+ if node.ctx == 'store':
+ self.identifiers.declared_locally.add(node.name)
+ elif node.ctx == 'param':
+ self.identifiers.declared_parameter.add(node.name)
+ elif node.ctx == 'load' and not \
+ self.identifiers.is_declared(node.name):
+ self.identifiers.undeclared.add(node.name)
+
+ def visit_If(self, node):
+ self.visit(node.test)
+ real_identifiers = self.identifiers
+
+ old_names = real_identifiers.declared_locally | \
+ real_identifiers.declared_parameter
+
+ def inner_visit(nodes):
+ if not nodes:
+ return set()
+ self.identifiers = real_identifiers.copy()
+ for subnode in nodes:
+ self.visit(subnode)
+ rv = self.identifiers.declared_locally - old_names
+ # we have to remember the undeclared variables of this branch
+ # because we will have to pull them.
+ real_identifiers.undeclared.update(self.identifiers.undeclared)
+ self.identifiers = real_identifiers
+ return rv
+
+ body = inner_visit(node.body)
+ else_ = inner_visit(node.else_ or ())
+
+ # the differences between the two branches are also pulled as
+ # undeclared variables
+ real_identifiers.undeclared.update(body.symmetric_difference(else_) -
+ real_identifiers.declared)
+
+ # remember those that are declared.
+ real_identifiers.declared_locally.update(body | else_)
+
+ def visit_Macro(self, node):
+ self.identifiers.declared_locally.add(node.name)
+
+ def visit_Import(self, node):
+ self.generic_visit(node)
+ self.identifiers.declared_locally.add(node.target)
+
+ def visit_FromImport(self, node):
+ self.generic_visit(node)
+ for name in node.names:
+ if isinstance(name, tuple):
+ self.identifiers.declared_locally.add(name[1])
+ else:
+ self.identifiers.declared_locally.add(name)
+
+ def visit_Assign(self, node):
+ """Visit assignments in the correct order."""
+ self.visit(node.node)
+ self.visit(node.target)
+
+ def visit_For(self, node):
+ """Visiting stops at for blocks. However the block sequence
+ is visited as part of the outer scope.
+ """
+ self.visit(node.iter)
+
+ def visit_CallBlock(self, node):
+ self.visit(node.call)
+
+ def visit_FilterBlock(self, node):
+ self.visit(node.filter)
+
+ def visit_AssignBlock(self, node):
+ """Stop visiting at block assigns."""
+
+ def visit_Scope(self, node):
+ """Stop visiting at scopes."""
+
+ def visit_Block(self, node):
+ """Stop visiting at blocks."""
+
+
+class CompilerExit(Exception):
+ """Raised if the compiler encountered a situation where it just
+ doesn't make sense to further process the code. Any block that
+ raises such an exception is not further processed.
+ """
+
+
+class CodeGenerator(NodeVisitor):
+
+ def __init__(self, environment, name, filename, stream=None,
+ defer_init=False):
+ if stream is None:
+ stream = NativeStringIO()
+ self.environment = environment
+ self.name = name
+ self.filename = filename
+ self.stream = stream
+ self.created_block_context = False
+ self.defer_init = defer_init
+
+ # aliases for imports
+ self.import_aliases = {}
+
+ # a registry for all blocks. Because blocks are moved out
+ # into the global python scope they are registered here
+ self.blocks = {}
+
+ # the number of extends statements so far
+ self.extends_so_far = 0
+
+ # some templates have a rootlevel extends. In this case we
+ # can safely assume that we're a child template and do some
+ # more optimizations.
+ self.has_known_extends = False
+
+ # the current line number
+ self.code_lineno = 1
+
+ # registry of all filters and tests (global, not block local)
+ self.tests = {}
+ self.filters = {}
+
+ # the debug information
+ self.debug_info = []
+ self._write_debug_info = None
+
+ # the number of new lines before the next write()
+ self._new_lines = 0
+
+ # the line number of the last written statement
+ self._last_line = 0
+
+ # true if nothing was written so far.
+ self._first_write = True
+
+ # used by the `temporary_identifier` method to get new
+ # unique, temporary identifier
+ self._last_identifier = 0
+
+ # the current indentation
+ self._indentation = 0
+
+ # -- Various compilation helpers
+
+ def fail(self, msg, lineno):
+ """Fail with a :exc:`TemplateAssertionError`."""
+ raise TemplateAssertionError(msg, lineno, self.name, self.filename)
+
+ def temporary_identifier(self):
+ """Get a new unique identifier."""
+ self._last_identifier += 1
+ return 't_%d' % self._last_identifier
+
+ def buffer(self, frame):
+ """Enable buffering for the frame from that point onwards."""
+ frame.buffer = self.temporary_identifier()
+ self.writeline('%s = []' % frame.buffer)
+
+ def return_buffer_contents(self, frame):
+ """Return the buffer contents of the frame."""
+ if frame.eval_ctx.volatile:
+ self.writeline('if context.eval_ctx.autoescape:')
+ self.indent()
+ self.writeline('return Markup(concat(%s))' % frame.buffer)
+ self.outdent()
+ self.writeline('else:')
+ self.indent()
+ self.writeline('return concat(%s)' % frame.buffer)
+ self.outdent()
+ elif frame.eval_ctx.autoescape:
+ self.writeline('return Markup(concat(%s))' % frame.buffer)
+ else:
+ self.writeline('return concat(%s)' % frame.buffer)
+
+ def indent(self):
+ """Indent by one."""
+ self._indentation += 1
+
+ def outdent(self, step=1):
+ """Outdent by step."""
+ self._indentation -= step
+
+ def start_write(self, frame, node=None):
+ """Yield or write into the frame buffer."""
+ if frame.buffer is None:
+ self.writeline('yield ', node)
+ else:
+ self.writeline('%s.append(' % frame.buffer, node)
+
+ def end_write(self, frame):
+ """End the writing process started by `start_write`."""
+ if frame.buffer is not None:
+ self.write(')')
+
+ def simple_write(self, s, frame, node=None):
+ """Simple shortcut for start_write + write + end_write."""
+ self.start_write(frame, node)
+ self.write(s)
+ self.end_write(frame)
+
+ def blockvisit(self, nodes, frame):
+ """Visit a list of nodes as block in a frame. If the current frame
+ is no buffer a dummy ``if 0: yield None`` is written automatically
+ unless the force_generator parameter is set to False.
+ """
+ if frame.buffer is None:
+ self.writeline('if 0: yield None')
+ else:
+ self.writeline('pass')
+ try:
+ for node in nodes:
+ self.visit(node, frame)
+ except CompilerExit:
+ pass
+
+ def write(self, x):
+ """Write a string into the output stream."""
+ if self._new_lines:
+ if not self._first_write:
+ self.stream.write('\n' * self._new_lines)
+ self.code_lineno += self._new_lines
+ if self._write_debug_info is not None:
+ self.debug_info.append((self._write_debug_info,
+ self.code_lineno))
+ self._write_debug_info = None
+ self._first_write = False
+ self.stream.write(' ' * self._indentation)
+ self._new_lines = 0
+ self.stream.write(x)
+
+ def writeline(self, x, node=None, extra=0):
+ """Combination of newline and write."""
+ self.newline(node, extra)
+ self.write(x)
+
+ def newline(self, node=None, extra=0):
+ """Add one or more newlines before the next write."""
+ self._new_lines = max(self._new_lines, 1 + extra)
+ if node is not None and node.lineno != self._last_line:
+ self._write_debug_info = node.lineno
+ self._last_line = node.lineno
+
+ def signature(self, node, frame, extra_kwargs=None):
+ """Writes a function call to the stream for the current node.
+ A leading comma is added automatically. The extra keyword
+ arguments may not include python keywords otherwise a syntax
+ error could occour. The extra keyword arguments should be given
+ as python dict.
+ """
+ # if any of the given keyword arguments is a python keyword
+ # we have to make sure that no invalid call is created.
+ kwarg_workaround = False
+ for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
+ if is_python_keyword(kwarg):
+ kwarg_workaround = True
+ break
+
+ for arg in node.args:
+ self.write(', ')
+ self.visit(arg, frame)
+
+ if not kwarg_workaround:
+ for kwarg in node.kwargs:
+ self.write(', ')
+ self.visit(kwarg, frame)
+ if extra_kwargs is not None:
+ for key, value in iteritems(extra_kwargs):
+ self.write(', %s=%s' % (key, value))
+ if node.dyn_args:
+ self.write(', *')
+ self.visit(node.dyn_args, frame)
+
+ if kwarg_workaround:
+ if node.dyn_kwargs is not None:
+ self.write(', **dict({')
+ else:
+ self.write(', **{')
+ for kwarg in node.kwargs:
+ self.write('%r: ' % kwarg.key)
+ self.visit(kwarg.value, frame)
+ self.write(', ')
+ if extra_kwargs is not None:
+ for key, value in iteritems(extra_kwargs):
+ self.write('%r: %s, ' % (key, value))
+ if node.dyn_kwargs is not None:
+ self.write('}, **')
+ self.visit(node.dyn_kwargs, frame)
+ self.write(')')
+ else:
+ self.write('}')
+
+ elif node.dyn_kwargs is not None:
+ self.write(', **')
+ self.visit(node.dyn_kwargs, frame)
+
+ def pull_locals(self, frame):
+ """Pull all the references identifiers into the local scope."""
+ for name in frame.identifiers.undeclared:
+ self.writeline('l_%s = context.resolve(%r)' % (name, name))
+
+ def pull_dependencies(self, nodes):
+ """Pull all the dependencies."""
+ visitor = DependencyFinderVisitor()
+ for node in nodes:
+ visitor.visit(node)
+ for dependency in 'filters', 'tests':
+ mapping = getattr(self, dependency)
+ for name in getattr(visitor, dependency):
+ if name not in mapping:
+ mapping[name] = self.temporary_identifier()
+ self.writeline('%s = environment.%s[%r]' %
+ (mapping[name], dependency, name))
+
+ def unoptimize_scope(self, frame):
+ """Disable Python optimizations for the frame."""
+ # XXX: this is not that nice but it has no real overhead. It
+ # mainly works because python finds the locals before dead code
+ # is removed. If that breaks we have to add a dummy function
+ # that just accepts the arguments and does nothing.
+ if frame.identifiers.declared:
+ self.writeline('%sdummy(%s)' % (
+ unoptimize_before_dead_code and 'if 0: ' or '',
+ ', '.join('l_' + name for name in frame.identifiers.declared)
+ ))
+
+ def push_scope(self, frame, extra_vars=()):
+ """This function returns all the shadowed variables in a dict
+ in the form name: alias and will write the required assignments
+ into the current scope. No indentation takes place.
+
+ This also predefines locally declared variables from the loop
+ body because under some circumstances it may be the case that
+
+ `extra_vars` is passed to `Frame.find_shadowed`.
+ """
+ aliases = {}
+ for name in frame.find_shadowed(extra_vars):
+ aliases[name] = ident = self.temporary_identifier()
+ self.writeline('%s = l_%s' % (ident, name))
+ to_declare = set()
+ for name in frame.identifiers.declared_locally:
+ if name not in aliases:
+ to_declare.add('l_' + name)
+ if to_declare:
+ self.writeline(' = '.join(to_declare) + ' = missing')
+ return aliases
+
+ def pop_scope(self, aliases, frame):
+ """Restore all aliases and delete unused variables."""
+ for name, alias in iteritems(aliases):
+ self.writeline('l_%s = %s' % (name, alias))
+ to_delete = set()
+ for name in frame.identifiers.declared_locally:
+ if name not in aliases:
+ to_delete.add('l_' + name)
+ if to_delete:
+ # we cannot use the del statement here because enclosed
+ # scopes can trigger a SyntaxError:
+ # a = 42; b = lambda: a; del a
+ self.writeline(' = '.join(to_delete) + ' = missing')
+
+ def function_scoping(self, node, frame, children=None,
+ find_special=True):
+ """In Jinja a few statements require the help of anonymous
+ functions. Those are currently macros and call blocks and in
+ the future also recursive loops. As there is currently
+ technical limitation that doesn't allow reading and writing a
+ variable in a scope where the initial value is coming from an
+ outer scope, this function tries to fall back with a common
+ error message. Additionally the frame passed is modified so
+ that the argumetns are collected and callers are looked up.
+
+ This will return the modified frame.
+ """
+ # we have to iterate twice over it, make sure that works
+ if children is None:
+ children = node.iter_child_nodes()
+ children = list(children)
+ func_frame = frame.inner()
+ func_frame.inspect(children)
+
+ # variables that are undeclared (accessed before declaration) and
+ # declared locally *and* part of an outside scope raise a template
+ # assertion error. Reason: we can't generate reasonable code from
+ # it without aliasing all the variables.
+ # this could be fixed in Python 3 where we have the nonlocal
+ # keyword or if we switch to bytecode generation
+ overridden_closure_vars = (
+ func_frame.identifiers.undeclared &
+ func_frame.identifiers.declared &
+ (func_frame.identifiers.declared_locally |
+ func_frame.identifiers.declared_parameter)
+ )
+ if overridden_closure_vars:
+ self.fail('It\'s not possible to set and access variables '
+ 'derived from an outer scope! (affects: %s)' %
+ ', '.join(sorted(overridden_closure_vars)), node.lineno)
+
+ # remove variables from a closure from the frame's undeclared
+ # identifiers.
+ func_frame.identifiers.undeclared -= (
+ func_frame.identifiers.undeclared &
+ func_frame.identifiers.declared
+ )
+
+ # no special variables for this scope, abort early
+ if not find_special:
+ return func_frame
+
+ func_frame.accesses_kwargs = False
+ func_frame.accesses_varargs = False
+ func_frame.accesses_caller = False
+ func_frame.arguments = args = ['l_' + x.name for x in node.args]
+
+ undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
+
+ if 'caller' in undeclared:
+ func_frame.accesses_caller = True
+ func_frame.identifiers.add_special('caller')
+ args.append('l_caller')
+ if 'kwargs' in undeclared:
+ func_frame.accesses_kwargs = True
+ func_frame.identifiers.add_special('kwargs')
+ args.append('l_kwargs')
+ if 'varargs' in undeclared:
+ func_frame.accesses_varargs = True
+ func_frame.identifiers.add_special('varargs')
+ args.append('l_varargs')
+ return func_frame
+
+ def macro_body(self, node, frame, children=None):
+ """Dump the function def of a macro or call block."""
+ frame = self.function_scoping(node, frame, children)
+ # macros are delayed, they never require output checks
+ frame.require_output_check = False
+ args = frame.arguments
+ # XXX: this is an ugly fix for the loop nesting bug
+ # (tests.test_old_bugs.test_loop_call_bug). This works around
+ # a identifier nesting problem we have in general. It's just more
+ # likely to happen in loops which is why we work around it. The
+ # real solution would be "nonlocal" all the identifiers that are
+ # leaking into a new python frame and might be used both unassigned
+ # and assigned.
+ if 'loop' in frame.identifiers.declared:
+ args = args + ['l_loop=l_loop']
+ self.writeline('def macro(%s):' % ', '.join(args), node)
+ self.indent()
+ self.buffer(frame)
+ self.pull_locals(frame)
+ self.blockvisit(node.body, frame)
+ self.return_buffer_contents(frame)
+ self.outdent()
+ return frame
+
+ def macro_def(self, node, frame):
+ """Dump the macro definition for the def created by macro_body."""
+ arg_tuple = ', '.join(repr(x.name) for x in node.args)
+ name = getattr(node, 'name', None)
+ if len(node.args) == 1:
+ arg_tuple += ','
+ self.write('Macro(environment, macro, %r, (%s), (' %
+ (name, arg_tuple))
+ for arg in node.defaults:
+ self.visit(arg, frame)
+ self.write(', ')
+ self.write('), %r, %r, %r)' % (
+ bool(frame.accesses_kwargs),
+ bool(frame.accesses_varargs),
+ bool(frame.accesses_caller)
+ ))
+
+ def position(self, node):
+ """Return a human readable position for the node."""
+ rv = 'line %d' % node.lineno
+ if self.name is not None:
+ rv += ' in ' + repr(self.name)
+ return rv
+
+ # -- Statement Visitors
+
+ def visit_Template(self, node, frame=None):
+ assert frame is None, 'no root frame allowed'
+ eval_ctx = EvalContext(self.environment, self.name)
+
+ from jinja2.runtime import __all__ as exported
+ self.writeline('from __future__ import division')
+ self.writeline('from jinja2.runtime import ' + ', '.join(exported))
+ if not unoptimize_before_dead_code:
+ self.writeline('dummy = lambda *x: None')
+
+ # if we want a deferred initialization we cannot move the
+ # environment into a local name
+ envenv = not self.defer_init and ', environment=environment' or ''
+
+ # do we have an extends tag at all? If not, we can save some
+ # overhead by just not processing any inheritance code.
+ have_extends = node.find(nodes.Extends) is not None
+
+ # find all blocks
+ for block in node.find_all(nodes.Block):
+ if block.name in self.blocks:
+ self.fail('block %r defined twice' % block.name, block.lineno)
+ self.blocks[block.name] = block
+
+ # find all imports and import them
+ for import_ in node.find_all(nodes.ImportedName):
+ if import_.importname not in self.import_aliases:
+ imp = import_.importname
+ self.import_aliases[imp] = alias = self.temporary_identifier()
+ if '.' in imp:
+ module, obj = imp.rsplit('.', 1)
+ self.writeline('from %s import %s as %s' %
+ (module, obj, alias))
+ else:
+ self.writeline('import %s as %s' % (imp, alias))
+
+ # add the load name
+ self.writeline('name = %r' % self.name)
+
+ # generate the root render function.
+ self.writeline('def root(context%s):' % envenv, extra=1)
+
+ # process the root
+ frame = Frame(eval_ctx)
+ frame.inspect(node.body)
+ frame.toplevel = frame.rootlevel = True
+ frame.require_output_check = have_extends and not self.has_known_extends
+ self.indent()
+ if have_extends:
+ self.writeline('parent_template = None')
+ if 'self' in find_undeclared(node.body, ('self',)):
+ frame.identifiers.add_special('self')
+ self.writeline('l_self = TemplateReference(context)')
+ self.pull_locals(frame)
+ self.pull_dependencies(node.body)
+ self.blockvisit(node.body, frame)
+ self.outdent()
+
+ # make sure that the parent root is called.
+ if have_extends:
+ if not self.has_known_extends:
+ self.indent()
+ self.writeline('if parent_template is not None:')
+ self.indent()
+ self.writeline('for event in parent_template.'
+ 'root_render_func(context):')
+ self.indent()
+ self.writeline('yield event')
+ self.outdent(2 + (not self.has_known_extends))
+
+ # at this point we now have the blocks collected and can visit them too.
+ for name, block in iteritems(self.blocks):
+ block_frame = Frame(eval_ctx)
+ block_frame.inspect(block.body)
+ block_frame.block = name
+ self.writeline('def block_%s(context%s):' % (name, envenv),
+ block, 1)
+ self.indent()
+ undeclared = find_undeclared(block.body, ('self', 'super'))
+ if 'self' in undeclared:
+ block_frame.identifiers.add_special('self')
+ self.writeline('l_self = TemplateReference(context)')
+ if 'super' in undeclared:
+ block_frame.identifiers.add_special('super')
+ self.writeline('l_super = context.super(%r, '
+ 'block_%s)' % (name, name))
+ self.pull_locals(block_frame)
+ self.pull_dependencies(block.body)
+ self.blockvisit(block.body, block_frame)
+ self.outdent()
+
+ self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
+ for x in self.blocks),
+ extra=1)
+
+ # add a function that returns the debug info
+ self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
+ in self.debug_info))
+
+ def visit_Block(self, node, frame):
+ """Call a block and register it for the template."""
+ level = 1
+ if frame.toplevel:
+ # if we know that we are a child template, there is no need to
+ # check if we are one
+ if self.has_known_extends:
+ return
+ if self.extends_so_far > 0:
+ self.writeline('if parent_template is None:')
+ self.indent()
+ level += 1
+ context = node.scoped and 'context.derived(locals())' or 'context'
+ self.writeline('for event in context.blocks[%r][0](%s):' % (
+ node.name, context), node)
+ self.indent()
+ self.simple_write('event', frame)
+ self.outdent(level)
+
+ def visit_Extends(self, node, frame):
+ """Calls the extender."""
+ if not frame.toplevel:
+ self.fail('cannot use extend from a non top-level scope',
+ node.lineno)
+
+ # if the number of extends statements in general is zero so
+ # far, we don't have to add a check if something extended
+ # the template before this one.
+ if self.extends_so_far > 0:
+
+ # if we have a known extends we just add a template runtime
+ # error into the generated code. We could catch that at compile
+ # time too, but i welcome it not to confuse users by throwing the
+ # same error at different times just "because we can".
+ if not self.has_known_extends:
+ self.writeline('if parent_template is not None:')
+ self.indent()
+ self.writeline('raise TemplateRuntimeError(%r)' %
+ 'extended multiple times')
+
+ # if we have a known extends already we don't need that code here
+ # as we know that the template execution will end here.
+ if self.has_known_extends:
+ raise CompilerExit()
+ else:
+ self.outdent()
+
+ self.writeline('parent_template = environment.get_template(', node)
+ self.visit(node.template, frame)
+ self.write(', %r)' % self.name)
+ self.writeline('for name, parent_block in parent_template.'
+ 'blocks.%s():' % dict_item_iter)
+ self.indent()
+ self.writeline('context.blocks.setdefault(name, []).'
+ 'append(parent_block)')
+ self.outdent()
+
+ # if this extends statement was in the root level we can take
+ # advantage of that information and simplify the generated code
+ # in the top level from this point onwards
+ if frame.rootlevel:
+ self.has_known_extends = True
+
+ # and now we have one more
+ self.extends_so_far += 1
+
+ def visit_Include(self, node, frame):
+ """Handles includes."""
+ if node.with_context:
+ self.unoptimize_scope(frame)
+ if node.ignore_missing:
+ self.writeline('try:')
+ self.indent()
+
+ func_name = 'get_or_select_template'
+ if isinstance(node.template, nodes.Const):
+ if isinstance(node.template.value, string_types):
+ func_name = 'get_template'
+ elif isinstance(node.template.value, (tuple, list)):
+ func_name = 'select_template'
+ elif isinstance(node.template, (nodes.Tuple, nodes.List)):
+ func_name = 'select_template'
+
+ self.writeline('template = environment.%s(' % func_name, node)
+ self.visit(node.template, frame)
+ self.write(', %r)' % self.name)
+ if node.ignore_missing:
+ self.outdent()
+ self.writeline('except TemplateNotFound:')
+ self.indent()
+ self.writeline('pass')
+ self.outdent()
+ self.writeline('else:')
+ self.indent()
+
+ if node.with_context:
+ self.writeline('for event in template.root_render_func('
+ 'template.new_context(context.parent, True, '
+ 'locals())):')
+ else:
+ self.writeline('for event in template.module._body_stream:')
+
+ self.indent()
+ self.simple_write('event', frame)
+ self.outdent()
+
+ if node.ignore_missing:
+ self.outdent()
+
+ def visit_Import(self, node, frame):
+ """Visit regular imports."""
+ if node.with_context:
+ self.unoptimize_scope(frame)
+ self.writeline('l_%s = ' % node.target, node)
+ if frame.toplevel:
+ self.write('context.vars[%r] = ' % node.target)
+ self.write('environment.get_template(')
+ self.visit(node.template, frame)
+ self.write(', %r).' % self.name)
+ if node.with_context:
+ self.write('make_module(context.parent, True, locals())')
+ else:
+ self.write('module')
+ if frame.toplevel and not node.target.startswith('_'):
+ self.writeline('context.exported_vars.discard(%r)' % node.target)
+ frame.assigned_names.add(node.target)
+
+ def visit_FromImport(self, node, frame):
+ """Visit named imports."""
+ self.newline(node)
+ self.write('included_template = environment.get_template(')
+ self.visit(node.template, frame)
+ self.write(', %r).' % self.name)
+ if node.with_context:
+ self.write('make_module(context.parent, True)')
+ else:
+ self.write('module')
+
+ var_names = []
+ discarded_names = []
+ for name in node.names:
+ if isinstance(name, tuple):
+ name, alias = name
+ else:
+ alias = name
+ self.writeline('l_%s = getattr(included_template, '
+ '%r, missing)' % (alias, name))
+ self.writeline('if l_%s is missing:' % alias)
+ self.indent()
+ self.writeline('l_%s = environment.undefined(%r %% '
+ 'included_template.__name__, '
+ 'name=%r)' %
+ (alias, 'the template %%r (imported on %s) does '
+ 'not export the requested name %s' % (
+ self.position(node),
+ repr(name)
+ ), name))
+ self.outdent()
+ if frame.toplevel:
+ var_names.append(alias)
+ if not alias.startswith('_'):
+ discarded_names.append(alias)
+ frame.assigned_names.add(alias)
+
+ if var_names:
+ if len(var_names) == 1:
+ name = var_names[0]
+ self.writeline('context.vars[%r] = l_%s' % (name, name))
+ else:
+ self.writeline('context.vars.update({%s})' % ', '.join(
+ '%r: l_%s' % (name, name) for name in var_names
+ ))
+ if discarded_names:
+ if len(discarded_names) == 1:
+ self.writeline('context.exported_vars.discard(%r)' %
+ discarded_names[0])
+ else:
+ self.writeline('context.exported_vars.difference_'
+ 'update((%s))' % ', '.join(imap(repr, discarded_names)))
+
+ def visit_For(self, node, frame):
+ # when calculating the nodes for the inner frame we have to exclude
+ # the iterator contents from it
+ children = node.iter_child_nodes(exclude=('iter',))
+ if node.recursive:
+ loop_frame = self.function_scoping(node, frame, children,
+ find_special=False)
+ else:
+ loop_frame = frame.inner()
+ loop_frame.inspect(children)
+
+ # try to figure out if we have an extended loop. An extended loop
+ # is necessary if the loop is in recursive mode or if the special loop
+ # variable is accessed in the body.
+ extended_loop = node.recursive or 'loop' in \
+ find_undeclared(node.iter_child_nodes(
+ only=('body',)), ('loop',))
+
+ # if we don't have a recursive loop we have to find the shadowed
+ # variables at that point. Because loops can be nested but the loop
+ # variable is a special one we have to enforce aliasing for it.
+ if not node.recursive:
+ aliases = self.push_scope(loop_frame, ('loop',))
+
+ # otherwise we set up a buffer and add a function def
+ else:
+ self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
+ self.indent()
+ self.buffer(loop_frame)
+ aliases = {}
+
+ # make sure the loop variable is a special one and raise a template
+ # assertion error if a loop tries to write to loop
+ if extended_loop:
+ self.writeline('l_loop = missing')
+ loop_frame.identifiers.add_special('loop')
+ for name in node.find_all(nodes.Name):
+ if name.ctx == 'store' and name.name == 'loop':
+ self.fail('Can\'t assign to special loop variable '
+ 'in for-loop target', name.lineno)
+
+ self.pull_locals(loop_frame)
+ if node.else_:
+ iteration_indicator = self.temporary_identifier()
+ self.writeline('%s = 1' % iteration_indicator)
+
+ # Create a fake parent loop if the else or test section of a
+ # loop is accessing the special loop variable and no parent loop
+ # exists.
+ if 'loop' not in aliases and 'loop' in find_undeclared(
+ node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
+ self.writeline("l_loop = environment.undefined(%r, name='loop')" %
+ ("'loop' is undefined. the filter section of a loop as well "
+ "as the else block don't have access to the special 'loop'"
+ " variable of the current loop. Because there is no parent "
+ "loop it's undefined. Happened in loop on %s" %
+ self.position(node)))
+
+ self.writeline('for ', node)
+ self.visit(node.target, loop_frame)
+ self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
+
+ # if we have an extended loop and a node test, we filter in the
+ # "outer frame".
+ if extended_loop and node.test is not None:
+ self.write('(')
+ self.visit(node.target, loop_frame)
+ self.write(' for ')
+ self.visit(node.target, loop_frame)
+ self.write(' in ')
+ if node.recursive:
+ self.write('reciter')
+ else:
+ self.visit(node.iter, loop_frame)
+ self.write(' if (')
+ test_frame = loop_frame.copy()
+ self.visit(node.test, test_frame)
+ self.write('))')
+
+ elif node.recursive:
+ self.write('reciter')
+ else:
+ self.visit(node.iter, loop_frame)
+
+ if node.recursive:
+ self.write(', loop_render_func, depth):')
+ else:
+ self.write(extended_loop and '):' or ':')
+
+ # tests in non-extended loops become a continue
+ if not extended_loop and node.test is not None:
+ self.indent()
+ self.writeline('if not ')
+ self.visit(node.test, loop_frame)
+ self.write(':')
+ self.indent()
+ self.writeline('continue')
+ self.outdent(2)
+
+ self.indent()
+ self.blockvisit(node.body, loop_frame)
+ if node.else_:
+ self.writeline('%s = 0' % iteration_indicator)
+ self.outdent()
+
+ if node.else_:
+ self.writeline('if %s:' % iteration_indicator)
+ self.indent()
+ self.blockvisit(node.else_, loop_frame)
+ self.outdent()
+
+ # reset the aliases if there are any.
+ if not node.recursive:
+ self.pop_scope(aliases, loop_frame)
+
+ # if the node was recursive we have to return the buffer contents
+ # and start the iteration code
+ if node.recursive:
+ self.return_buffer_contents(loop_frame)
+ self.outdent()
+ self.start_write(frame, node)
+ self.write('loop(')
+ self.visit(node.iter, frame)
+ self.write(', loop)')
+ self.end_write(frame)
+
+ def visit_If(self, node, frame):
+ if_frame = frame.soft()
+ self.writeline('if ', node)
+ self.visit(node.test, if_frame)
+ self.write(':')
+ self.indent()
+ self.blockvisit(node.body, if_frame)
+ self.outdent()
+ if node.else_:
+ self.writeline('else:')
+ self.indent()
+ self.blockvisit(node.else_, if_frame)
+ self.outdent()
+
+ def visit_Macro(self, node, frame):
+ macro_frame = self.macro_body(node, frame)
+ self.newline()
+ if frame.toplevel:
+ if not node.name.startswith('_'):
+ self.write('context.exported_vars.add(%r)' % node.name)
+ self.writeline('context.vars[%r] = ' % node.name)
+ self.write('l_%s = ' % node.name)
+ self.macro_def(node, macro_frame)
+ frame.assigned_names.add(node.name)
+
+ def visit_CallBlock(self, node, frame):
+ children = node.iter_child_nodes(exclude=('call',))
+ call_frame = self.macro_body(node, frame, children)
+ self.writeline('caller = ')
+ self.macro_def(node, call_frame)
+ self.start_write(frame, node)
+ self.visit_Call(node.call, call_frame, forward_caller=True)
+ self.end_write(frame)
+
+ def visit_FilterBlock(self, node, frame):
+ filter_frame = frame.inner()
+ filter_frame.inspect(node.iter_child_nodes())
+ aliases = self.push_scope(filter_frame)
+ self.pull_locals(filter_frame)
+ self.buffer(filter_frame)
+ self.blockvisit(node.body, filter_frame)
+ self.start_write(frame, node)
+ self.visit_Filter(node.filter, filter_frame)
+ self.end_write(frame)
+ self.pop_scope(aliases, filter_frame)
+
+ def visit_ExprStmt(self, node, frame):
+ self.newline(node)
+ self.visit(node.node, frame)
+
+ def visit_Output(self, node, frame):
+ # if we have a known extends statement, we don't output anything
+ # if we are in a require_output_check section
+ if self.has_known_extends and frame.require_output_check:
+ return
+
+ allow_constant_finalize = True
+ if self.environment.finalize:
+ func = self.environment.finalize
+ if getattr(func, 'contextfunction', False) or \
+ getattr(func, 'evalcontextfunction', False):
+ allow_constant_finalize = False
+ elif getattr(func, 'environmentfunction', False):
+ finalize = lambda x: text_type(
+ self.environment.finalize(self.environment, x))
+ else:
+ finalize = lambda x: text_type(self.environment.finalize(x))
+ else:
+ finalize = text_type
+
+ # if we are inside a frame that requires output checking, we do so
+ outdent_later = False
+ if frame.require_output_check:
+ self.writeline('if parent_template is None:')
+ self.indent()
+ outdent_later = True
+
+ # try to evaluate as many chunks as possible into a static
+ # string at compile time.
+ body = []
+ for child in node.nodes:
+ try:
+ if not allow_constant_finalize:
+ raise nodes.Impossible()
+ const = child.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ body.append(child)
+ continue
+ # the frame can't be volatile here, because otherwise the
+ # as_const() function would raise an Impossible exception
+ # at that point.
+ try:
+ if frame.eval_ctx.autoescape:
+ if hasattr(const, '__html__'):
+ const = const.__html__()
+ else:
+ const = escape(const)
+ const = finalize(const)
+ except Exception:
+ # if something goes wrong here we evaluate the node
+ # at runtime for easier debugging
+ body.append(child)
+ continue
+ if body and isinstance(body[-1], list):
+ body[-1].append(const)
+ else:
+ body.append([const])
+
+ # if we have fewer than 3 nodes or a buffer we yield or extend/append
+ if len(body) < 3 or frame.buffer is not None:
+ if frame.buffer is not None:
+ # for one item we append, for more we extend
+ if len(body) == 1:
+ self.writeline('%s.append(' % frame.buffer)
+ else:
+ self.writeline('%s.extend((' % frame.buffer)
+ self.indent()
+ for item in body:
+ if isinstance(item, list):
+ val = repr(concat(item))
+ if frame.buffer is None:
+ self.writeline('yield ' + val)
+ else:
+ self.writeline(val + ', ')
+ else:
+ if frame.buffer is None:
+ self.writeline('yield ', item)
+ else:
+ self.newline(item)
+ close = 1
+ if frame.eval_ctx.volatile:
+ self.write('(context.eval_ctx.autoescape and'
+ ' escape or to_string)(')
+ elif frame.eval_ctx.autoescape:
+ self.write('escape(')
+ else:
+ self.write('to_string(')
+ if self.environment.finalize is not None:
+ self.write('environment.finalize(')
+ if getattr(self.environment.finalize,
+ "contextfunction", False):
+ self.write('context, ')
+ close += 1
+ self.visit(item, frame)
+ self.write(')' * close)
+ if frame.buffer is not None:
+ self.write(', ')
+ if frame.buffer is not None:
+ # close the open parentheses
+ self.outdent()
+ self.writeline(len(body) == 1 and ')' or '))')
+
+ # otherwise we create a format string as this is faster in that case
+ else:
+ format = []
+ arguments = []
+ for item in body:
+ if isinstance(item, list):
+ format.append(concat(item).replace('%', '%%'))
+ else:
+ format.append('%s')
+ arguments.append(item)
+ self.writeline('yield ')
+ self.write(repr(concat(format)) + ' % (')
+ self.indent()
+ for argument in arguments:
+ self.newline(argument)
+ close = 0
+ if frame.eval_ctx.volatile:
+ self.write('(context.eval_ctx.autoescape and'
+ ' escape or to_string)(')
+ close += 1
+ elif frame.eval_ctx.autoescape:
+ self.write('escape(')
+ close += 1
+ if self.environment.finalize is not None:
+ self.write('environment.finalize(')
+ if getattr(self.environment.finalize,
+ 'contextfunction', False):
+ self.write('context, ')
+ elif getattr(self.environment.finalize,
+ 'evalcontextfunction', False):
+ self.write('context.eval_ctx, ')
+ elif getattr(self.environment.finalize,
+ 'environmentfunction', False):
+ self.write('environment, ')
+ close += 1
+ self.visit(argument, frame)
+ self.write(')' * close + ', ')
+ self.outdent()
+ self.writeline(')')
+
+ if outdent_later:
+ self.outdent()
+
+ def make_assignment_frame(self, frame):
+ # toplevel assignments however go into the local namespace and
+ # the current template's context. We create a copy of the frame
+ # here and add a set so that the Name visitor can add the assigned
+ # names here.
+ if not frame.toplevel:
+ return frame
+ assignment_frame = frame.copy()
+ assignment_frame.toplevel_assignments = set()
+ return assignment_frame
+
+ def export_assigned_vars(self, frame, assignment_frame):
+ if not frame.toplevel:
+ return
+ public_names = [x for x in assignment_frame.toplevel_assignments
+ if not x.startswith('_')]
+ if len(assignment_frame.toplevel_assignments) == 1:
+ name = next(iter(assignment_frame.toplevel_assignments))
+ self.writeline('context.vars[%r] = l_%s' % (name, name))
+ else:
+ self.writeline('context.vars.update({')
+ for idx, name in enumerate(assignment_frame.toplevel_assignments):
+ if idx:
+ self.write(', ')
+ self.write('%r: l_%s' % (name, name))
+ self.write('})')
+ if public_names:
+ if len(public_names) == 1:
+ self.writeline('context.exported_vars.add(%r)' %
+ public_names[0])
+ else:
+ self.writeline('context.exported_vars.update((%s))' %
+ ', '.join(imap(repr, public_names)))
+
+ def visit_Assign(self, node, frame):
+ self.newline(node)
+ assignment_frame = self.make_assignment_frame(frame)
+ self.visit(node.target, assignment_frame)
+ self.write(' = ')
+ self.visit(node.node, frame)
+ self.export_assigned_vars(frame, assignment_frame)
+
+ def visit_AssignBlock(self, node, frame):
+ block_frame = frame.inner()
+ block_frame.inspect(node.body)
+ aliases = self.push_scope(block_frame)
+ self.pull_locals(block_frame)
+ self.buffer(block_frame)
+ self.blockvisit(node.body, block_frame)
+ self.pop_scope(aliases, block_frame)
+
+ assignment_frame = self.make_assignment_frame(frame)
+ self.newline(node)
+ self.visit(node.target, assignment_frame)
+ self.write(' = concat(%s)' % block_frame.buffer)
+ self.export_assigned_vars(frame, assignment_frame)
+
+ # -- Expression Visitors
+
+ def visit_Name(self, node, frame):
+ if node.ctx == 'store' and frame.toplevel:
+ frame.toplevel_assignments.add(node.name)
+ self.write('l_' + node.name)
+ frame.assigned_names.add(node.name)
+
+ def visit_Const(self, node, frame):
+ val = node.value
+ if isinstance(val, float):
+ self.write(str(val))
+ else:
+ self.write(repr(val))
+
+ def visit_TemplateData(self, node, frame):
+ try:
+ self.write(repr(node.as_const(frame.eval_ctx)))
+ except nodes.Impossible:
+ self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
+ % node.data)
+
+ def visit_Tuple(self, node, frame):
+ self.write('(')
+ idx = -1
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(', ')
+ self.visit(item, frame)
+ self.write(idx == 0 and ',)' or ')')
+
+ def visit_List(self, node, frame):
+ self.write('[')
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(', ')
+ self.visit(item, frame)
+ self.write(']')
+
+ def visit_Dict(self, node, frame):
+ self.write('{')
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(', ')
+ self.visit(item.key, frame)
+ self.write(': ')
+ self.visit(item.value, frame)
+ self.write('}')
+
+ def binop(operator, interceptable=True):
+ def visitor(self, node, frame):
+ if self.environment.sandboxed and \
+ operator in self.environment.intercepted_binops:
+ self.write('environment.call_binop(context, %r, ' % operator)
+ self.visit(node.left, frame)
+ self.write(', ')
+ self.visit(node.right, frame)
+ else:
+ self.write('(')
+ self.visit(node.left, frame)
+ self.write(' %s ' % operator)
+ self.visit(node.right, frame)
+ self.write(')')
+ return visitor
+
+ def uaop(operator, interceptable=True):
+ def visitor(self, node, frame):
+ if self.environment.sandboxed and \
+ operator in self.environment.intercepted_unops:
+ self.write('environment.call_unop(context, %r, ' % operator)
+ self.visit(node.node, frame)
+ else:
+ self.write('(' + operator)
+ self.visit(node.node, frame)
+ self.write(')')
+ return visitor
+
+ visit_Add = binop('+')
+ visit_Sub = binop('-')
+ visit_Mul = binop('*')
+ visit_Div = binop('/')
+ visit_FloorDiv = binop('//')
+ visit_Pow = binop('**')
+ visit_Mod = binop('%')
+ visit_And = binop('and', interceptable=False)
+ visit_Or = binop('or', interceptable=False)
+ visit_Pos = uaop('+')
+ visit_Neg = uaop('-')
+ visit_Not = uaop('not ', interceptable=False)
+ del binop, uaop
+
+ def visit_Concat(self, node, frame):
+ if frame.eval_ctx.volatile:
+ func_name = '(context.eval_ctx.volatile and' \
+ ' markup_join or unicode_join)'
+ elif frame.eval_ctx.autoescape:
+ func_name = 'markup_join'
+ else:
+ func_name = 'unicode_join'
+ self.write('%s((' % func_name)
+ for arg in node.nodes:
+ self.visit(arg, frame)
+ self.write(', ')
+ self.write('))')
+
+ def visit_Compare(self, node, frame):
+ self.visit(node.expr, frame)
+ for op in node.ops:
+ self.visit(op, frame)
+
+ def visit_Operand(self, node, frame):
+ self.write(' %s ' % operators[node.op])
+ self.visit(node.expr, frame)
+
+ def visit_Getattr(self, node, frame):
+ self.write('environment.getattr(')
+ self.visit(node.node, frame)
+ self.write(', %r)' % node.attr)
+
+ def visit_Getitem(self, node, frame):
+ # slices bypass the environment getitem method.
+ if isinstance(node.arg, nodes.Slice):
+ self.visit(node.node, frame)
+ self.write('[')
+ self.visit(node.arg, frame)
+ self.write(']')
+ else:
+ self.write('environment.getitem(')
+ self.visit(node.node, frame)
+ self.write(', ')
+ self.visit(node.arg, frame)
+ self.write(')')
+
+ def visit_Slice(self, node, frame):
+ if node.start is not None:
+ self.visit(node.start, frame)
+ self.write(':')
+ if node.stop is not None:
+ self.visit(node.stop, frame)
+ if node.step is not None:
+ self.write(':')
+ self.visit(node.step, frame)
+
+ def visit_Filter(self, node, frame):
+ self.write(self.filters[node.name] + '(')
+ func = self.environment.filters.get(node.name)
+ if func is None:
+ self.fail('no filter named %r' % node.name, node.lineno)
+ if getattr(func, 'contextfilter', False):
+ self.write('context, ')
+ elif getattr(func, 'evalcontextfilter', False):
+ self.write('context.eval_ctx, ')
+ elif getattr(func, 'environmentfilter', False):
+ self.write('environment, ')
+
+ # if the filter node is None we are inside a filter block
+ # and want to write to the current buffer
+ if node.node is not None:
+ self.visit(node.node, frame)
+ elif frame.eval_ctx.volatile:
+ self.write('(context.eval_ctx.autoescape and'
+ ' Markup(concat(%s)) or concat(%s))' %
+ (frame.buffer, frame.buffer))
+ elif frame.eval_ctx.autoescape:
+ self.write('Markup(concat(%s))' % frame.buffer)
+ else:
+ self.write('concat(%s)' % frame.buffer)
+ self.signature(node, frame)
+ self.write(')')
+
+ def visit_Test(self, node, frame):
+ self.write(self.tests[node.name] + '(')
+ if node.name not in self.environment.tests:
+ self.fail('no test named %r' % node.name, node.lineno)
+ self.visit(node.node, frame)
+ self.signature(node, frame)
+ self.write(')')
+
+ def visit_CondExpr(self, node, frame):
+ def write_expr2():
+ if node.expr2 is not None:
+ return self.visit(node.expr2, frame)
+ self.write('environment.undefined(%r)' % ('the inline if-'
+ 'expression on %s evaluated to false and '
+ 'no else section was defined.' % self.position(node)))
+
+ self.write('(')
+ self.visit(node.expr1, frame)
+ self.write(' if ')
+ self.visit(node.test, frame)
+ self.write(' else ')
+ write_expr2()
+ self.write(')')
+
+ def visit_Call(self, node, frame, forward_caller=False):
+ if self.environment.sandboxed:
+ self.write('environment.call(context, ')
+ else:
+ self.write('context.call(')
+ self.visit(node.node, frame)
+ extra_kwargs = forward_caller and {'caller': 'caller'} or None
+ self.signature(node, frame, extra_kwargs)
+ self.write(')')
+
+ def visit_Keyword(self, node, frame):
+ self.write(node.key + '=')
+ self.visit(node.value, frame)
+
+ # -- Unused nodes for extensions
+
+ def visit_MarkSafe(self, node, frame):
+ self.write('Markup(')
+ self.visit(node.expr, frame)
+ self.write(')')
+
+ def visit_MarkSafeIfAutoescape(self, node, frame):
+ self.write('(context.eval_ctx.autoescape and Markup or identity)(')
+ self.visit(node.expr, frame)
+ self.write(')')
+
+ def visit_EnvironmentAttribute(self, node, frame):
+ self.write('environment.' + node.name)
+
+ def visit_ExtensionAttribute(self, node, frame):
+ self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
+
+ def visit_ImportedName(self, node, frame):
+ self.write(self.import_aliases[node.importname])
+
+ def visit_InternalName(self, node, frame):
+ self.write(node.name)
+
+ def visit_ContextReference(self, node, frame):
+ self.write('context')
+
+ def visit_Continue(self, node, frame):
+ self.writeline('continue', node)
+
+ def visit_Break(self, node, frame):
+ self.writeline('break', node)
+
+ def visit_Scope(self, node, frame):
+ scope_frame = frame.inner()
+ scope_frame.inspect(node.iter_child_nodes())
+ aliases = self.push_scope(scope_frame)
+ self.pull_locals(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.pop_scope(aliases, scope_frame)
+
+ def visit_EvalContextModifier(self, node, frame):
+ for keyword in node.options:
+ self.writeline('context.eval_ctx.%s = ' % keyword.key)
+ self.visit(keyword.value, frame)
+ try:
+ val = keyword.value.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ frame.eval_ctx.volatile = True
+ else:
+ setattr(frame.eval_ctx, keyword.key, val)
+
+ def visit_ScopedEvalContextModifier(self, node, frame):
+ old_ctx_name = self.temporary_identifier()
+ safed_ctx = frame.eval_ctx.save()
+ self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
+ self.visit_EvalContextModifier(node, frame)
+ for child in node.body:
+ self.visit(child, frame)
+ frame.eval_ctx.revert(safed_ctx)
+ self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
diff --git a/deps/v8/third_party/jinja2/constants.py b/deps/v8/third_party/jinja2/constants.py
new file mode 100644
index 0000000000..cab203cc77
--- /dev/null
+++ b/deps/v8/third_party/jinja2/constants.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.constants
+ ~~~~~~~~~~~~~~~~
+
+ Various constants.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = u'''\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate'''
diff --git a/deps/v8/third_party/jinja2/debug.py b/deps/v8/third_party/jinja2/debug.py
new file mode 100644
index 0000000000..3252748369
--- /dev/null
+++ b/deps/v8/third_party/jinja2/debug.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.debug
+ ~~~~~~~~~~~~
+
+ Implements the debug interface for Jinja. This module does some pretty
+ ugly stuff with the Python traceback system in order to achieve tracebacks
+ with correct line numbers, locals and contents.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+import traceback
+from types import TracebackType, CodeType
+from jinja2.utils import missing, internal_code
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2._compat import iteritems, reraise, PY2
+
+# on pypy we can take advantage of transparent proxies
+try:
+ from __pypy__ import tproxy
+except ImportError:
+ tproxy = None
+
+
+# what does the raise helper look like?
+try:
+ exec("raise TypeError, 'foo'")
+except SyntaxError:
+ raise_helper = 'raise __jinja_exception__[1]'
+except TypeError:
+ raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
+
+
+class TracebackFrameProxy(object):
+ """Proxies a traceback frame."""
+
+ def __init__(self, tb):
+ self.tb = tb
+ self._tb_next = None
+
+ @property
+ def tb_next(self):
+ return self._tb_next
+
+ def set_next(self, next):
+ if tb_set_next is not None:
+ try:
+ tb_set_next(self.tb, next and next.tb or None)
+ except Exception:
+ # this function can fail due to all the hackery it does
+ # on various python implementations. We just catch the errors
+ # here and ignore them if necessary.
+ pass
+ self._tb_next = next
+
+ @property
+ def is_jinja_frame(self):
+ return '__jinja_template__' in self.tb.tb_frame.f_globals
+
+ def __getattr__(self, name):
+ return getattr(self.tb, name)
+
+
+def make_frame_proxy(frame):
+ proxy = TracebackFrameProxy(frame)
+ if tproxy is None:
+ return proxy
+ def operation_handler(operation, *args, **kwargs):
+ if operation in ('__getattribute__', '__getattr__'):
+ return getattr(proxy, args[0])
+ elif operation == '__setattr__':
+ proxy.__setattr__(*args, **kwargs)
+ else:
+ return getattr(proxy, operation)(*args, **kwargs)
+ return tproxy(TracebackType, operation_handler)
+
+
+class ProcessedTraceback(object):
+ """Holds a Jinja preprocessed traceback for printing or reraising."""
+
+ def __init__(self, exc_type, exc_value, frames):
+ assert frames, 'no frames for this traceback?'
+ self.exc_type = exc_type
+ self.exc_value = exc_value
+ self.frames = frames
+
+ # link the frame proxies together into a new traceback chain
+ prev_tb = None
+ for tb in self.frames:
+ if prev_tb is not None:
+ prev_tb.set_next(tb)
+ prev_tb = tb
+ prev_tb.set_next(None)
+
+ def render_as_text(self, limit=None):
+ """Return a string with the traceback."""
+ lines = traceback.format_exception(self.exc_type, self.exc_value,
+ self.frames[0], limit=limit)
+ return ''.join(lines).rstrip()
+
+ def render_as_html(self, full=False):
+ """Return a unicode string with the traceback as rendered HTML."""
+ from jinja2.debugrenderer import render_traceback
+ return u'%s\n\n<!--\n%s\n-->' % (
+ render_traceback(self, full=full),
+ self.render_as_text().decode('utf-8', 'replace')
+ )
+
+ @property
+ def is_template_syntax_error(self):
+ """`True` if this is a template syntax error."""
+ return isinstance(self.exc_value, TemplateSyntaxError)
+
+ @property
+ def exc_info(self):
+ """Exception info tuple with a proxy around the frame objects."""
+ return self.exc_type, self.exc_value, self.frames[0]
+
+ @property
+ def standard_exc_info(self):
+ """Standard python exc_info for re-raising"""
+ tb = self.frames[0]
+ # the frame will be an actual traceback (or transparent proxy) if
+ # we are on pypy or a python implementation with support for tproxy
+ if type(tb) is not TracebackType:
+ tb = tb.tb
+ return self.exc_type, self.exc_value, tb
+
+
+def make_traceback(exc_info, source_hint=None):
+ """Creates a processed traceback object from the exc_info."""
+ exc_type, exc_value, tb = exc_info
+ if isinstance(exc_value, TemplateSyntaxError):
+ exc_info = translate_syntax_error(exc_value, source_hint)
+ initial_skip = 0
+ else:
+ initial_skip = 1
+ return translate_exception(exc_info, initial_skip)
+
+
+def translate_syntax_error(error, source=None):
+ """Rewrites a syntax error to please traceback systems."""
+ error.source = source
+ error.translated = True
+ exc_info = (error.__class__, error, None)
+ filename = error.filename
+ if filename is None:
+ filename = '<unknown>'
+ return fake_exc_info(exc_info, filename, error.lineno)
+
+
+def translate_exception(exc_info, initial_skip=0):
+ """If passed an exc_info it will automatically rewrite the exceptions
+ all the way down to the correct line numbers and frames.
+ """
+ tb = exc_info[2]
+ frames = []
+
+ # skip some internal frames if wanted
+ for x in range(initial_skip):
+ if tb is not None:
+ tb = tb.tb_next
+ initial_tb = tb
+
+ while tb is not None:
+ # skip frames decorated with @internalcode. These are internal
+ # calls we can't avoid and that are useless in template debugging
+ # output.
+ if tb.tb_frame.f_code in internal_code:
+ tb = tb.tb_next
+ continue
+
+ # save a reference to the next frame if we override the current
+ # one with a faked one.
+ next = tb.tb_next
+
+ # fake template exceptions
+ template = tb.tb_frame.f_globals.get('__jinja_template__')
+ if template is not None:
+ lineno = template.get_corresponding_lineno(tb.tb_lineno)
+ tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
+ lineno)[2]
+
+ frames.append(make_frame_proxy(tb))
+ tb = next
+
+ # if we don't have any exceptions in the frames left, we have to
+ # reraise it unchanged.
+ # XXX: can we backup here? when could this happen?
+ if not frames:
+ reraise(exc_info[0], exc_info[1], exc_info[2])
+
+ return ProcessedTraceback(exc_info[0], exc_info[1], frames)
+
+
+def fake_exc_info(exc_info, filename, lineno):
+ """Helper for `translate_exception`."""
+ exc_type, exc_value, tb = exc_info
+
+ # figure the real context out
+ if tb is not None:
+ real_locals = tb.tb_frame.f_locals.copy()
+ ctx = real_locals.get('context')
+ if ctx:
+ locals = ctx.get_all()
+ else:
+ locals = {}
+ for name, value in iteritems(real_locals):
+ if name.startswith('l_') and value is not missing:
+ locals[name[2:]] = value
+
+ # if there is a local called __jinja_exception__, we get
+ # rid of it to not break the debug functionality.
+ locals.pop('__jinja_exception__', None)
+ else:
+ locals = {}
+
+ # assemble the fake globals we need
+ globals = {
+ '__name__': filename,
+ '__file__': filename,
+ '__jinja_exception__': exc_info[:2],
+
+ # we don't want to keep the reference to the template around
+ # to not cause circular dependencies, but we mark it as Jinja
+ # frame for the ProcessedTraceback
+ '__jinja_template__': None
+ }
+
+ # and fake the exception
+ code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
+
+ # if it's possible, change the name of the code. This won't work
+ # on some python environments such as google appengine
+ try:
+ if tb is None:
+ location = 'template'
+ else:
+ function = tb.tb_frame.f_code.co_name
+ if function == 'root':
+ location = 'top-level template code'
+ elif function.startswith('block_'):
+ location = 'block "%s"' % function[6:]
+ else:
+ location = 'template'
+
+ if PY2:
+ code = CodeType(0, code.co_nlocals, code.co_stacksize,
+ code.co_flags, code.co_code, code.co_consts,
+ code.co_names, code.co_varnames, filename,
+ location, code.co_firstlineno,
+ code.co_lnotab, (), ())
+ else:
+ code = CodeType(0, code.co_kwonlyargcount,
+ code.co_nlocals, code.co_stacksize,
+ code.co_flags, code.co_code, code.co_consts,
+ code.co_names, code.co_varnames, filename,
+ location, code.co_firstlineno,
+ code.co_lnotab, (), ())
+ except Exception as e:
+ pass
+
+ # execute the code and catch the new traceback
+ try:
+ exec(code, globals, locals)
+ except:
+ exc_info = sys.exc_info()
+ new_tb = exc_info[2].tb_next
+
+ # return without this frame
+ return exc_info[:2] + (new_tb,)
+
+
+def _init_ugly_crap():
+ """This function implements a few ugly things so that we can patch the
+ traceback objects. The function returned allows resetting `tb_next` on
+ any python traceback object. Do not attempt to use this on non-CPython
+ interpreters.
+ """
+ import ctypes
+ from types import TracebackType
+
+ if PY2:
+ # figure out size of _Py_ssize_t for Python 2:
+ if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
+ _Py_ssize_t = ctypes.c_int64
+ else:
+ _Py_ssize_t = ctypes.c_int
+ else:
+ # platform ssize_t on Python 3
+ _Py_ssize_t = ctypes.c_ssize_t
+
+ # regular python
+ class _PyObject(ctypes.Structure):
+ pass
+ _PyObject._fields_ = [
+ ('ob_refcnt', _Py_ssize_t),
+ ('ob_type', ctypes.POINTER(_PyObject))
+ ]
+
+ # python with trace
+ if hasattr(sys, 'getobjects'):
+ class _PyObject(ctypes.Structure):
+ pass
+ _PyObject._fields_ = [
+ ('_ob_next', ctypes.POINTER(_PyObject)),
+ ('_ob_prev', ctypes.POINTER(_PyObject)),
+ ('ob_refcnt', _Py_ssize_t),
+ ('ob_type', ctypes.POINTER(_PyObject))
+ ]
+
+ class _Traceback(_PyObject):
+ pass
+ _Traceback._fields_ = [
+ ('tb_next', ctypes.POINTER(_Traceback)),
+ ('tb_frame', ctypes.POINTER(_PyObject)),
+ ('tb_lasti', ctypes.c_int),
+ ('tb_lineno', ctypes.c_int)
+ ]
+
+ def tb_set_next(tb, next):
+ """Set the tb_next attribute of a traceback object."""
+ if not (isinstance(tb, TracebackType) and
+ (next is None or isinstance(next, TracebackType))):
+ raise TypeError('tb_set_next arguments must be traceback objects')
+ obj = _Traceback.from_address(id(tb))
+ if tb.tb_next is not None:
+ old = _Traceback.from_address(id(tb.tb_next))
+ old.ob_refcnt -= 1
+ if next is None:
+ obj.tb_next = ctypes.POINTER(_Traceback)()
+ else:
+ next = _Traceback.from_address(id(next))
+ next.ob_refcnt += 1
+ obj.tb_next = ctypes.pointer(next)
+
+ return tb_set_next
+
+
+# try to get a tb_set_next implementation if we don't have transparent
+# proxies.
+tb_set_next = None
+if tproxy is None:
+ try:
+ tb_set_next = _init_ugly_crap()
+ except:
+ pass
+ del _init_ugly_crap
diff --git a/deps/v8/third_party/jinja2/defaults.py b/deps/v8/third_party/jinja2/defaults.py
new file mode 100644
index 0000000000..3717a7223f
--- /dev/null
+++ b/deps/v8/third_party/jinja2/defaults.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.defaults
+ ~~~~~~~~~~~~~~~
+
+ Jinja default filters and tags.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import range_type
+from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
+
+
+# defaults for the parser / lexer
+BLOCK_START_STRING = '{%'
+BLOCK_END_STRING = '%}'
+VARIABLE_START_STRING = '{{'
+VARIABLE_END_STRING = '}}'
+COMMENT_START_STRING = '{#'
+COMMENT_END_STRING = '#}'
+LINE_STATEMENT_PREFIX = None
+LINE_COMMENT_PREFIX = None
+TRIM_BLOCKS = False
+LSTRIP_BLOCKS = False
+NEWLINE_SEQUENCE = '\n'
+KEEP_TRAILING_NEWLINE = False
+
+
+# default filters, tests and namespace
+from jinja2.filters import FILTERS as DEFAULT_FILTERS
+from jinja2.tests import TESTS as DEFAULT_TESTS
+DEFAULT_NAMESPACE = {
+ 'range': range_type,
+ 'dict': dict,
+ 'lipsum': generate_lorem_ipsum,
+ 'cycler': Cycler,
+ 'joiner': Joiner
+}
+
+
+# export all constants
+__all__ = tuple(x for x in locals().keys() if x.isupper())
diff --git a/deps/v8/third_party/jinja2/environment.py b/deps/v8/third_party/jinja2/environment.py
new file mode 100644
index 0000000000..8b2572bb8c
--- /dev/null
+++ b/deps/v8/third_party/jinja2/environment.py
@@ -0,0 +1,1213 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.environment
+ ~~~~~~~~~~~~~~~~~~
+
+ Provides a class that holds runtime and parsing time options.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+ DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
+ KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.lexer import get_lexer, TokenStream
+from jinja2.parser import Parser
+from jinja2.nodes import EvalContext
+from jinja2.optimizer import optimize
+from jinja2.compiler import generate, CodeGenerator
+from jinja2.runtime import Undefined, new_context, Context
+from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
+ TemplatesNotFound, TemplateRuntimeError
+from jinja2.utils import import_string, LRUCache, Markup, missing, \
+ concat, consume, internalcode
+from jinja2._compat import imap, ifilter, string_types, iteritems, \
+ text_type, reraise, implements_iterator, implements_to_string, \
+ get_next, encode_filename, PY2, PYPY
+from functools import reduce
+
+
+# for direct template usage we have up to ten living environments
+_spontaneous_environments = LRUCache(10)
+
+# the function to create jinja traceback objects. This is dynamically
+# imported on the first exception in the exception handler.
+_make_traceback = None
+
+
+def get_spontaneous_environment(*args):
+ """Return a new spontaneous environment. A spontaneous environment is an
+    unnamed and inaccessible (in theory) environment that is used for
+ templates generated from a string and not from the file system.
+ """
+ try:
+ env = _spontaneous_environments.get(args)
+ except TypeError:
+ return Environment(*args)
+ if env is not None:
+ return env
+ _spontaneous_environments[args] = env = Environment(*args)
+ env.shared = True
+ return env
+
+
+def create_cache(size):
+ """Return the cache class for the given size."""
+ if size == 0:
+ return None
+ if size < 0:
+ return {}
+ return LRUCache(size)
+
+
+def copy_cache(cache):
+ """Create an empty copy of the given cache."""
+ if cache is None:
+ return None
+ elif type(cache) is dict:
+ return {}
+ return LRUCache(cache.capacity)
+
+
+def load_extensions(environment, extensions):
+ """Load the extensions from the list and bind it to the environment.
+ Returns a dict of instantiated environments.
+ """
+ result = {}
+ for extension in extensions:
+ if isinstance(extension, string_types):
+ extension = import_string(extension)
+ result[extension.identifier] = extension(environment)
+ return result
+
+
+def _environment_sanity_check(environment):
+ """Perform a sanity check on the environment."""
+ assert issubclass(environment.undefined, Undefined), 'undefined must ' \
+ 'be a subclass of undefined because filters depend on it.'
+ assert environment.block_start_string != \
+ environment.variable_start_string != \
+ environment.comment_start_string, 'block, variable and comment ' \
+ 'start strings must be different'
+ assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
+ 'newline_sequence set to unknown line ending string.'
+ return environment
+
+
+class Environment(object):
+ r"""The core component of Jinja is the `Environment`. It contains
+ important shared variables like configuration, filters, tests,
+ globals and others. Instances of this class may be modified if
+ they are not shared and if no template was loaded so far.
+ Modifications on environments after the first template was loaded
+ will lead to surprising effects and undefined behavior.
+
+ Here are the possible initialization parameters:
+
+ `block_start_string`
+ The string marking the beginning of a block. Defaults to ``'{%'``.
+
+ `block_end_string`
+ The string marking the end of a block. Defaults to ``'%}'``.
+
+ `variable_start_string`
+ The string marking the beginning of a print statement.
+ Defaults to ``'{{'``.
+
+ `variable_end_string`
+ The string marking the end of a print statement. Defaults to
+ ``'}}'``.
+
+ `comment_start_string`
+ The string marking the beginning of a comment. Defaults to ``'{#'``.
+
+ `comment_end_string`
+ The string marking the end of a comment. Defaults to ``'#}'``.
+
+ `line_statement_prefix`
+ If given and a string, this will be used as prefix for line based
+ statements. See also :ref:`line-statements`.
+
+ `line_comment_prefix`
+ If given and a string, this will be used as prefix for line based
+ comments. See also :ref:`line-statements`.
+
+ .. versionadded:: 2.2
+
+ `trim_blocks`
+ If this is set to ``True`` the first newline after a block is
+ removed (block, not variable tag!). Defaults to `False`.
+
+ `lstrip_blocks`
+ If this is set to ``True`` leading spaces and tabs are stripped
+ from the start of a line to a block. Defaults to `False`.
+
+ `newline_sequence`
+ The sequence that starts a newline. Must be one of ``'\r'``,
+ ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
+ useful default for Linux and OS X systems as well as web
+ applications.
+
+ `keep_trailing_newline`
+ Preserve the trailing newline when rendering templates.
+ The default is ``False``, which causes a single newline,
+ if present, to be stripped from the end of the template.
+
+ .. versionadded:: 2.7
+
+ `extensions`
+ List of Jinja extensions to use. This can either be import paths
+ as strings or extension classes. For more information have a
+ look at :ref:`the extensions documentation <jinja-extensions>`.
+
+ `optimized`
+ should the optimizer be enabled? Default is `True`.
+
+ `undefined`
+ :class:`Undefined` or a subclass of it that is used to represent
+ undefined values in the template.
+
+ `finalize`
+ A callable that can be used to process the result of a variable
+ expression before it is output. For example one can convert
+ `None` implicitly into an empty string here.
+
+ `autoescape`
+ If set to true the XML/HTML autoescaping feature is enabled by
+ default. For more details about autoescaping see
+ :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
+ be a callable that is passed the template name and has to
+ return `True` or `False` depending on autoescape should be
+ enabled by default.
+
+ .. versionchanged:: 2.4
+ `autoescape` can now be a function
+
+ `loader`
+ The template loader for this environment.
+
+ `cache_size`
+        The size of the cache.  By default this is ``400`` which means
+ that if more than 400 templates are loaded the loader will clean
+ out the least recently used template. If the cache size is set to
+ ``0`` templates are recompiled all the time, if the cache size is
+ ``-1`` the cache will not be cleaned.
+
+ .. versionchanged:: 2.8
+ The cache size was increased to 400 from a low 50.
+
+ `auto_reload`
+ Some loaders load templates from locations where the template
+ sources may change (ie: file system or database). If
+ `auto_reload` is set to `True` (default) every time a template is
+ requested the loader checks if the source changed and if yes, it
+ will reload the template. For higher performance it's possible to
+ disable that.
+
+ `bytecode_cache`
+ If set to a bytecode cache object, this object will provide a
+ cache for the internal Jinja bytecode so that templates don't
+ have to be parsed if they were not changed.
+
+ See :ref:`bytecode-cache` for more information.
+ """
+
+ #: if this environment is sandboxed. Modifying this variable won't make
+ #: the environment sandboxed though. For a real sandboxed environment
+ #: have a look at jinja2.sandbox. This flag alone controls the code
+ #: generation by the compiler.
+ sandboxed = False
+
+ #: True if the environment is just an overlay
+ overlayed = False
+
+ #: the environment this environment is linked to if it is an overlay
+ linked_to = None
+
+ #: shared environments have this set to `True`. A shared environment
+ #: must not be modified
+ shared = False
+
+ #: these are currently EXPERIMENTAL undocumented features.
+ exception_handler = None
+ exception_formatter = None
+
+ #: the class that is used for code generation. See
+ #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+ code_generator_class = CodeGenerator
+
+    #: the context class that is used for templates.  See
+ #: :class:`~jinja2.runtime.Context` for more information.
+ context_class = Context
+
+ def __init__(self,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ loader=None,
+ cache_size=400,
+ auto_reload=True,
+ bytecode_cache=None):
+ # !!Important notice!!
+ # The constructor accepts quite a few arguments that should be
+ # passed by keyword rather than position. However it's important to
+ # not change the order of arguments because it's used at least
+ # internally in those cases:
+ # - spontaneous environments (i18n extension and Template)
+ # - unittests
+ # If parameter changes are required only add parameters at the end
+ # and don't change the arguments (or the defaults!) of the arguments
+ # existing already.
+
+ # lexer / parser information
+ self.block_start_string = block_start_string
+ self.block_end_string = block_end_string
+ self.variable_start_string = variable_start_string
+ self.variable_end_string = variable_end_string
+ self.comment_start_string = comment_start_string
+ self.comment_end_string = comment_end_string
+ self.line_statement_prefix = line_statement_prefix
+ self.line_comment_prefix = line_comment_prefix
+ self.trim_blocks = trim_blocks
+ self.lstrip_blocks = lstrip_blocks
+ self.newline_sequence = newline_sequence
+ self.keep_trailing_newline = keep_trailing_newline
+
+ # runtime information
+ self.undefined = undefined
+ self.optimized = optimized
+ self.finalize = finalize
+ self.autoescape = autoescape
+
+ # defaults
+ self.filters = DEFAULT_FILTERS.copy()
+ self.tests = DEFAULT_TESTS.copy()
+ self.globals = DEFAULT_NAMESPACE.copy()
+
+ # set the loader provided
+ self.loader = loader
+ self.cache = create_cache(cache_size)
+ self.bytecode_cache = bytecode_cache
+ self.auto_reload = auto_reload
+
+ # load extensions
+ self.extensions = load_extensions(self, extensions)
+
+ _environment_sanity_check(self)
+
+ def add_extension(self, extension):
+ """Adds an extension after the environment was created.
+
+ .. versionadded:: 2.5
+ """
+ self.extensions.update(load_extensions(self, [extension]))
+
+ def extend(self, **attributes):
+ """Add the items to the instance of the environment if they do not exist
+ yet. This is used by :ref:`extensions <writing-extensions>` to register
+ callbacks and configuration values without breaking inheritance.
+ """
+ for key, value in iteritems(attributes):
+ if not hasattr(self, key):
+ setattr(self, key, value)
+
+ def overlay(self, block_start_string=missing, block_end_string=missing,
+ variable_start_string=missing, variable_end_string=missing,
+ comment_start_string=missing, comment_end_string=missing,
+ line_statement_prefix=missing, line_comment_prefix=missing,
+ trim_blocks=missing, lstrip_blocks=missing,
+ extensions=missing, optimized=missing,
+ undefined=missing, finalize=missing, autoescape=missing,
+ loader=missing, cache_size=missing, auto_reload=missing,
+ bytecode_cache=missing):
+ """Create a new overlay environment that shares all the data with the
+ current environment except for cache and the overridden attributes.
+ Extensions cannot be removed for an overlayed environment. An overlayed
+ environment automatically gets all the extensions of the environment it
+ is linked to plus optional extra extensions.
+
+ Creating overlays should happen after the initial environment was set
+ up completely. Not all attributes are truly linked, some are just
+ copied over so modifications on the original environment may not shine
+ through.
+ """
+ args = dict(locals())
+ del args['self'], args['cache_size'], args['extensions']
+
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.overlayed = True
+ rv.linked_to = self
+
+ for key, value in iteritems(args):
+ if value is not missing:
+ setattr(rv, key, value)
+
+ if cache_size is not missing:
+ rv.cache = create_cache(cache_size)
+ else:
+ rv.cache = copy_cache(self.cache)
+
+ rv.extensions = {}
+ for key, value in iteritems(self.extensions):
+ rv.extensions[key] = value.bind(rv)
+ if extensions is not missing:
+ rv.extensions.update(load_extensions(rv, extensions))
+
+ return _environment_sanity_check(rv)
+
+ lexer = property(get_lexer, doc="The lexer for this environment.")
+
+ def iter_extensions(self):
+ """Iterates over the extensions by priority."""
+ return iter(sorted(self.extensions.values(),
+ key=lambda x: x.priority))
+
+ def getitem(self, obj, argument):
+ """Get an item or attribute of an object but prefer the item."""
+ try:
+ return obj[argument]
+ except (TypeError, LookupError):
+ if isinstance(argument, string_types):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ return getattr(obj, attr)
+ except AttributeError:
+ pass
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj, attribute):
+ """Get an item or attribute of an object but prefer the attribute.
+ Unlike :meth:`getitem` the attribute *must* be a bytestring.
+ """
+ try:
+ return getattr(obj, attribute)
+ except AttributeError:
+ pass
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError, AttributeError):
+ return self.undefined(obj=obj, name=attribute)
+
+ def call_filter(self, name, value, args=None, kwargs=None,
+ context=None, eval_ctx=None):
+ """Invokes a filter on a value the same way the compiler does it.
+
+ .. versionadded:: 2.7
+ """
+ func = self.filters.get(name)
+ if func is None:
+ raise TemplateRuntimeError('no filter named %r' % name)
+ args = [value] + list(args or ())
+ if getattr(func, 'contextfilter', False):
+ if context is None:
+ raise TemplateRuntimeError('Attempted to invoke context '
+ 'filter without context')
+ args.insert(0, context)
+ elif getattr(func, 'evalcontextfilter', False):
+ if eval_ctx is None:
+ if context is not None:
+ eval_ctx = context.eval_ctx
+ else:
+ eval_ctx = EvalContext(self)
+ args.insert(0, eval_ctx)
+ elif getattr(func, 'environmentfilter', False):
+ args.insert(0, self)
+ return func(*args, **(kwargs or {}))
+
+ def call_test(self, name, value, args=None, kwargs=None):
+ """Invokes a test on a value the same way the compiler does it.
+
+ .. versionadded:: 2.7
+ """
+ func = self.tests.get(name)
+ if func is None:
+ raise TemplateRuntimeError('no test named %r' % name)
+ return func(value, *(args or ()), **(kwargs or {}))
+
+ @internalcode
+ def parse(self, source, name=None, filename=None):
+ """Parse the sourcecode and return the abstract syntax tree. This
+ tree of nodes is used by the compiler to convert the template into
+ executable source- or bytecode. This is useful for debugging or to
+ extract information from templates.
+
+ If you are :ref:`developing Jinja2 extensions <writing-extensions>`
+ this gives you a good overview of the node tree generated.
+ """
+ try:
+ return self._parse(source, name, filename)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source)
+
+ def _parse(self, source, name, filename):
+ """Internal parsing function used by `parse` and `compile`."""
+ return Parser(self, source, name, encode_filename(filename)).parse()
+
+ def lex(self, source, name=None, filename=None):
+ """Lex the given sourcecode and return a generator that yields
+ tokens as tuples in the form ``(lineno, token_type, value)``.
+ This can be useful for :ref:`extension development <writing-extensions>`
+ and debugging templates.
+
+ This does not perform preprocessing. If you want the preprocessing
+ of the extensions to be applied you have to filter source through
+ the :meth:`preprocess` method.
+ """
+ source = text_type(source)
+ try:
+ return self.lexer.tokeniter(source, name, filename)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source)
+
+ def preprocess(self, source, name=None, filename=None):
+ """Preprocesses the source with all extensions. This is automatically
+ called for all parsing and compiling methods but *not* for :meth:`lex`
+ because there you usually only want the actual source tokenized.
+ """
+ return reduce(lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(), text_type(source))
+
+ def _tokenize(self, source, name, filename=None, state=None):
+ """Called by the parser to do the preprocessing and filtering
+ for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
+ """
+ source = self.preprocess(source, name, filename)
+ stream = self.lexer.tokenize(source, name, filename, state)
+ for ext in self.iter_extensions():
+ stream = ext.filter_stream(stream)
+ if not isinstance(stream, TokenStream):
+ stream = TokenStream(stream, name, filename)
+ return stream
+
+ def _generate(self, source, name, filename, defer_init=False):
+ """Internal hook that can be overridden to hook a different generate
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return generate(source, self, name, filename, defer_init=defer_init)
+
+ def _compile(self, source, filename):
+ """Internal hook that can be overridden to hook a different compile
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return compile(source, filename, 'exec')
+
+ @internalcode
+ def compile(self, source, name=None, filename=None, raw=False,
+ defer_init=False):
+ """Compile a node or template source code. The `name` parameter is
+ the load name of the template after it was joined using
+ :meth:`join_path` if necessary, not the filename on the file system.
+ the `filename` parameter is the estimated filename of the template on
+ the file system. If the template came from a database or memory this
+ can be omitted.
+
+ The return value of this method is a python code object. If the `raw`
+ parameter is `True` the return value will be a string with python
+ code equivalent to the bytecode returned otherwise. This method is
+ mainly used internally.
+
+        `defer_init` is used internally to aid the module code generator.  This
+ causes the generated code to be able to import without the global
+ environment variable to be set.
+
+ .. versionadded:: 2.4
+ `defer_init` parameter added.
+ """
+ source_hint = None
+ try:
+ if isinstance(source, string_types):
+ source_hint = source
+ source = self._parse(source, name, filename)
+ if self.optimized:
+ source = optimize(source, self)
+ source = self._generate(source, name, filename,
+ defer_init=defer_init)
+ if raw:
+ return source
+ if filename is None:
+ filename = '<template>'
+ else:
+ filename = encode_filename(filename)
+ return self._compile(source, filename)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ self.handle_exception(exc_info, source_hint=source_hint)
+
+ def compile_expression(self, source, undefined_to_none=True):
+ """A handy helper method that returns a callable that accepts keyword
+ arguments that appear as variables in the expression. If called it
+ returns the result of the expression.
+
+ This is useful if applications want to use the same rules as Jinja
+ in template "configuration files" or similar situations.
+
+ Example usage:
+
+ >>> env = Environment()
+ >>> expr = env.compile_expression('foo == 42')
+ >>> expr(foo=23)
+ False
+ >>> expr(foo=42)
+ True
+
+        By default the return value is converted to `None` if the
+ expression returns an undefined value. This can be changed
+ by setting `undefined_to_none` to `False`.
+
+ >>> env.compile_expression('var')() is None
+ True
+ >>> env.compile_expression('var', undefined_to_none=False)()
+ Undefined
+
+ .. versionadded:: 2.1
+ """
+ parser = Parser(self, source, state='variable')
+ exc_info = None
+ try:
+ expr = parser.parse_expression()
+ if not parser.stream.eos:
+ raise TemplateSyntaxError('chunk after expression',
+ parser.stream.current.lineno,
+ None, None)
+ expr.set_environment(self)
+ except TemplateSyntaxError:
+ exc_info = sys.exc_info()
+ if exc_info is not None:
+ self.handle_exception(exc_info, source_hint=source)
+ body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
+ template = self.from_string(nodes.Template(body, lineno=1))
+ return TemplateExpression(template, undefined_to_none)
+
+ def compile_templates(self, target, extensions=None, filter_func=None,
+ zip='deflated', log_function=None,
+ ignore_errors=True, py_compile=False):
+ """Finds all the templates the loader can find, compiles them
+ and stores them in `target`. If `zip` is `None`, instead of in a
+ zipfile, the templates will be stored in a directory.
+ By default a deflate zip algorithm is used. To switch to
+ the stored algorithm, `zip` can be set to ``'stored'``.
+
+ `extensions` and `filter_func` are passed to :meth:`list_templates`.
+ Each template returned will be compiled to the target folder or
+ zipfile.
+
+ By default template compilation errors are ignored. In case a
+ log function is provided, errors are logged. If you want template
+ syntax errors to abort the compilation you can set `ignore_errors`
+ to `False` and you will get an exception on syntax errors.
+
+ If `py_compile` is set to `True` .pyc files will be written to the
+ target instead of standard .py files. This flag does not do anything
+ on pypy and Python 3 where pyc files are not picked up by itself and
+ don't give much benefit.
+
+ .. versionadded:: 2.4
+ """
+ from jinja2.loaders import ModuleLoader
+
+ if log_function is None:
+ log_function = lambda x: None
+
+ if py_compile:
+ if not PY2 or PYPY:
+ from warnings import warn
+ warn(Warning('py_compile has no effect on pypy or Python 3'))
+ py_compile = False
+ else:
+ import imp
+ import marshal
+ py_header = imp.get_magic() + \
+ u'\xff\xff\xff\xff'.encode('iso-8859-15')
+
+ # Python 3.3 added a source filesize to the header
+ if sys.version_info >= (3, 3):
+ py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
+
+ def write_file(filename, data, mode):
+ if zip:
+ info = ZipInfo(filename)
+ info.external_attr = 0o755 << 16
+ zip_file.writestr(info, data)
+ else:
+ f = open(os.path.join(target, filename), mode)
+ try:
+ f.write(data)
+ finally:
+ f.close()
+
+ if zip is not None:
+ from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
+ zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
+ stored=ZIP_STORED)[zip])
+ log_function('Compiling into Zip archive "%s"' % target)
+ else:
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ log_function('Compiling into folder "%s"' % target)
+
+ try:
+ for name in self.list_templates(extensions, filter_func):
+ source, filename, _ = self.loader.get_source(self, name)
+ try:
+ code = self.compile(source, name, filename, True, True)
+ except TemplateSyntaxError as e:
+ if not ignore_errors:
+ raise
+ log_function('Could not compile "%s": %s' % (name, e))
+ continue
+
+ filename = ModuleLoader.get_module_filename(name)
+
+ if py_compile:
+ c = self._compile(code, encode_filename(filename))
+ write_file(filename + 'c', py_header +
+ marshal.dumps(c), 'wb')
+ log_function('Byte-compiled "%s" as %s' %
+ (name, filename + 'c'))
+ else:
+ write_file(filename, code, 'w')
+ log_function('Compiled "%s" as %s' % (name, filename))
+ finally:
+ if zip:
+ zip_file.close()
+
+ log_function('Finished compiling templates')
+
+ def list_templates(self, extensions=None, filter_func=None):
+ """Returns a list of templates for this environment. This requires
+ that the loader supports the loader's
+ :meth:`~BaseLoader.list_templates` method.
+
+ If there are other files in the template folder besides the
+ actual templates, the returned list can be filtered. There are two
+ ways: either `extensions` is set to a list of file extensions for
+ templates, or a `filter_func` can be provided which is a callable that
+ is passed a template name and should return `True` if it should end up
+ in the result list.
+
+ If the loader does not support that, a :exc:`TypeError` is raised.
+
+ .. versionadded:: 2.4
+ """
+ x = self.loader.list_templates()
+ if extensions is not None:
+ if filter_func is not None:
+ raise TypeError('either extensions or filter_func '
+ 'can be passed, but not both')
+ filter_func = lambda x: '.' in x and \
+ x.rsplit('.', 1)[1] in extensions
+ if filter_func is not None:
+ x = list(ifilter(filter_func, x))
+ return x
+
+ def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
+ """Exception handling helper. This is used internally to either raise
+ rewritten exceptions or return a rendered traceback for the template.
+ """
+ global _make_traceback
+ if exc_info is None:
+ exc_info = sys.exc_info()
+
+ # the debugging module is imported when it's used for the first time.
+ # we're doing a lot of stuff there and for applications that do not
+ # get any exceptions in template rendering there is no need to load
+ # all of that.
+ if _make_traceback is None:
+ from jinja2.debug import make_traceback as _make_traceback
+ traceback = _make_traceback(exc_info, source_hint)
+ if rendered and self.exception_formatter is not None:
+ return self.exception_formatter(traceback)
+ if self.exception_handler is not None:
+ self.exception_handler(traceback)
+ exc_type, exc_value, tb = traceback.standard_exc_info
+ reraise(exc_type, exc_value, tb)
+
+ def join_path(self, template, parent):
+ """Join a template with the parent. By default all the lookups are
+ relative to the loader root so this method returns the `template`
+ parameter unchanged, but if the paths should be relative to the
+ parent template, this function can be used to calculate the real
+ template name.
+
+ Subclasses may override this method and implement template path
+ joining here.
+ """
+ return template
+
+ @internalcode
+ def _load_template(self, name, globals):
+ if self.loader is None:
+ raise TypeError('no loader for this environment specified')
+ try:
+ # use abs path for cache key
+ cache_key = self.loader.get_source(self, name)[1]
+ except RuntimeError:
+ # if loader does not implement get_source()
+ cache_key = None
+ # if template is not file, use name for cache key
+ if cache_key is None:
+ cache_key = name
+ if self.cache is not None:
+ template = self.cache.get(cache_key)
+ if template is not None and (not self.auto_reload or
+ template.is_up_to_date):
+ return template
+ template = self.loader.load(self, name, globals)
+ if self.cache is not None:
+ self.cache[cache_key] = template
+ return template
+
+ @internalcode
+ def get_template(self, name, parent=None, globals=None):
+ """Load a template from the loader. If a loader is configured this
+ method ask the loader for the template and returns a :class:`Template`.
+ If the `parent` parameter is not `None`, :meth:`join_path` is called
+ to get the real template name before loading.
+
+ The `globals` parameter can be used to provide template wide globals.
+ These variables are available in the context at render time.
+
+ If the template does not exist a :exc:`TemplateNotFound` exception is
+ raised.
+
+ .. versionchanged:: 2.4
+ If `name` is a :class:`Template` object it is returned from the
+ function unchanged.
+ """
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ return self._load_template(name, self.make_globals(globals))
+
+ @internalcode
+ def select_template(self, names, parent=None, globals=None):
+ """Works like :meth:`get_template` but tries a number of templates
+ before it fails. If it cannot find any of the templates, it will
+ raise a :exc:`TemplatesNotFound` exception.
+
+ .. versionadded:: 2.3
+
+ .. versionchanged:: 2.4
+ If `names` contains a :class:`Template` object it is returned
+ from the function unchanged.
+ """
+ if not names:
+ raise TemplatesNotFound(message=u'Tried to select from an empty list '
+ u'of templates.')
+ globals = self.make_globals(globals)
+ for name in names:
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ try:
+ return self._load_template(name, globals)
+ except TemplateNotFound:
+ pass
+ raise TemplatesNotFound(names)
+
+ @internalcode
+ def get_or_select_template(self, template_name_or_list,
+ parent=None, globals=None):
+ """Does a typecheck and dispatches to :meth:`select_template`
+ if an iterable of template names is given, otherwise to
+ :meth:`get_template`.
+
+ .. versionadded:: 2.3
+ """
+ if isinstance(template_name_or_list, string_types):
+ return self.get_template(template_name_or_list, parent, globals)
+ elif isinstance(template_name_or_list, Template):
+ return template_name_or_list
+ return self.select_template(template_name_or_list, parent, globals)
+
+ def from_string(self, source, globals=None, template_class=None):
+ """Load a template from a string. This parses the source given and
+ returns a :class:`Template` object.
+ """
+ globals = self.make_globals(globals)
+ cls = template_class or self.template_class
+ return cls.from_code(self, self.compile(source), globals, None)
+
+ def make_globals(self, d):
+ """Return a dict for the globals."""
+ if not d:
+ return self.globals
+ return dict(self.globals, **d)
+
+
+class Template(object):
+ """The central template object. This class represents a compiled template
+ and is used to evaluate it.
+
+ Normally the template object is generated from an :class:`Environment` but
+ it also has a constructor that makes it possible to create a template
+ instance directly using the constructor. It takes the same arguments as
+ the environment constructor but it's not possible to specify a loader.
+
+ Every template object has a few methods and members that are guaranteed
+ to exist. However it's important that a template object should be
+ considered immutable. Modifications on the object are not supported.
+
+ Template objects created from the constructor rather than an environment
+ do have an `environment` attribute that points to a temporary environment
+ that is probably shared with other templates created with the constructor
+ and compatible settings.
+
+ >>> template = Template('Hello {{ name }}!')
+ >>> template.render(name='John Doe') == u'Hello John Doe!'
+ True
+ >>> stream = template.stream(name='John Doe')
+ >>> next(stream) == u'Hello John Doe!'
+ True
+ >>> next(stream)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ """
+
+ def __new__(cls, source,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False):
+ env = get_spontaneous_environment(
+ block_start_string, block_end_string, variable_start_string,
+ variable_end_string, comment_start_string, comment_end_string,
+ line_statement_prefix, line_comment_prefix, trim_blocks,
+ lstrip_blocks, newline_sequence, keep_trailing_newline,
+ frozenset(extensions), optimized, undefined, finalize, autoescape,
+ None, 0, False, None)
+ return env.from_string(source, template_class=cls)
+
+ @classmethod
+ def from_code(cls, environment, code, globals, uptodate=None):
+ """Creates a template object from compiled code and the globals. This
+ is used by the loaders and environment to create a template object.
+ """
+ namespace = {
+ 'environment': environment,
+ '__file__': code.co_filename
+ }
+ exec(code, namespace)
+ rv = cls._from_namespace(environment, namespace, globals)
+ rv._uptodate = uptodate
+ return rv
+
+ @classmethod
+ def from_module_dict(cls, environment, module_dict, globals):
+ """Creates a template object from a module. This is used by the
+ module loader to create a template object.
+
+ .. versionadded:: 2.4
+ """
+ return cls._from_namespace(environment, module_dict, globals)
+
+ @classmethod
+ def _from_namespace(cls, environment, namespace, globals):
+ t = object.__new__(cls)
+ t.environment = environment
+ t.globals = globals
+ t.name = namespace['name']
+ t.filename = namespace['__file__']
+ t.blocks = namespace['blocks']
+
+ # render function and module
+ t.root_render_func = namespace['root']
+ t._module = None
+
+ # debug and loader helpers
+ t._debug_info = namespace['debug_info']
+ t._uptodate = None
+
+ # store the reference
+ namespace['environment'] = environment
+ namespace['__jinja_template__'] = t
+
+ return t
+
+ def render(self, *args, **kwargs):
+ """This method accepts the same arguments as the `dict` constructor:
+ A dict, a dict subclass or some keyword arguments. If no arguments
+ are given the context will be empty. These two calls do the same::
+
+ template.render(knights='that say nih')
+ template.render({'knights': 'that say nih'})
+
+ This will return the rendered template as unicode string.
+ """
+ vars = dict(*args, **kwargs)
+ try:
+ return concat(self.root_render_func(self.new_context(vars)))
+ except Exception:
+ exc_info = sys.exc_info()
+ return self.environment.handle_exception(exc_info, True)
+
+ def stream(self, *args, **kwargs):
+ """Works exactly like :meth:`generate` but returns a
+ :class:`TemplateStream`.
+ """
+ return TemplateStream(self.generate(*args, **kwargs))
+
+ def generate(self, *args, **kwargs):
+ """For very large templates it can be useful to not render the whole
+ template at once but evaluate each statement after another and yield
+ piece for piece. This method basically does exactly that and returns
+ a generator that yields one item after another as unicode strings.
+
+ It accepts the same arguments as :meth:`render`.
+ """
+ vars = dict(*args, **kwargs)
+ try:
+ for event in self.root_render_func(self.new_context(vars)):
+ yield event
+ except Exception:
+ exc_info = sys.exc_info()
+ else:
+ return
+ yield self.environment.handle_exception(exc_info, True)
+
+ def new_context(self, vars=None, shared=False, locals=None):
+ """Create a new :class:`Context` for this template. The vars
+ provided will be passed to the template. Per default the globals
+ are added to the context. If shared is set to `True` the data
+ is passed as it to the context without adding the globals.
+
+ `locals` can be a dict of local variables for internal usage.
+ """
+ return new_context(self.environment, self.name, self.blocks,
+ vars, shared, self.globals, locals)
+
+ def make_module(self, vars=None, shared=False, locals=None):
+ """This method works like the :attr:`module` attribute when called
+ without arguments but it will evaluate the template on every call
+ rather than caching it. It's also possible to provide
+ a dict which is then used as context. The arguments are the same
+ as for the :meth:`new_context` method.
+ """
+ return TemplateModule(self, self.new_context(vars, shared, locals))
+
+ @property
+ def module(self):
+ """The template as module. This is used for imports in the
+ template runtime but is also useful if one wants to access
+ exported template variables from the Python layer:
+
+ >>> t = Template('{% macro foo() %}42{% endmacro %}23')
+ >>> str(t.module)
+ '23'
+ >>> t.module.foo() == u'42'
+ True
+ """
+ if self._module is not None:
+ return self._module
+ self._module = rv = self.make_module()
+ return rv
+
+ def get_corresponding_lineno(self, lineno):
+ """Return the source line number of a line number in the
+ generated bytecode as they are not in sync.
+ """
+ for template_line, code_line in reversed(self.debug_info):
+ if code_line <= lineno:
+ return template_line
+ return 1
+
+ @property
+ def is_up_to_date(self):
+ """If this variable is `False` there is a newer version available."""
+ if self._uptodate is None:
+ return True
+ return self._uptodate()
+
+ @property
+ def debug_info(self):
+ """The debug info mapping."""
+ return [tuple(imap(int, x.split('='))) for x in
+ self._debug_info.split('&')]
+
+ def __repr__(self):
+ if self.name is None:
+ name = 'memory:%x' % id(self)
+ else:
+ name = repr(self.name)
+ return '<%s %s>' % (self.__class__.__name__, name)
+
+
+@implements_to_string
+class TemplateModule(object):
+ """Represents an imported template. All the exported names of the
+ template are available as attributes on this object. Additionally
+ converting it into an unicode- or bytestrings renders the contents.
+ """
+
+ def __init__(self, template, context):
+ self._body_stream = list(template.root_render_func(context))
+ self.__dict__.update(context.get_exported())
+ self.__name__ = template.name
+
+ def __html__(self):
+ return Markup(concat(self._body_stream))
+
+ def __str__(self):
+ return concat(self._body_stream)
+
+ def __repr__(self):
+ if self.__name__ is None:
+ name = 'memory:%x' % id(self)
+ else:
+ name = repr(self.__name__)
+ return '<%s %s>' % (self.__class__.__name__, name)
+
+
+class TemplateExpression(object):
+ """The :meth:`jinja2.Environment.compile_expression` method returns an
+ instance of this object. It encapsulates the expression-like access
+ to the template with an expression it wraps.
+ """
+
+ def __init__(self, template, undefined_to_none):
+ self._template = template
+ self._undefined_to_none = undefined_to_none
+
+ def __call__(self, *args, **kwargs):
+ context = self._template.new_context(dict(*args, **kwargs))
+ consume(self._template.root_render_func(context))
+ rv = context.vars['result']
+ if self._undefined_to_none and isinstance(rv, Undefined):
+ rv = None
+ return rv
+
+
+@implements_iterator
+class TemplateStream(object):
+ """A template stream works pretty much like an ordinary python generator
+ but it can buffer multiple items to reduce the number of total iterations.
+ Per default the output is unbuffered which means that for every unbuffered
+ instruction in the template one unicode string is yielded.
+
+ If buffering is enabled with a buffer size of 5, five items are combined
+ into a new unicode string. This is mainly useful if you are streaming
+ big templates to a client via WSGI which flushes after each iteration.
+ """
+
+ def __init__(self, gen):
+ self._gen = gen
+ self.disable_buffering()
+
+ def dump(self, fp, encoding=None, errors='strict'):
+ """Dump the complete stream into a file or file-like object.
+ Per default unicode strings are written, if you want to encode
+ before writing specify an `encoding`.
+
+ Example usage::
+
+ Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
+ """
+ close = False
+ if isinstance(fp, string_types):
+ if encoding is None:
+ encoding = 'utf-8'
+ fp = open(fp, 'wb')
+ close = True
+ try:
+ if encoding is not None:
+ iterable = (x.encode(encoding, errors) for x in self)
+ else:
+ iterable = self
+ if hasattr(fp, 'writelines'):
+ fp.writelines(iterable)
+ else:
+ for item in iterable:
+ fp.write(item)
+ finally:
+ if close:
+ fp.close()
+
+ def disable_buffering(self):
+ """Disable the output buffering."""
+ self._next = get_next(self._gen)
+ self.buffered = False
+
+ def enable_buffering(self, size=5):
+ """Enable buffering. Buffer `size` items before yielding them."""
+ if size <= 1:
+ raise ValueError('buffer size too small')
+
+ def generator(next):
+ buf = []
+ c_size = 0
+ push = buf.append
+
+ while 1:
+ try:
+ while c_size < size:
+ c = next()
+ push(c)
+ if c:
+ c_size += 1
+ except StopIteration:
+ if not c_size:
+ return
+ yield concat(buf)
+ del buf[:]
+ c_size = 0
+
+ self.buffered = True
+ self._next = get_next(generator(get_next(self._gen)))
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+
+# hook in default template class. if anyone reads this comment: ignore that
+# it's possible to use custom templates ;-)
+Environment.template_class = Template
diff --git a/deps/v8/third_party/jinja2/exceptions.py b/deps/v8/third_party/jinja2/exceptions.py
new file mode 100644
index 0000000000..c9df6dc7c2
--- /dev/null
+++ b/deps/v8/third_party/jinja2/exceptions.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.exceptions
+ ~~~~~~~~~~~~~~~~~
+
+ Jinja exceptions.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import imap, text_type, PY2, implements_to_string
+
+
+class TemplateError(Exception):
+ """Baseclass for all template errors."""
+
+ if PY2:
+ def __init__(self, message=None):
+ if message is not None:
+ message = text_type(message).encode('utf-8')
+ Exception.__init__(self, message)
+
+ @property
+ def message(self):
+ if self.args:
+ message = self.args[0]
+ if message is not None:
+ return message.decode('utf-8', 'replace')
+
+ def __unicode__(self):
+ return self.message or u''
+ else:
+ def __init__(self, message=None):
+ Exception.__init__(self, message)
+
+ @property
+ def message(self):
+ if self.args:
+ message = self.args[0]
+ if message is not None:
+ return message
+
+
+@implements_to_string
+class TemplateNotFound(IOError, LookupError, TemplateError):
+ """Raised if a template does not exist."""
+
+ # looks weird, but removes the warning descriptor that just
+ # bogusly warns us about message being deprecated
+ message = None
+
+ def __init__(self, name, message=None):
+ IOError.__init__(self)
+ if message is None:
+ message = name
+ self.message = message
+ self.name = name
+ self.templates = [name]
+
+ def __str__(self):
+ return self.message
+
+
+class TemplatesNotFound(TemplateNotFound):
+ """Like :class:`TemplateNotFound` but raised if multiple templates
+ are selected. This is a subclass of :class:`TemplateNotFound`
+ exception, so just catching the base exception will catch both.
+
+ .. versionadded:: 2.2
+ """
+
+ def __init__(self, names=(), message=None):
+ if message is None:
+ message = u'none of the templates given were found: ' + \
+ u', '.join(imap(text_type, names))
+ TemplateNotFound.__init__(self, names and names[-1] or None, message)
+ self.templates = list(names)
+
+
+@implements_to_string
+class TemplateSyntaxError(TemplateError):
+ """Raised to tell the user that there is a problem with the template."""
+
+ def __init__(self, message, lineno, name=None, filename=None):
+ TemplateError.__init__(self, message)
+ self.lineno = lineno
+ self.name = name
+ self.filename = filename
+ self.source = None
+
+ # this is set to True if the debug.translate_syntax_error
+ # function translated the syntax error into a new traceback
+ self.translated = False
+
+ def __str__(self):
+ # for translated errors we only return the message
+ if self.translated:
+ return self.message
+
+ # otherwise attach some stuff
+ location = 'line %d' % self.lineno
+ name = self.filename or self.name
+ if name:
+ location = 'File "%s", %s' % (name, location)
+ lines = [self.message, ' ' + location]
+
+ # if the source is set, add the line to the output
+ if self.source is not None:
+ try:
+ line = self.source.splitlines()[self.lineno - 1]
+ except IndexError:
+ line = None
+ if line:
+ lines.append(' ' + line.strip())
+
+ return u'\n'.join(lines)
+
+
+class TemplateAssertionError(TemplateSyntaxError):
+ """Like a template syntax error, but covers cases where something in the
+ template caused an error at compile time that wasn't necessarily caused
+ by a syntax error. However it's a direct subclass of
+ :exc:`TemplateSyntaxError` and has the same attributes.
+ """
+
+
+class TemplateRuntimeError(TemplateError):
+ """A generic runtime error in the template engine. Under some situations
+ Jinja may raise this exception.
+ """
+
+
+class UndefinedError(TemplateRuntimeError):
+ """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+ """Raised if a template tries to do something insecure if the
+ sandbox is enabled.
+ """
+
+
+class FilterArgumentError(TemplateRuntimeError):
+ """This error is raised if a filter was called with inappropriate
+ arguments
+ """
diff --git a/deps/v8/third_party/jinja2/ext.py b/deps/v8/third_party/jinja2/ext.py
new file mode 100644
index 0000000000..562ab506ff
--- /dev/null
+++ b/deps/v8/third_party/jinja2/ext.py
@@ -0,0 +1,636 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.ext
+ ~~~~~~~~~~
+
+ Jinja extensions allow to add custom tags similar to the way django custom
+ tags work. By default two example extensions exist: an i18n and a cache
+ extension.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+ BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+ COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+ LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+ KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.environment import Environment
+from jinja2.runtime import concat
+from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
+from jinja2.utils import contextfunction, import_string, Markup
+from jinja2._compat import with_metaclass, string_types, iteritems
+
+
+# the only real useful gettext functions for a Jinja template. Note
+# that ugettext must be assigned to gettext as Jinja doesn't support
+# non unicode strings.
+GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+
+
+class ExtensionRegistry(type):
+ """Gives the extension an unique identifier."""
+
+ def __new__(cls, name, bases, d):
+ rv = type.__new__(cls, name, bases, d)
+ rv.identifier = rv.__module__ + '.' + rv.__name__
+ return rv
+
+
+class Extension(with_metaclass(ExtensionRegistry, object)):
+ """Extensions can be used to add extra functionality to the Jinja template
+ system at the parser level. Custom extensions are bound to an environment
+ but may not store environment specific data on `self`. The reason for
+ this is that an extension can be bound to another environment (for
+ overlays) by creating a copy and reassigning the `environment` attribute.
+
+ As extensions are created by the environment they cannot accept any
+ arguments for configuration. One may want to work around that by using
+ a factory function, but that is not possible as extensions are identified
+ by their import name. The correct way to configure the extension is
+ storing the configuration values on the environment. Because this way the
+ environment ends up acting as central configuration storage the
+ attributes may clash which is why extensions have to ensure that the names
+ they choose for configuration are not too generic. ``prefix`` for example
+ is a terrible name, ``fragment_cache_prefix`` on the other hand is a good
+ name as includes the name of the extension (fragment cache).
+ """
+
+ #: if this extension parses this is the list of tags it's listening to.
+ tags = set()
+
+ #: the priority of that extension. This is especially useful for
+ #: extensions that preprocess values. A lower value means higher
+ #: priority.
+ #:
+ #: .. versionadded:: 2.4
+ priority = 100
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def bind(self, environment):
+ """Create a copy of this extension bound to another environment."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.environment = environment
+ return rv
+
+ def preprocess(self, source, name, filename=None):
+ """This method is called before the actual lexing and can be used to
+ preprocess the source. The `filename` is optional. The return value
+ must be the preprocessed source.
+ """
+ return source
+
+ def filter_stream(self, stream):
+ """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+ to filter tokens returned. This method has to return an iterable of
+ :class:`~jinja2.lexer.Token`\s, but it doesn't have to return a
+ :class:`~jinja2.lexer.TokenStream`.
+
+ In the `ext` folder of the Jinja2 source distribution there is a file
+ called `inlinegettext.py` which implements a filter that utilizes this
+ method.
+ """
+ return stream
+
+ def parse(self, parser):
+ """If any of the :attr:`tags` matched this method is called with the
+ parser as first argument. The token the parser stream is pointing at
+ is the name token that matched. This method has to return one or a
+ list of multiple nodes.
+ """
+ raise NotImplementedError()
+
+ def attr(self, name, lineno=None):
+ """Return an attribute node for the current extension. This is useful
+ to pass constants on extensions to generated template code.
+
+ ::
+
+ self.attr('_my_attribute', lineno=lineno)
+ """
+ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+ def call_method(self, name, args=None, kwargs=None, dyn_args=None,
+ dyn_kwargs=None, lineno=None):
+ """Call a method of the extension. This is a shortcut for
+ :meth:`attr` + :class:`jinja2.nodes.Call`.
+ """
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = []
+ return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
+ dyn_args, dyn_kwargs, lineno=lineno)
+
+
+@contextfunction
+def _gettext_alias(__context, *args, **kwargs):
+ return __context.call(__context.resolve('gettext'), *args, **kwargs)
+
+
+def _make_new_gettext(func):
+ @contextfunction
+ def gettext(__context, __string, **variables):
+ rv = __context.call(func, __string)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv % variables
+ return gettext
+
+
+def _make_new_ngettext(func):
+ @contextfunction
+ def ngettext(__context, __singular, __plural, __num, **variables):
+ variables.setdefault('num', __num)
+ rv = __context.call(func, __singular, __plural, __num)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv % variables
+ return ngettext
+
+
+class InternationalizationExtension(Extension):
+ """This extension adds gettext support to Jinja2."""
+ tags = set(['trans'])
+
+ # TODO: the i18n extension is currently reevaluating values in a few
+ # situations. Take this example:
+ # {% trans count=something() %}{{ count }} foo{% pluralize
+ # %}{{ count }} fooss{% endtrans %}
+ # something is called twice here. One time for the gettext value and
+ # the other time for the n-parameter of the ngettext function.
+
+ def __init__(self, environment):
+ Extension.__init__(self, environment)
+ environment.globals['_'] = _gettext_alias
+ environment.extend(
+ install_gettext_translations=self._install,
+ install_null_translations=self._install_null,
+ install_gettext_callables=self._install_callables,
+ uninstall_gettext_translations=self._uninstall,
+ extract_translations=self._extract,
+ newstyle_gettext=False
+ )
+
+ def _install(self, translations, newstyle=None):
+ gettext = getattr(translations, 'ugettext', None)
+ if gettext is None:
+ gettext = translations.gettext
+ ngettext = getattr(translations, 'ungettext', None)
+ if ngettext is None:
+ ngettext = translations.ngettext
+ self._install_callables(gettext, ngettext, newstyle)
+
+ def _install_null(self, newstyle=None):
+ self._install_callables(
+ lambda x: x,
+ lambda s, p, n: (n != 1 and (p,) or (s,))[0],
+ newstyle
+ )
+
+ def _install_callables(self, gettext, ngettext, newstyle=None):
+ if newstyle is not None:
+ self.environment.newstyle_gettext = newstyle
+ if self.environment.newstyle_gettext:
+ gettext = _make_new_gettext(gettext)
+ ngettext = _make_new_ngettext(ngettext)
+ self.environment.globals.update(
+ gettext=gettext,
+ ngettext=ngettext
+ )
+
+ def _uninstall(self, translations):
+ for key in 'gettext', 'ngettext':
+ self.environment.globals.pop(key, None)
+
+ def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
+ if isinstance(source, string_types):
+ source = self.environment.parse(source)
+ return extract_from_ast(source, gettext_functions)
+
+ def parse(self, parser):
+ """Parse a translatable tag."""
+ lineno = next(parser.stream).lineno
+ num_called_num = False
+
+ # find all the variables referenced. Additionally a variable can be
+ # defined in the body of the trans block too, but this is checked at
+ # a later state.
+ plural_expr = None
+ plural_expr_assignment = None
+ variables = {}
+ while parser.stream.current.type != 'block_end':
+ if variables:
+ parser.stream.expect('comma')
+
+ # skip colon for python compatibility
+ if parser.stream.skip_if('colon'):
+ break
+
+ name = parser.stream.expect('name')
+ if name.value in variables:
+ parser.fail('translatable variable %r defined twice.' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
+
+ # expressions
+ if parser.stream.current.type == 'assign':
+ next(parser.stream)
+ variables[name.value] = var = parser.parse_expression()
+ else:
+ variables[name.value] = var = nodes.Name(name.value, 'load')
+
+ if plural_expr is None:
+ if isinstance(var, nodes.Call):
+ plural_expr = nodes.Name('_trans', 'load')
+ variables[name.value] = plural_expr
+ plural_expr_assignment = nodes.Assign(
+ nodes.Name('_trans', 'store'), var)
+ else:
+ plural_expr = var
+ num_called_num = name.value == 'num'
+
+ parser.stream.expect('block_end')
+
+ plural = plural_names = None
+ have_plural = False
+ referenced = set()
+
+ # now parse until endtrans or pluralize
+ singular_names, singular = self._parse_block(parser, True)
+ if singular_names:
+ referenced.update(singular_names)
+ if plural_expr is None:
+ plural_expr = nodes.Name(singular_names[0], 'load')
+ num_called_num = singular_names[0] == 'num'
+
+ # if we have a pluralize block, we parse that too
+ if parser.stream.current.test('name:pluralize'):
+ have_plural = True
+ next(parser.stream)
+ if parser.stream.current.type != 'block_end':
+ name = parser.stream.expect('name')
+ if name.value not in variables:
+ parser.fail('unknown variable %r for pluralization' %
+ name.value, name.lineno,
+ exc=TemplateAssertionError)
+ plural_expr = variables[name.value]
+ num_called_num = name.value == 'num'
+ parser.stream.expect('block_end')
+ plural_names, plural = self._parse_block(parser, False)
+ next(parser.stream)
+ referenced.update(plural_names)
+ else:
+ next(parser.stream)
+
+ # register free names as simple name expressions
+ for var in referenced:
+ if var not in variables:
+ variables[var] = nodes.Name(var, 'load')
+
+ if not have_plural:
+ plural_expr = None
+ elif plural_expr is None:
+ parser.fail('pluralize without variables', lineno)
+
+ node = self._make_node(singular, plural, variables, plural_expr,
+ bool(referenced),
+ num_called_num and have_plural)
+ node.set_lineno(lineno)
+ if plural_expr_assignment is not None:
+ return [plural_expr_assignment, node]
+ else:
+ return node
+
+ def _parse_block(self, parser, allow_pluralize):
+ """Parse until the next block tag with a given name."""
+ referenced = []
+ buf = []
+ while 1:
+ if parser.stream.current.type == 'data':
+ buf.append(parser.stream.current.value.replace('%', '%%'))
+ next(parser.stream)
+ elif parser.stream.current.type == 'variable_begin':
+ next(parser.stream)
+ name = parser.stream.expect('name').value
+ referenced.append(name)
+ buf.append('%%(%s)s' % name)
+ parser.stream.expect('variable_end')
+ elif parser.stream.current.type == 'block_begin':
+ next(parser.stream)
+ if parser.stream.current.test('name:endtrans'):
+ break
+ elif parser.stream.current.test('name:pluralize'):
+ if allow_pluralize:
+ break
+ parser.fail('a translatable section can have only one '
+ 'pluralize section')
+ parser.fail('control structures in translatable sections are '
+ 'not allowed')
+ elif parser.stream.eos:
+ parser.fail('unclosed translation block')
+ else:
+ assert False, 'internal parser error'
+
+ return referenced, concat(buf)
+
+ def _make_node(self, singular, plural, variables, plural_expr,
+ vars_referenced, num_called_num):
+ """Generates a useful node from the data provided."""
+ # no variables referenced? no need to escape for old style
+ # gettext invocations only if there are vars.
+ if not vars_referenced and not self.environment.newstyle_gettext:
+ singular = singular.replace('%%', '%')
+ if plural:
+ plural = plural.replace('%%', '%')
+
+ # singular only:
+ if plural_expr is None:
+ gettext = nodes.Name('gettext', 'load')
+ node = nodes.Call(gettext, [nodes.Const(singular)],
+ [], None, None)
+
+ # singular and plural
+ else:
+ ngettext = nodes.Name('ngettext', 'load')
+ node = nodes.Call(ngettext, [
+ nodes.Const(singular),
+ nodes.Const(plural),
+ plural_expr
+ ], [], None, None)
+
+ # in case newstyle gettext is used, the method is powerful
+ # enough to handle the variable expansion and autoescape
+ # handling itself
+ if self.environment.newstyle_gettext:
+ for key, value in iteritems(variables):
+ # the function adds that later anyways in case num was
+ # called num, so just skip it.
+ if num_called_num and key == 'num':
+ continue
+ node.kwargs.append(nodes.Keyword(key, value))
+
+ # otherwise do that here
+ else:
+ # mark the return value as safe if we are in an
+ # environment with autoescaping turned on
+ node = nodes.MarkSafeIfAutoescape(node)
+ if variables:
+ node = nodes.Mod(node, nodes.Dict([
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]))
+ return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+ """Adds a `do` tag to Jinja2 that works like the print statement just
+ that it doesn't print the return value.
+ """
+ tags = set(['do'])
+
+ def parse(self, parser):
+ node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
+ node.node = parser.parse_tuple()
+ return node
+
+
+class LoopControlExtension(Extension):
+ """Adds break and continue to the template engine."""
+ tags = set(['break', 'continue'])
+
+ def parse(self, parser):
+ token = next(parser.stream)
+ if token.value == 'break':
+ return nodes.Break(lineno=token.lineno)
+ return nodes.Continue(lineno=token.lineno)
+
+
+class WithExtension(Extension):
+ """Adds support for a django-like with block."""
+ tags = set(['with'])
+
+ def parse(self, parser):
+ node = nodes.Scope(lineno=next(parser.stream).lineno)
+ assignments = []
+ while parser.stream.current.type != 'block_end':
+ lineno = parser.stream.current.lineno
+ if assignments:
+ parser.stream.expect('comma')
+ target = parser.parse_assign_target()
+ parser.stream.expect('assign')
+ expr = parser.parse_expression()
+ assignments.append(nodes.Assign(target, expr, lineno=lineno))
+ node.body = assignments + \
+ list(parser.parse_statements(('name:endwith',),
+ drop_needle=True))
+ return node
+
+
+class AutoEscapeExtension(Extension):
+ """Changes auto escape rules for a scope."""
+ tags = set(['autoescape'])
+
+ def parse(self, parser):
+ node = nodes.ScopedEvalContextModifier(lineno=next(parser.stream).lineno)
+ node.options = [
+ nodes.Keyword('autoescape', parser.parse_expression())
+ ]
+ node.body = parser.parse_statements(('name:endautoescape',),
+ drop_needle=True)
+ return nodes.Scope([node])
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
+ babel_style=True):
+ """Extract localizable strings from the given template node. Per
+ default this function returns matches in babel style that means non string
+ parameters as well as keyword arguments are returned as `None`. This
+ allows Babel to figure out what you really meant if you are using
+ gettext functions that allow keyword arguments for placeholder expansion.
+ If you don't want that behavior set the `babel_style` parameter to `False`
+ which causes only strings to be returned and parameters are always stored
+ in tuples. As a consequence invalid gettext calls (calls without a single
+ string parameter or string parameters after non-string parameters) are
+ skipped.
+
+ This example explains the behavior:
+
+ >>> from jinja2 import Environment
+ >>> env = Environment()
+ >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+ >>> list(extract_from_ast(node))
+ [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+ >>> list(extract_from_ast(node, babel_style=False))
+ [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+ For every string found this function yields a ``(lineno, function,
+ message)`` tuple, where:
+
+ * ``lineno`` is the number of the line on which the string was found,
+ * ``function`` is the name of the ``gettext`` function used (if the
+ string was extracted from embedded Python code), and
+ * ``message`` is the string itself (a ``unicode`` object, or a tuple
+ of ``unicode`` objects for functions with multiple string arguments).
+
+ This extraction function operates on the AST and is because of that unable
+ to extract any comments. For comment support you have to use the babel
+ extraction interface or extract comments yourself.
+ """
+ for node in node.find_all(nodes.Call):
+ if not isinstance(node.node, nodes.Name) or \
+ node.node.name not in gettext_functions:
+ continue
+
+ strings = []
+ for arg in node.args:
+ if isinstance(arg, nodes.Const) and \
+ isinstance(arg.value, string_types):
+ strings.append(arg.value)
+ else:
+ strings.append(None)
+
+ for arg in node.kwargs:
+ strings.append(None)
+ if node.dyn_args is not None:
+ strings.append(None)
+ if node.dyn_kwargs is not None:
+ strings.append(None)
+
+ if not babel_style:
+ strings = tuple(x for x in strings if x is not None)
+ if not strings:
+ continue
+ else:
+ if len(strings) == 1:
+ strings = strings[0]
+ else:
+ strings = tuple(strings)
+ yield node.lineno, node.node.name, strings
+
+
+class _CommentFinder(object):
+ """Helper class to find comments in a token stream. Can only
+ find comments for gettext calls forwards. Once the comment
+ from line 4 is found, a comment for line 1 will not return a
+ usable value.
+ """
+
+ def __init__(self, tokens, comment_tags):
+ self.tokens = tokens
+ self.comment_tags = comment_tags
+ self.offset = 0
+ self.last_lineno = 0
+
+ def find_backwards(self, offset):
+ try:
+ for _, token_type, token_value in \
+ reversed(self.tokens[self.offset:offset]):
+ if token_type in ('comment', 'linecomment'):
+ try:
+ prefix, comment = token_value.split(None, 1)
+ except ValueError:
+ continue
+ if prefix in self.comment_tags:
+ return [comment.rstrip()]
+ return []
+ finally:
+ self.offset = offset
+
+ def find_comments(self, lineno):
+ if not self.comment_tags or self.last_lineno > lineno:
+ return []
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
+ if token_lineno > lineno:
+ return self.find_backwards(self.offset + idx)
+ return self.find_backwards(len(self.tokens))
+
+
+def babel_extract(fileobj, keywords, comment_tags, options):
+ """Babel extraction method for Jinja templates.
+
+ .. versionchanged:: 2.3
+ Basic support for translation comments was added. If `comment_tags`
+ is now set to a list of keywords for extraction, the extractor will
+ try to find the best preceeding comment that begins with one of the
+ keywords. For best results, make sure to not have more than one
+ gettext call in one line of code and the matching comment in the
+ same line or the line before.
+
+ .. versionchanged:: 2.5.1
+ The `newstyle_gettext` flag can be set to `True` to enable newstyle
+ gettext calls.
+
+ .. versionchanged:: 2.7
+ A `silent` option can now be provided. If set to `False` template
+ syntax errors are propagated instead of being ignored.
+
+ :param fileobj: the file-like object the messages should be extracted from
+ :param keywords: a list of keywords (i.e. function names) that should be
+ recognized as translation functions
+ :param comment_tags: a list of translator tags to search for and include
+ in the results.
+ :param options: a dictionary of additional options (optional)
+ :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+ (comments will be empty currently)
+ """
+ extensions = set()
+ for extension in options.get('extensions', '').split(','):
+ extension = extension.strip()
+ if not extension:
+ continue
+ extensions.add(import_string(extension))
+ if InternationalizationExtension not in extensions:
+ extensions.add(InternationalizationExtension)
+
+ def getbool(options, key, default=False):
+ return options.get(key, str(default)).lower() in \
+ ('1', 'on', 'yes', 'true')
+
+ silent = getbool(options, 'silent', True)
+ environment = Environment(
+ options.get('block_start_string', BLOCK_START_STRING),
+ options.get('block_end_string', BLOCK_END_STRING),
+ options.get('variable_start_string', VARIABLE_START_STRING),
+ options.get('variable_end_string', VARIABLE_END_STRING),
+ options.get('comment_start_string', COMMENT_START_STRING),
+ options.get('comment_end_string', COMMENT_END_STRING),
+ options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
+ options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
+ getbool(options, 'trim_blocks', TRIM_BLOCKS),
+ getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
+ NEWLINE_SEQUENCE,
+ getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
+ frozenset(extensions),
+ cache_size=0,
+ auto_reload=False
+ )
+
+ if getbool(options, 'newstyle_gettext'):
+ environment.newstyle_gettext = True
+
+ source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+ try:
+ node = environment.parse(source)
+ tokens = list(environment.lex(environment.preprocess(source)))
+ except TemplateSyntaxError as e:
+ if not silent:
+ raise
+ # skip templates with syntax errors
+ return
+
+ finder = _CommentFinder(tokens, comment_tags)
+ for lineno, func, message in extract_from_ast(node, keywords):
+ yield lineno, func, message, finder.find_comments(lineno)
+
+
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
+with_ = WithExtension
+autoescape = AutoEscapeExtension
diff --git a/deps/v8/third_party/jinja2/filters.py b/deps/v8/third_party/jinja2/filters.py
new file mode 100644
index 0000000000..e5c7a1ab43
--- /dev/null
+++ b/deps/v8/third_party/jinja2/filters.py
@@ -0,0 +1,996 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.filters
+ ~~~~~~~~~~~~~~
+
+ Bundled jinja filters.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+import math
+
+from random import choice
+from operator import itemgetter
+from itertools import groupby
+from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
+ unicode_urlencode
+from jinja2.runtime import Undefined
+from jinja2.exceptions import FilterArgumentError
+from jinja2._compat import imap, string_types, text_type, iteritems
+
+
+_word_re = re.compile(r'\w+(?u)')
+
+
+def contextfilter(f):
+ """Decorator for marking context dependent filters. The current
+ :class:`Context` will be passed as first argument.
+ """
+ f.contextfilter = True
+ return f
+
+
+def evalcontextfilter(f):
+ """Decorator for marking eval-context dependent filters. An eval
+ context object is passed as first argument. For more information
+ about the eval context, see :ref:`eval-context`.
+
+ .. versionadded:: 2.4
+ """
+ f.evalcontextfilter = True
+ return f
+
+
+def environmentfilter(f):
+ """Decorator for marking evironment dependent filters. The current
+ :class:`Environment` is passed to the filter as first argument.
+ """
+ f.environmentfilter = True
+ return f
+
+
+def make_attrgetter(environment, attribute):
+ """Returns a callable that looks up the given attribute from a
+ passed object with the rules of the environment. Dots are allowed
+ to access attributes of attributes. Integer parts in paths are
+ looked up as integers.
+ """
+ if not isinstance(attribute, string_types) \
+ or ('.' not in attribute and not attribute.isdigit()):
+ return lambda x: environment.getitem(x, attribute)
+ attribute = attribute.split('.')
+ def attrgetter(item):
+ for part in attribute:
+ if part.isdigit():
+ part = int(part)
+ item = environment.getitem(item, part)
+ return item
+ return attrgetter
+
+
+def do_forceescape(value):
+ """Enforce HTML escaping. This will probably double escape variables."""
+ if hasattr(value, '__html__'):
+ value = value.__html__()
+ return escape(text_type(value))
+
+
+def do_urlencode(value):
+ """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
+ dictionaries and regular strings as well as pairwise iterables.
+
+ .. versionadded:: 2.7
+ """
+ itemiter = None
+ if isinstance(value, dict):
+ itemiter = iteritems(value)
+ elif not isinstance(value, string_types):
+ try:
+ itemiter = iter(value)
+ except TypeError:
+ pass
+ if itemiter is None:
+ return unicode_urlencode(value)
+ return u'&'.join(unicode_urlencode(k) + '=' +
+ unicode_urlencode(v, for_qs=True)
+ for k, v in itemiter)
+
+
+@evalcontextfilter
+def do_replace(eval_ctx, s, old, new, count=None):
+ """Return a copy of the value with all occurrences of a substring
+ replaced with a new one. The first argument is the substring
+ that should be replaced, the second is the replacement string.
+ If the optional third argument ``count`` is given, only the first
+ ``count`` occurrences are replaced:
+
+ .. sourcecode:: jinja
+
+ {{ "Hello World"|replace("Hello", "Goodbye") }}
+ -> Goodbye World
+
+ {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
+ -> d'oh, d'oh, aaargh
+ """
+ if count is None:
+ count = -1
+ if not eval_ctx.autoescape:
+ return text_type(s).replace(text_type(old), text_type(new), count)
+ if hasattr(old, '__html__') or hasattr(new, '__html__') and \
+ not hasattr(s, '__html__'):
+ s = escape(s)
+ else:
+ s = soft_unicode(s)
+ return s.replace(soft_unicode(old), soft_unicode(new), count)
+
+
+def do_upper(s):
+ """Convert a value to uppercase."""
+ return soft_unicode(s).upper()
+
+
+def do_lower(s):
+ """Convert a value to lowercase."""
+ return soft_unicode(s).lower()
+
+
+@evalcontextfilter
+def do_xmlattr(_eval_ctx, d, autospace=True):
+ """Create an SGML/XML attribute string based on the items in a dict.
+ All values that are neither `none` nor `undefined` are automatically
+ escaped:
+
+ .. sourcecode:: html+jinja
+
+ <ul{{ {'class': 'my_list', 'missing': none,
+ 'id': 'list-%d'|format(variable)}|xmlattr }}>
+ ...
+ </ul>
+
+ Results in something like this:
+
+ .. sourcecode:: html
+
+ <ul class="my_list" id="list-42">
+ ...
+ </ul>
+
+ As you can see it automatically prepends a space in front of the item
+ if the filter returned something unless the second parameter is false.
+ """
+ rv = u' '.join(
+ u'%s="%s"' % (escape(key), escape(value))
+ for key, value in iteritems(d)
+ if value is not None and not isinstance(value, Undefined)
+ )
+ if autospace and rv:
+ rv = u' ' + rv
+ if _eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+def do_capitalize(s):
+ """Capitalize a value. The first character will be uppercase, all others
+ lowercase.
+ """
+ return soft_unicode(s).capitalize()
+
+
+def do_title(s):
+ """Return a titlecased version of the value. I.e. words will start with
+ uppercase letters, all remaining characters are lowercase.
+ """
+ rv = []
+ for item in re.compile(r'([-\s]+)(?u)').split(soft_unicode(s)):
+ if not item:
+ continue
+ rv.append(item[0].upper() + item[1:].lower())
+ return ''.join(rv)
+
+
+def do_dictsort(value, case_sensitive=False, by='key'):
+ """Sort a dict and yield (key, value) pairs. Because python dicts are
+ unsorted you may want to use this function to order them by either
+ key or value:
+
+ .. sourcecode:: jinja
+
+ {% for item in mydict|dictsort %}
+ sort the dict by key, case insensitive
+
+ {% for item in mydict|dictsort(true) %}
+ sort the dict by key, case sensitive
+
+ {% for item in mydict|dictsort(false, 'value') %}
+ sort the dict by value, case insensitive
+ """
+ if by == 'key':
+ pos = 0
+ elif by == 'value':
+ pos = 1
+ else:
+ raise FilterArgumentError('You can only sort by either '
+ '"key" or "value"')
+ def sort_func(item):
+ value = item[pos]
+ if isinstance(value, string_types) and not case_sensitive:
+ value = value.lower()
+ return value
+
+ return sorted(value.items(), key=sort_func)
+
+
+@environmentfilter
+def do_sort(environment, value, reverse=False, case_sensitive=False,
+ attribute=None):
+ """Sort an iterable. Per default it sorts ascending, if you pass it
+ true as first argument it will reverse the sorting.
+
+ If the iterable is made of strings the third parameter can be used to
+ control the case sensitiveness of the comparison which is disabled by
+ default.
+
+ .. sourcecode:: jinja
+
+ {% for item in iterable|sort %}
+ ...
+ {% endfor %}
+
+ It is also possible to sort by an attribute (for example to sort
+ by the date of an object) by specifying the `attribute` parameter:
+
+ .. sourcecode:: jinja
+
+ {% for item in iterable|sort(attribute='date') %}
+ ...
+ {% endfor %}
+
+ .. versionchanged:: 2.6
+ The `attribute` parameter was added.
+ """
+ if not case_sensitive:
+ def sort_func(item):
+ if isinstance(item, string_types):
+ item = item.lower()
+ return item
+ else:
+ sort_func = None
+ if attribute is not None:
+ getter = make_attrgetter(environment, attribute)
+ def sort_func(item, processor=sort_func or (lambda x: x)):
+ return processor(getter(item))
+ return sorted(value, key=sort_func, reverse=reverse)
+
+
+def do_default(value, default_value=u'', boolean=False):
+ """If the value is undefined it will return the passed default value,
+ otherwise the value of the variable:
+
+ .. sourcecode:: jinja
+
+ {{ my_variable|default('my_variable is not defined') }}
+
+ This will output the value of ``my_variable`` if the variable was
+ defined, otherwise ``'my_variable is not defined'``. If you want
+ to use default with variables that evaluate to false you have to
+ set the second parameter to `true`:
+
+ .. sourcecode:: jinja
+
+ {{ ''|default('the string was empty', true) }}
+ """
+ if isinstance(value, Undefined) or (boolean and not value):
+ return default_value
+ return value
+
+
+@evalcontextfilter
+def do_join(eval_ctx, value, d=u'', attribute=None):
+ """Return a string which is the concatenation of the strings in the
+ sequence. The separator between elements is an empty string per
+ default, you can define it with the optional parameter:
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|join('|') }}
+ -> 1|2|3
+
+ {{ [1, 2, 3]|join }}
+ -> 123
+
+ It is also possible to join certain attributes of an object:
+
+ .. sourcecode:: jinja
+
+ {{ users|join(', ', attribute='username') }}
+
+ .. versionadded:: 2.6
+ The `attribute` parameter was added.
+ """
+ if attribute is not None:
+ value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
+
+ # no automatic escaping? joining is a lot easier then
+ if not eval_ctx.autoescape:
+ return text_type(d).join(imap(text_type, value))
+
+ # if the delimiter doesn't have an html representation we check
+ # if any of the items has. If yes we do a coercion to Markup
+ if not hasattr(d, '__html__'):
+ value = list(value)
+ do_escape = False
+ for idx, item in enumerate(value):
+ if hasattr(item, '__html__'):
+ do_escape = True
+ else:
+ value[idx] = text_type(item)
+ if do_escape:
+ d = escape(d)
+ else:
+ d = text_type(d)
+ return d.join(value)
+
+ # no html involved, so normal joining
+ return soft_unicode(d).join(imap(soft_unicode, value))
+
+
+def do_center(value, width=80):
+ """Centers the value in a field of a given width."""
+ return text_type(value).center(width)
+
+
+@environmentfilter
+def do_first(environment, seq):
+ """Return the first item of a sequence."""
+ try:
+ return next(iter(seq))
+ except StopIteration:
+ return environment.undefined('No first item, sequence was empty.')
+
+
+@environmentfilter
+def do_last(environment, seq):
+ """Return the last item of a sequence."""
+ try:
+ return next(iter(reversed(seq)))
+ except StopIteration:
+ return environment.undefined('No last item, sequence was empty.')
+
+
+@environmentfilter
+def do_random(environment, seq):
+ """Return a random item from the sequence."""
+ try:
+ return choice(seq)
+ except IndexError:
+ return environment.undefined('No random item, sequence was empty.')
+
+
+def do_filesizeformat(value, binary=False):
+ """Format the value like a 'human-readable' file size (i.e. 13 kB,
+ 4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
+ Giga, etc.), if the second parameter is set to `True` the binary
+ prefixes are used (Mebi, Gibi).
+ """
+ bytes = float(value)
+ base = binary and 1024 or 1000
+ prefixes = [
+ (binary and 'KiB' or 'kB'),
+ (binary and 'MiB' or 'MB'),
+ (binary and 'GiB' or 'GB'),
+ (binary and 'TiB' or 'TB'),
+ (binary and 'PiB' or 'PB'),
+ (binary and 'EiB' or 'EB'),
+ (binary and 'ZiB' or 'ZB'),
+ (binary and 'YiB' or 'YB')
+ ]
+ if bytes == 1:
+ return '1 Byte'
+ elif bytes < base:
+ return '%d Bytes' % bytes
+ else:
+ for i, prefix in enumerate(prefixes):
+ unit = base ** (i + 2)
+ if bytes < unit:
+ return '%.1f %s' % ((base * bytes / unit), prefix)
+ return '%.1f %s' % ((base * bytes / unit), prefix)
+
+
+def do_pprint(value, verbose=False):
+ """Pretty print a variable. Useful for debugging.
+
+ With Jinja 1.2 onwards you can pass it a parameter. If this parameter
+ is truthy the output will be more verbose (this requires `pretty`)
+ """
+ return pformat(value, verbose=verbose)
+
+
+@evalcontextfilter
+def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
+ target=None):
+ """Converts URLs in plain text into clickable links.
+
+ If you pass the filter an additional integer it will shorten the urls
+ to that number. Also a third argument exists that makes the urls
+ "nofollow":
+
+ .. sourcecode:: jinja
+
+ {{ mytext|urlize(40, true) }}
+ links are shortened to 40 chars and defined with rel="nofollow"
+
+ If *target* is specified, the ``target`` attribute will be added to the
+ ``<a>`` tag:
+
+ .. sourcecode:: jinja
+
+ {{ mytext|urlize(40, target='_blank') }}
+
+ .. versionchanged:: 2.8+
+ The *target* parameter was added.
+ """
+ rv = urlize(value, trim_url_limit, nofollow, target)
+ if eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+def do_indent(s, width=4, indentfirst=False):
+ """Return a copy of the passed string, each line indented by
+ 4 spaces. The first line is not indented. If you want to
+ change the number of spaces or indent the first line too
+ you can pass additional parameters to the filter:
+
+ .. sourcecode:: jinja
+
+ {{ mytext|indent(2, true) }}
+ indent by two spaces and indent the first line too.
+ """
+ indention = u' ' * width
+ rv = (u'\n' + indention).join(s.splitlines())
+ if indentfirst:
+ rv = indention + rv
+ return rv
+
+
+def do_truncate(s, length=255, killwords=False, end='...'):
+ """Return a truncated copy of the string. The length is specified
+ with the first parameter which defaults to ``255``. If the second
+ parameter is ``true`` the filter will cut the text at length. Otherwise
+ it will discard the last word. If the text was in fact
+ truncated it will append an ellipsis sign (``"..."``). If you want a
+ different ellipsis sign than ``"..."`` you can specify it using the
+ third parameter.
+
+ .. sourcecode:: jinja
+
+ {{ "foo bar baz"|truncate(9) }}
+ -> "foo ..."
+ {{ "foo bar baz"|truncate(9, True) }}
+ -> "foo ba..."
+
+ """
+ if len(s) <= length:
+ return s
+ elif killwords:
+ return s[:length - len(end)] + end
+
+ result = s[:length - len(end)].rsplit(' ', 1)[0]
+ if len(result) < length:
+ result += ' '
+ return result + end
+
+
+@environmentfilter
+def do_wordwrap(environment, s, width=79, break_long_words=True,
+ wrapstring=None):
+ """
+ Return a copy of the string passed to the filter wrapped after
+ ``79`` characters. You can override this default using the first
+ parameter. If you set the second parameter to `false` Jinja will not
+ split words apart if they are longer than `width`. By default, the newlines
+ will be the default newlines for the environment, but this can be changed
+ using the wrapstring keyword argument.
+
+ .. versionadded:: 2.7
+ Added support for the `wrapstring` parameter.
+ """
+ if not wrapstring:
+ wrapstring = environment.newline_sequence
+ import textwrap
+ return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words))
+
+
+def do_wordcount(s):
+ """Count the words in that string."""
+ return len(_word_re.findall(s))
+
+
+def do_int(value, default=0, base=10):
+ """Convert the value into an integer. If the
+ conversion doesn't work it will return ``0``. You can
+ override this default using the first parameter. You
+ can also override the default base (10) in the second
+ parameter, which handles input with prefixes such as
+ 0b, 0o and 0x for bases 2, 8 and 16 respectively.
+ """
+ try:
+ return int(value, base)
+ except (TypeError, ValueError):
+ # this quirk is necessary so that "42.23"|int gives 42.
+ try:
+ return int(float(value))
+ except (TypeError, ValueError):
+ return default
+
+
+def do_float(value, default=0.0):
+ """Convert the value into a floating point number. If the
+ conversion doesn't work it will return ``0.0``. You can
+ override this default using the first parameter.
+ """
+ try:
+ return float(value)
+ except (TypeError, ValueError):
+ return default
+
+
+def do_format(value, *args, **kwargs):
+ """
+ Apply python string formatting on an object:
+
+ .. sourcecode:: jinja
+
+ {{ "%s - %s"|format("Hello?", "Foo!") }}
+ -> Hello? - Foo!
+ """
+ if args and kwargs:
+ raise FilterArgumentError('can\'t handle positional and keyword '
+ 'arguments at the same time')
+ return soft_unicode(value) % (kwargs or args)
+
+
+def do_trim(value):
+ """Strip leading and trailing whitespace."""
+ return soft_unicode(value).strip()
+
+
+def do_striptags(value):
+ """Strip SGML/XML tags and replace adjacent whitespace by one space.
+ """
+ if hasattr(value, '__html__'):
+ value = value.__html__()
+ return Markup(text_type(value)).striptags()
+
+
+def do_slice(value, slices, fill_with=None):
+ """Slice an iterator and return a list of lists containing
+ those items. Useful if you want to create a div containing
+ three ul tags that represent columns:
+
+ .. sourcecode:: html+jinja
+
+ <div class="columwrapper">
+ {%- for column in items|slice(3) %}
+ <ul class="column-{{ loop.index }}">
+ {%- for item in column %}
+ <li>{{ item }}</li>
+ {%- endfor %}
+ </ul>
+ {%- endfor %}
+ </div>
+
+ If you pass it a second argument it's used to fill missing
+ values on the last iteration.
+ """
+ seq = list(value)
+ length = len(seq)
+ items_per_slice = length // slices
+ slices_with_extra = length % slices
+ offset = 0
+ for slice_number in range(slices):
+ start = offset + slice_number * items_per_slice
+ if slice_number < slices_with_extra:
+ offset += 1
+ end = offset + (slice_number + 1) * items_per_slice
+ tmp = seq[start:end]
+ if fill_with is not None and slice_number >= slices_with_extra:
+ tmp.append(fill_with)
+ yield tmp
+
+
+def do_batch(value, linecount, fill_with=None):
+ """
+ A filter that batches items. It works pretty much like `slice`
+ just the other way round. It returns a list of lists with the
+ given number of items. If you provide a second parameter this
+ is used to fill up missing items. See this example:
+
+ .. sourcecode:: html+jinja
+
+ <table>
+ {%- for row in items|batch(3, '&nbsp;') %}
+ <tr>
+ {%- for column in row %}
+ <td>{{ column }}</td>
+ {%- endfor %}
+ </tr>
+ {%- endfor %}
+ </table>
+ """
+ tmp = []
+ for item in value:
+ if len(tmp) == linecount:
+ yield tmp
+ tmp = []
+ tmp.append(item)
+ if tmp:
+ if fill_with is not None and len(tmp) < linecount:
+ tmp += [fill_with] * (linecount - len(tmp))
+ yield tmp
+
+
+def do_round(value, precision=0, method='common'):
+ """Round the number to a given precision. The first
+ parameter specifies the precision (default is ``0``), the
+ second the rounding method:
+
+ - ``'common'`` rounds either up or down
+ - ``'ceil'`` always rounds up
+ - ``'floor'`` always rounds down
+
+ If you don't specify a method ``'common'`` is used.
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round }}
+ -> 43.0
+ {{ 42.55|round(1, 'floor') }}
+ -> 42.5
+
+ Note that even if rounded to 0 precision, a float is returned. If
+ you need a real integer, pipe it through `int`:
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round|int }}
+ -> 43
+ """
+ if not method in ('common', 'ceil', 'floor'):
+ raise FilterArgumentError('method must be common, ceil or floor')
+ if method == 'common':
+ return round(value, precision)
+ func = getattr(math, method)
+ return func(value * (10 ** precision)) / (10 ** precision)
+
+
+@environmentfilter
+def do_groupby(environment, value, attribute):
+ """Group a sequence of objects by a common attribute.
+
+ If you for example have a list of dicts or objects that represent persons
+ with `gender`, `first_name` and `last_name` attributes and you want to
+ group all users by genders you can do something like the following
+ snippet:
+
+ .. sourcecode:: html+jinja
+
+ <ul>
+ {% for group in persons|groupby('gender') %}
+ <li>{{ group.grouper }}<ul>
+ {% for person in group.list %}
+ <li>{{ person.first_name }} {{ person.last_name }}</li>
+ {% endfor %}</ul></li>
+ {% endfor %}
+ </ul>
+
+ Additionally it's possible to use tuple unpacking for the grouper and
+ list:
+
+ .. sourcecode:: html+jinja
+
+ <ul>
+ {% for grouper, list in persons|groupby('gender') %}
+ ...
+ {% endfor %}
+ </ul>
+
+ As you can see the item we're grouping by is stored in the `grouper`
+ attribute and the `list` contains all the objects that have this grouper
+ in common.
+
+ .. versionchanged:: 2.6
+ It's now possible to use dotted notation to group by the child
+ attribute of another attribute.
+ """
+ expr = make_attrgetter(environment, attribute)
+ return sorted(map(_GroupTuple, groupby(sorted(value, key=expr), expr)))
+
+
+class _GroupTuple(tuple):
+ __slots__ = ()
+ grouper = property(itemgetter(0))
+ list = property(itemgetter(1))
+
+ def __new__(cls, xxx_todo_changeme):
+ (key, value) = xxx_todo_changeme
+ return tuple.__new__(cls, (key, list(value)))
+
+
+@environmentfilter
+def do_sum(environment, iterable, attribute=None, start=0):
+ """Returns the sum of a sequence of numbers plus the value of parameter
+ 'start' (which defaults to 0). When the sequence is empty it returns
+ start.
+
+ It is also possible to sum up only certain attributes:
+
+ .. sourcecode:: jinja
+
+ Total: {{ items|sum(attribute='price') }}
+
+ .. versionchanged:: 2.6
+ The `attribute` parameter was added to allow summing up over
+ attributes. Also the `start` parameter was moved on to the right.
+ """
+ if attribute is not None:
+ iterable = imap(make_attrgetter(environment, attribute), iterable)
+ return sum(iterable, start)
+
+
+def do_list(value):
+ """Convert the value into a list. If it was a string the returned list
+ will be a list of characters.
+ """
+ return list(value)
+
+
+def do_mark_safe(value):
+ """Mark the value as safe which means that in an environment with automatic
+ escaping enabled this variable will not be escaped.
+ """
+ return Markup(value)
+
+
+def do_mark_unsafe(value):
+ """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
+ return text_type(value)
+
+
+def do_reverse(value):
+ """Reverse the object or return an iterator that iterates over it the other
+ way round.
+ """
+ if isinstance(value, string_types):
+ return value[::-1]
+ try:
+ return reversed(value)
+ except TypeError:
+ try:
+ rv = list(value)
+ rv.reverse()
+ return rv
+ except TypeError:
+ raise FilterArgumentError('argument must be iterable')
+
+
+@environmentfilter
+def do_attr(environment, obj, name):
+ """Get an attribute of an object. ``foo|attr("bar")`` works like
+ ``foo.bar`` just that always an attribute is returned and items are not
+ looked up.
+
+ See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
+ """
+ try:
+ name = str(name)
+ except UnicodeError:
+ pass
+ else:
+ try:
+ value = getattr(obj, name)
+ except AttributeError:
+ pass
+ else:
+ if environment.sandboxed and not \
+ environment.is_safe_attribute(obj, name, value):
+ return environment.unsafe_undefined(obj, name)
+ return value
+ return environment.undefined(obj=obj, name=name)
+
+
+@contextfilter
+def do_map(*args, **kwargs):
+ """Applies a filter on a sequence of objects or looks up an attribute.
+ This is useful when dealing with lists of objects but you are really
+ only interested in a certain value of it.
+
+ The basic usage is mapping on an attribute. Imagine you have a list
+ of users but you are only interested in a list of usernames:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ users|map(attribute='username')|join(', ') }}
+
+ Alternatively you can let it invoke a filter by passing the name of the
+ filter and the arguments afterwards. A good example would be applying a
+ text conversion filter on a sequence:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ titles|map('lower')|join(', ') }}
+
+ .. versionadded:: 2.7
+ """
+ context = args[0]
+ seq = args[1]
+
+ if len(args) == 2 and 'attribute' in kwargs:
+ attribute = kwargs.pop('attribute')
+ if kwargs:
+ raise FilterArgumentError('Unexpected keyword argument %r' %
+ next(iter(kwargs)))
+ func = make_attrgetter(context.environment, attribute)
+ else:
+ try:
+ name = args[2]
+ args = args[3:]
+ except LookupError:
+ raise FilterArgumentError('map requires a filter argument')
+ func = lambda item: context.environment.call_filter(
+ name, item, args, kwargs, context=context)
+
+ if seq:
+ for item in seq:
+ yield func(item)
+
+
+@contextfilter
+def do_select(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to the object and only
+ selecting the ones with the test succeeding.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|select("odd") }}
+ {{ numbers|select("odd") }}
+
+ .. versionadded:: 2.7
+ """
+ return _select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@contextfilter
+def do_reject(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to the object and
+ rejecting the ones with the test succeeding.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|reject("odd") }}
+
+ .. versionadded:: 2.7
+ """
+ return _select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@contextfilter
+def do_selectattr(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to an attribute of an
+ object and only selecting the ones with the test succeeding.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ users|selectattr("is_active") }}
+ {{ users|selectattr("email", "none") }}
+
+ .. versionadded:: 2.7
+ """
+ return _select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@contextfilter
+def do_rejectattr(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to an attribute of an
+ object or the attribute and rejecting the ones with the test succeeding.
+
+ .. sourcecode:: jinja
+
+ {{ users|rejectattr("is_active") }}
+ {{ users|rejectattr("email", "none") }}
+
+ .. versionadded:: 2.7
+ """
+ return _select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+def _select_or_reject(args, kwargs, modfunc, lookup_attr):
+ context = args[0]
+ seq = args[1]
+ if lookup_attr:
+ try:
+ attr = args[2]
+ except LookupError:
+ raise FilterArgumentError('Missing parameter for attribute name')
+ transfunc = make_attrgetter(context.environment, attr)
+ off = 1
+ else:
+ off = 0
+ transfunc = lambda x: x
+
+ try:
+ name = args[2 + off]
+ args = args[3 + off:]
+ func = lambda item: context.environment.call_test(
+ name, item, args, kwargs)
+ except LookupError:
+ func = bool
+
+ if seq:
+ for item in seq:
+ if modfunc(func(transfunc(item))):
+ yield item
+
+
+FILTERS = {
+ 'abs': abs,
+ 'attr': do_attr,
+ 'batch': do_batch,
+ 'capitalize': do_capitalize,
+ 'center': do_center,
+ 'count': len,
+ 'd': do_default,
+ 'default': do_default,
+ 'dictsort': do_dictsort,
+ 'e': escape,
+ 'escape': escape,
+ 'filesizeformat': do_filesizeformat,
+ 'first': do_first,
+ 'float': do_float,
+ 'forceescape': do_forceescape,
+ 'format': do_format,
+ 'groupby': do_groupby,
+ 'indent': do_indent,
+ 'int': do_int,
+ 'join': do_join,
+ 'last': do_last,
+ 'length': len,
+ 'list': do_list,
+ 'lower': do_lower,
+ 'map': do_map,
+ 'pprint': do_pprint,
+ 'random': do_random,
+ 'reject': do_reject,
+ 'rejectattr': do_rejectattr,
+ 'replace': do_replace,
+ 'reverse': do_reverse,
+ 'round': do_round,
+ 'safe': do_mark_safe,
+ 'select': do_select,
+ 'selectattr': do_selectattr,
+ 'slice': do_slice,
+ 'sort': do_sort,
+ 'string': soft_unicode,
+ 'striptags': do_striptags,
+ 'sum': do_sum,
+ 'title': do_title,
+ 'trim': do_trim,
+ 'truncate': do_truncate,
+ 'upper': do_upper,
+ 'urlencode': do_urlencode,
+ 'urlize': do_urlize,
+ 'wordcount': do_wordcount,
+ 'wordwrap': do_wordwrap,
+ 'xmlattr': do_xmlattr,
+}
diff --git a/deps/v8/third_party/jinja2/get_jinja2.sh b/deps/v8/third_party/jinja2/get_jinja2.sh
new file mode 100755
index 0000000000..0018349ebc
--- /dev/null
+++ b/deps/v8/third_party/jinja2/get_jinja2.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+# Download and extract Jinja2
+# Homepage:
+# http://jinja.pocoo.org/
+# Installation instructions:
+# http://jinja.pocoo.org/docs/intro/#from-the-tarball-release
+# Download page:
+# https://pypi.python.org/pypi/Jinja2
+PACKAGE='Jinja2'
+VERSION='2.8'
+SRC_URL='https://pypi.python.org/packages/f2/2f/0b98b06a345a761bec91a079ccae392d282690c2d8272e708f4d10829e22/Jinja2-2.8.tar.gz'
+PACKAGE_DIR='jinja2'
+
+CHROMIUM_FILES="README.chromium OWNERS get_jinja2.sh"
+EXTRA_FILES='LICENSE AUTHORS'
+REMOVE_FILES='testsuite'
+
+FILENAME="$(basename $SRC_URL)"
+MD5_FILENAME="$FILENAME.md5"
+SHA512_FILENAME="$FILENAME.sha512"
+CHROMIUM_FILES+=" $MD5_FILENAME $SHA512_FILENAME"
+
+BUILD_DIR="$PACKAGE-$VERSION"
+THIRD_PARTY="$(dirname $(realpath $(dirname "${BASH_SOURCE[0]}")))"
+INSTALL_DIR="$THIRD_PARTY/$PACKAGE_DIR"
+OUT_DIR="$INSTALL_DIR/$BUILD_DIR/$PACKAGE_DIR"
+OLD_DIR="$THIRD_PARTY/$PACKAGE_DIR.old"
+
+function check_hashes {
+ # Hashes generated via:
+ # FILENAME=Jinja2-2.8.tar.gz
+ # md5sum "$FILENAME" > "$FILENAME.md5"
+ # sha512sum "$FILENAME" > "$FILENAME.sha512"
+ # unset FILENAME
+
+ # MD5
+ if ! [ -f "$MD5_FILENAME" ]
+ then
+ echo "MD5 hash file $MD5_FILENAME not found, could not verify archive"
+ exit 1
+ fi
+
+ # 32-digit hash, followed by filename
+ MD5_HASHFILE_REGEX="^[0-9a-f]{32} $FILENAME"
+ if ! grep --extended-regex --line-regex --silent \
+ "$MD5_HASHFILE_REGEX" "$MD5_FILENAME"
+ then
+ echo "MD5 hash file $MD5_FILENAME does not contain hash for $FILENAME," \
+ 'could not verify archive'
+ echo 'Hash file contents are:'
+ cat "$MD5_FILENAME"
+ exit 1
+ fi
+
+ if ! md5sum --check "$MD5_FILENAME"
+ then
+ echo 'MD5 hash does not match,' \
+ "archive file $FILENAME corrupt or compromised!"
+ exit 1
+ fi
+
+ # SHA-512
+ if ! [ -f "$SHA512_FILENAME" ]
+ then
+ echo "SHA-512 hash file $SHA512_FILENAME not found," \
+ 'could not verify archive'
+ exit 1
+ fi
+
+ # 128-digit hash, followed by filename
+ SHA512_HASHFILE_REGEX="^[0-9a-f]{128} $FILENAME"
+ if ! grep --extended-regex --line-regex --silent \
+ "$SHA512_HASHFILE_REGEX" "$SHA512_FILENAME"
+ then
+ echo "SHA-512 hash file $SHA512_FILENAME does not contain hash for" \
+ "$FILENAME, could not verify archive"
+ echo 'Hash file contents are:'
+ cat "$SHA512_FILENAME"
+ exit 1
+ fi
+
+ if ! sha512sum --check "$SHA512_FILENAME"
+ then
+ echo 'SHA-512 hash does not match,' \
+ "archive file $FILENAME corrupt or compromised!"
+ exit 1
+ fi
+}
+
+
+################################################################################
+# Body
+
+cd "$INSTALL_DIR"
+echo "Downloading $SRC_URL"
+curl --remote-name "$SRC_URL"
+check_hashes
+tar xvzf "$FILENAME"
+# Copy extra files over
+for FILE in $CHROMIUM_FILES
+do
+ cp "$FILE" "$OUT_DIR"
+done
+
+cd "$BUILD_DIR"
+for FILE in $EXTRA_FILES
+do
+ cp "$FILE" "$OUT_DIR"
+done
+
+cd "$OUT_DIR"
+for FILE in $REMOVE_FILES
+do
+ rm -fr "$FILE"
+done
+
+# Replace with new directory
+cd ..
+mv "$INSTALL_DIR" "$OLD_DIR"
+mv "$PACKAGE_DIR" "$INSTALL_DIR"
+cd "$INSTALL_DIR"
+rm -fr "$OLD_DIR"
diff --git a/deps/v8/third_party/jinja2/lexer.py b/deps/v8/third_party/jinja2/lexer.py
new file mode 100644
index 0000000000..c8dac214ed
--- /dev/null
+++ b/deps/v8/third_party/jinja2/lexer.py
@@ -0,0 +1,734 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.lexer
+ ~~~~~~~~~~~~
+
+ This module implements a Jinja / Python combination lexer. The
+ `Lexer` class provided by this module is used to do some preprocessing
+ for Jinja.
+
+ On the one hand it filters out invalid operators like the bitshift
+ operators we don't allow in templates. On the other hand it separates
+ template code and python code in expressions.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+
+from operator import itemgetter
+from collections import deque
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2.utils import LRUCache
+from jinja2._compat import iteritems, implements_iterator, text_type, \
+ intern, PY2
+
+
+# cache for the lexers. Exists in order to be able to have multiple
+# environments with the same lexer
+_lexer_cache = LRUCache(50)
+
+# static regular expressions
+whitespace_re = re.compile(r'\s+', re.U)
+string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+ r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
+integer_re = re.compile(r'\d+')
+
+# we use the unicode identifier rule if this python version is able
+# to handle unicode identifiers, otherwise the standard ASCII one.
+try:
+ compile('föö', '<unknown>', 'eval')
+except SyntaxError:
+ name_re = re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b')
+else:
+ from jinja2 import _stringdefs
+ name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start,
+ _stringdefs.xid_continue))
+
+float_re = re.compile(r'(?<!\.)\d+\.\d+')
+newline_re = re.compile(r'(\r\n|\r|\n)')
+
+# intern the tokens and keep references to them
+TOKEN_ADD = intern('add')
+TOKEN_ASSIGN = intern('assign')
+TOKEN_COLON = intern('colon')
+TOKEN_COMMA = intern('comma')
+TOKEN_DIV = intern('div')
+TOKEN_DOT = intern('dot')
+TOKEN_EQ = intern('eq')
+TOKEN_FLOORDIV = intern('floordiv')
+TOKEN_GT = intern('gt')
+TOKEN_GTEQ = intern('gteq')
+TOKEN_LBRACE = intern('lbrace')
+TOKEN_LBRACKET = intern('lbracket')
+TOKEN_LPAREN = intern('lparen')
+TOKEN_LT = intern('lt')
+TOKEN_LTEQ = intern('lteq')
+TOKEN_MOD = intern('mod')
+TOKEN_MUL = intern('mul')
+TOKEN_NE = intern('ne')
+TOKEN_PIPE = intern('pipe')
+TOKEN_POW = intern('pow')
+TOKEN_RBRACE = intern('rbrace')
+TOKEN_RBRACKET = intern('rbracket')
+TOKEN_RPAREN = intern('rparen')
+TOKEN_SEMICOLON = intern('semicolon')
+TOKEN_SUB = intern('sub')
+TOKEN_TILDE = intern('tilde')
+TOKEN_WHITESPACE = intern('whitespace')
+TOKEN_FLOAT = intern('float')
+TOKEN_INTEGER = intern('integer')
+TOKEN_NAME = intern('name')
+TOKEN_STRING = intern('string')
+TOKEN_OPERATOR = intern('operator')
+TOKEN_BLOCK_BEGIN = intern('block_begin')
+TOKEN_BLOCK_END = intern('block_end')
+TOKEN_VARIABLE_BEGIN = intern('variable_begin')
+TOKEN_VARIABLE_END = intern('variable_end')
+TOKEN_RAW_BEGIN = intern('raw_begin')
+TOKEN_RAW_END = intern('raw_end')
+TOKEN_COMMENT_BEGIN = intern('comment_begin')
+TOKEN_COMMENT_END = intern('comment_end')
+TOKEN_COMMENT = intern('comment')
+TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
+TOKEN_LINESTATEMENT_END = intern('linestatement_end')
+TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
+TOKEN_LINECOMMENT_END = intern('linecomment_end')
+TOKEN_LINECOMMENT = intern('linecomment')
+TOKEN_DATA = intern('data')
+TOKEN_INITIAL = intern('initial')
+TOKEN_EOF = intern('eof')
+
+# bind operators to token types
+operators = {
+ '+': TOKEN_ADD,
+ '-': TOKEN_SUB,
+ '/': TOKEN_DIV,
+ '//': TOKEN_FLOORDIV,
+ '*': TOKEN_MUL,
+ '%': TOKEN_MOD,
+ '**': TOKEN_POW,
+ '~': TOKEN_TILDE,
+ '[': TOKEN_LBRACKET,
+ ']': TOKEN_RBRACKET,
+ '(': TOKEN_LPAREN,
+ ')': TOKEN_RPAREN,
+ '{': TOKEN_LBRACE,
+ '}': TOKEN_RBRACE,
+ '==': TOKEN_EQ,
+ '!=': TOKEN_NE,
+ '>': TOKEN_GT,
+ '>=': TOKEN_GTEQ,
+ '<': TOKEN_LT,
+ '<=': TOKEN_LTEQ,
+ '=': TOKEN_ASSIGN,
+ '.': TOKEN_DOT,
+ ':': TOKEN_COLON,
+ '|': TOKEN_PIPE,
+ ',': TOKEN_COMMA,
+ ';': TOKEN_SEMICOLON
+}
+
+reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
+assert len(operators) == len(reverse_operators), 'operators dropped'
+operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
+ sorted(operators, key=lambda x: -len(x))))
+
+ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
+ TOKEN_COMMENT_END, TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT])
+ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
+ TOKEN_COMMENT, TOKEN_LINECOMMENT])
+
+
+def _describe_token_type(token_type):
+ if token_type in reverse_operators:
+ return reverse_operators[token_type]
+ return {
+ TOKEN_COMMENT_BEGIN: 'begin of comment',
+ TOKEN_COMMENT_END: 'end of comment',
+ TOKEN_COMMENT: 'comment',
+ TOKEN_LINECOMMENT: 'comment',
+ TOKEN_BLOCK_BEGIN: 'begin of statement block',
+ TOKEN_BLOCK_END: 'end of statement block',
+ TOKEN_VARIABLE_BEGIN: 'begin of print statement',
+ TOKEN_VARIABLE_END: 'end of print statement',
+ TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
+ TOKEN_LINESTATEMENT_END: 'end of line statement',
+ TOKEN_DATA: 'template data / text',
+ TOKEN_EOF: 'end of template'
+ }.get(token_type, token_type)
+
+
+def describe_token(token):
+ """Returns a description of the token."""
+ if token.type == 'name':
+ return token.value
+ return _describe_token_type(token.type)
+
+
+def describe_token_expr(expr):
+ """Like `describe_token` but for token expressions."""
+ if ':' in expr:
+ type, value = expr.split(':', 1)
+ if type == 'name':
+ return value
+ else:
+ type = expr
+ return _describe_token_type(type)
+
+
+def count_newlines(value):
+ """Count the number of newline characters in the string. This is
+ useful for extensions that filter a stream.
+ """
+ return len(newline_re.findall(value))
+
+
+def compile_rules(environment):
+ """Compiles all the rules from the environment into a list of rules."""
+ e = re.escape
+ rules = [
+ (len(environment.comment_start_string), 'comment',
+ e(environment.comment_start_string)),
+ (len(environment.block_start_string), 'block',
+ e(environment.block_start_string)),
+ (len(environment.variable_start_string), 'variable',
+ e(environment.variable_start_string))
+ ]
+
+ if environment.line_statement_prefix is not None:
+ rules.append((len(environment.line_statement_prefix), 'linestatement',
+ r'^[ \t\v]*' + e(environment.line_statement_prefix)))
+ if environment.line_comment_prefix is not None:
+ rules.append((len(environment.line_comment_prefix), 'linecomment',
+ r'(?:^|(?<=\S))[^\S\r\n]*' +
+ e(environment.line_comment_prefix)))
+
+ return [x[1:] for x in sorted(rules, reverse=True)]
+
+
+class Failure(object):
+ """Class that raises a `TemplateSyntaxError` if called.
+ Used by the `Lexer` to specify known errors.
+ """
+
+ def __init__(self, message, cls=TemplateSyntaxError):
+ self.message = message
+ self.error_class = cls
+
+ def __call__(self, lineno, filename):
+ raise self.error_class(self.message, lineno, filename)
+
+
+class Token(tuple):
+ """Token class."""
+ __slots__ = ()
+ lineno, type, value = (property(itemgetter(x)) for x in range(3))
+
+ def __new__(cls, lineno, type, value):
+ return tuple.__new__(cls, (lineno, intern(str(type)), value))
+
+ def __str__(self):
+ if self.type in reverse_operators:
+ return reverse_operators[self.type]
+ elif self.type == 'name':
+ return self.value
+ return self.type
+
+ def test(self, expr):
+ """Test a token against a token expression. This can either be a
+ token type or ``'token_type:token_value'``. This can only test
+ against string values and types.
+ """
+ # here we do a regular string equality check as test_any is usually
+ # passed an iterable of not interned strings.
+ if self.type == expr:
+ return True
+ elif ':' in expr:
+ return expr.split(':', 1) == [self.type, self.value]
+ return False
+
+ def test_any(self, *iterable):
+ """Test against multiple token expressions."""
+ for expr in iterable:
+ if self.test(expr):
+ return True
+ return False
+
+ def __repr__(self):
+ return 'Token(%r, %r, %r)' % (
+ self.lineno,
+ self.type,
+ self.value
+ )
+
+
+@implements_iterator
+class TokenStreamIterator(object):
+ """The iterator for tokenstreams. Iterate over the stream
+ until the eof token is reached.
+ """
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.stream.current
+ if token.type is TOKEN_EOF:
+ self.stream.close()
+ raise StopIteration()
+ next(self.stream)
+ return token
+
+
+@implements_iterator
+class TokenStream(object):
+ """A token stream is an iterable that yields :class:`Token`\s. The
+ parser however does not iterate over it but calls :meth:`next` to go
+ one token ahead. The current active token is stored as :attr:`current`.
+ """
+
+ def __init__(self, generator, name, filename):
+ self._iter = iter(generator)
+ self._pushed = deque()
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.current = Token(1, TOKEN_INITIAL, '')
+ next(self)
+
+ def __iter__(self):
+ return TokenStreamIterator(self)
+
+ def __bool__(self):
+ return bool(self._pushed) or self.current.type is not TOKEN_EOF
+ __nonzero__ = __bool__ # py2
+
+ eos = property(lambda x: not x, doc="Are we at the end of the stream?")
+
+ def push(self, token):
+ """Push a token back to the stream."""
+ self._pushed.append(token)
+
+ def look(self):
+ """Look at the next token."""
+ old_token = next(self)
+ result = self.current
+ self.push(result)
+ self.current = old_token
+ return result
+
+ def skip(self, n=1):
+ """Go n tokens ahead."""
+ for x in range(n):
+ next(self)
+
+ def next_if(self, expr):
+ """Perform the token test and return the token if it matched.
+ Otherwise the return value is `None`.
+ """
+ if self.current.test(expr):
+ return next(self)
+
+ def skip_if(self, expr):
+ """Like :meth:`next_if` but only returns `True` or `False`."""
+ return self.next_if(expr) is not None
+
+ def __next__(self):
+ """Go one token ahead and return the old one"""
+ rv = self.current
+ if self._pushed:
+ self.current = self._pushed.popleft()
+ elif self.current.type is not TOKEN_EOF:
+ try:
+ self.current = next(self._iter)
+ except StopIteration:
+ self.close()
+ return rv
+
+ def close(self):
+ """Close the stream."""
+ self.current = Token(self.current.lineno, TOKEN_EOF, '')
+ self._iter = None
+ self.closed = True
+
+ def expect(self, expr):
+ """Expect a given token type and return it. This accepts the same
+ argument as :meth:`jinja2.lexer.Token.test`.
+ """
+ if not self.current.test(expr):
+ expr = describe_token_expr(expr)
+ if self.current.type is TOKEN_EOF:
+ raise TemplateSyntaxError('unexpected end of template, '
+ 'expected %r.' % expr,
+ self.current.lineno,
+ self.name, self.filename)
+ raise TemplateSyntaxError("expected token %r, got %r" %
+ (expr, describe_token(self.current)),
+ self.current.lineno,
+ self.name, self.filename)
+ try:
+ return self.current
+ finally:
+ next(self)
+
+
+def get_lexer(environment):
+ """Return a lexer which is probably cached."""
+ key = (environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline)
+ lexer = _lexer_cache.get(key)
+ if lexer is None:
+ lexer = Lexer(environment)
+ _lexer_cache[key] = lexer
+ return lexer
+
+
+class Lexer(object):
+ """Class that implements a lexer for a given environment. Automatically
+ created by the environment class, usually you don't have to do that.
+
+ Note that the lexer is not automatically bound to an environment.
+ Multiple environments can share the same lexer.
+ """
+
+ def __init__(self, environment):
+ # shortcuts
+ c = lambda x: re.compile(x, re.M | re.S)
+ e = re.escape
+
+ # lexing rules for tags
+ tag_rules = [
+ (whitespace_re, TOKEN_WHITESPACE, None),
+ (float_re, TOKEN_FLOAT, None),
+ (integer_re, TOKEN_INTEGER, None),
+ (name_re, TOKEN_NAME, None),
+ (string_re, TOKEN_STRING, None),
+ (operator_re, TOKEN_OPERATOR, None)
+ ]
+
+ # assemble the root lexing rule. because "|" is ungreedy
+ # we have to sort by length so that the lexer continues working
+ # as expected when we have parsing rules like <% for block and
+ # <%= for variables. (if someone wants asp like syntax)
+ # variables are just part of the rules if variable processing
+ # is required.
+ root_tag_rules = compile_rules(environment)
+
+ # block suffix if trimming is enabled
+ block_suffix_re = environment.trim_blocks and '\\n?' or ''
+
+ # strip leading spaces if lstrip_blocks is enabled
+ prefix_re = {}
+ if environment.lstrip_blocks:
+ # use '{%+' to manually disable lstrip_blocks behavior
+ no_lstrip_re = e('+')
+ # detect overlap between block and variable or comment strings
+ block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
+ # make sure we don't mistake a block for a variable or a comment
+ m = block_diff.match(environment.comment_start_string)
+ no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+ m = block_diff.match(environment.variable_start_string)
+ no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+
+ # detect overlap between comment and variable strings
+ comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
+ m = comment_diff.match(environment.variable_start_string)
+ no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
+
+ lstrip_re = r'^[ \t]*'
+ block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
+ lstrip_re,
+ e(environment.block_start_string),
+ no_lstrip_re,
+ e(environment.block_start_string),
+ )
+ comment_prefix_re = r'%s%s%s|%s\+?' % (
+ lstrip_re,
+ e(environment.comment_start_string),
+ no_variable_re,
+ e(environment.comment_start_string),
+ )
+ prefix_re['block'] = block_prefix_re
+ prefix_re['comment'] = comment_prefix_re
+ else:
+ block_prefix_re = '%s' % e(environment.block_start_string)
+
+ self.newline_sequence = environment.newline_sequence
+ self.keep_trailing_newline = environment.keep_trailing_newline
+
+ # global lexing rules
+ self.rules = {
+ 'root': [
+ # directives
+ (c('(.*?)(?:%s)' % '|'.join(
+ [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
+ e(environment.block_start_string),
+ block_prefix_re,
+ e(environment.block_end_string),
+ e(environment.block_end_string)
+ )] + [
+ r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
+ for n, r in root_tag_rules
+ ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
+ # data
+ (c('.+'), TOKEN_DATA, None)
+ ],
+ # comments
+ TOKEN_COMMENT_BEGIN: [
+ (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
+ e(environment.comment_end_string),
+ e(environment.comment_end_string),
+ block_suffix_re
+ )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
+ (c('(.)'), (Failure('Missing end of comment tag'),), None)
+ ],
+ # blocks
+ TOKEN_BLOCK_BEGIN: [
+ (c('(?:\-%s\s*|%s)%s' % (
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re
+ )), TOKEN_BLOCK_END, '#pop'),
+ ] + tag_rules,
+ # variables
+ TOKEN_VARIABLE_BEGIN: [
+ (c('\-%s\s*|%s' % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string)
+ )), TOKEN_VARIABLE_END, '#pop')
+ ] + tag_rules,
+ # raw block
+ TOKEN_RAW_BEGIN: [
+ (c('(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
+ e(environment.block_start_string),
+ block_prefix_re,
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re
+ )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
+ (c('(.)'), (Failure('Missing end of raw directive'),), None)
+ ],
+ # line statements
+ TOKEN_LINESTATEMENT_BEGIN: [
+ (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
+ ] + tag_rules,
+ # line comments
+ TOKEN_LINECOMMENT_BEGIN: [
+ (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
+ TOKEN_LINECOMMENT_END), '#pop')
+ ]
+ }
+
+ def _normalize_newlines(self, value):
+ """Called for strings and template data to normalize it to unicode."""
+ return newline_re.sub(self.newline_sequence, value)
+
+ def tokenize(self, source, name=None, filename=None, state=None):
+ """Calls tokeniter + tokenize and wraps it in a token stream.
+ """
+ stream = self.tokeniter(source, name, filename, state)
+ return TokenStream(self.wrap(stream, name, filename), name, filename)
+
+ def wrap(self, stream, name=None, filename=None):
+ """This is called with the stream as returned by `tokenize` and wraps
+ every token in a :class:`Token` and converts the value.
+ """
+ for lineno, token, value in stream:
+ if token in ignored_tokens:
+ continue
+ elif token == 'linestatement_begin':
+ token = 'block_begin'
+ elif token == 'linestatement_end':
+ token = 'block_end'
+ # we are not interested in those tokens in the parser
+ elif token in ('raw_begin', 'raw_end'):
+ continue
+ elif token == 'data':
+ value = self._normalize_newlines(value)
+ elif token == 'keyword':
+ token = value
+ elif token == 'name':
+ value = str(value)
+ elif token == 'string':
+ # try to unescape string
+ try:
+ value = self._normalize_newlines(value[1:-1]) \
+ .encode('ascii', 'backslashreplace') \
+ .decode('unicode-escape')
+ except Exception as e:
+ msg = str(e).split(':')[-1].strip()
+ raise TemplateSyntaxError(msg, lineno, name, filename)
+ # if we can express it as bytestring (ascii only)
+ # we do that for support of semi broken APIs
+ # as datetime.datetime.strftime. On python 3 this
+ # call becomes a noop thanks to 2to3
+ if PY2:
+ try:
+ value = value.encode('ascii')
+ except UnicodeError:
+ pass
+ elif token == 'integer':
+ value = int(value)
+ elif token == 'float':
+ value = float(value)
+ elif token == 'operator':
+ token = operators[value]
+ yield Token(lineno, token, value)
+
+ def tokeniter(self, source, name, filename=None, state=None):
+ """This method tokenizes the text and returns the tokens in a
+ generator. Use this method if you just want to tokenize a template.
+ """
+ source = text_type(source)
+ lines = source.splitlines()
+ if self.keep_trailing_newline and source:
+ for newline in ('\r\n', '\r', '\n'):
+ if source.endswith(newline):
+ lines.append('')
+ break
+ source = '\n'.join(lines)
+ pos = 0
+ lineno = 1
+ stack = ['root']
+ if state is not None and state != 'root':
+ assert state in ('variable', 'block'), 'invalid state'
+ stack.append(state + '_begin')
+ else:
+ state = 'root'
+ statetokens = self.rules[stack[-1]]
+ source_length = len(source)
+
+ balancing_stack = []
+
+ while 1:
+ # tokenizer loop
+ for regex, tokens, new_state in statetokens:
+ m = regex.match(source, pos)
+ # if no match we try again with the next rule
+ if m is None:
+ continue
+
+ # we only match blocks and variables if braces / parentheses
+ # are balanced. continue parsing with the lower rule which
+ # is the operator rule. do this only if the end tags look
+ # like operators
+ if balancing_stack and \
+ tokens in ('variable_end', 'block_end',
+ 'linestatement_end'):
+ continue
+
+ # tuples support more options
+ if isinstance(tokens, tuple):
+ for idx, token in enumerate(tokens):
+ # failure group
+ if token.__class__ is Failure:
+ raise token(lineno, filename)
+ # bygroup is a bit more complex, in that case we
+ # yield for the current token the first named
+ # group that matched
+ elif token == '#bygroup':
+ for key, value in iteritems(m.groupdict()):
+ if value is not None:
+ yield lineno, key, value
+ lineno += value.count('\n')
+ break
+ else:
+ raise RuntimeError('%r wanted to resolve '
+ 'the token dynamically'
+ ' but no group matched'
+ % regex)
+ # normal group
+ else:
+ data = m.group(idx + 1)
+ if data or token not in ignore_if_empty:
+ yield lineno, token, data
+ lineno += data.count('\n')
+
+ # plain string tokens are just yielded as is.
+ else:
+ data = m.group()
+ # update brace/parentheses balance
+ if tokens == 'operator':
+ if data == '{':
+ balancing_stack.append('}')
+ elif data == '(':
+ balancing_stack.append(')')
+ elif data == '[':
+ balancing_stack.append(']')
+ elif data in ('}', ')', ']'):
+ if not balancing_stack:
+ raise TemplateSyntaxError('unexpected \'%s\'' %
+ data, lineno, name,
+ filename)
+ expected_op = balancing_stack.pop()
+ if expected_op != data:
+ raise TemplateSyntaxError('unexpected \'%s\', '
+ 'expected \'%s\'' %
+ (data, expected_op),
+ lineno, name,
+ filename)
+ # yield items
+ if data or tokens not in ignore_if_empty:
+ yield lineno, tokens, data
+ lineno += data.count('\n')
+
+ # fetch new position into new variable so that we can check
+ # if there is a internal parsing error which would result
+ # in an infinite loop
+ pos2 = m.end()
+
+ # handle state changes
+ if new_state is not None:
+ # remove the uppermost state
+ if new_state == '#pop':
+ stack.pop()
+ # resolve the new state by group checking
+ elif new_state == '#bygroup':
+ for key, value in iteritems(m.groupdict()):
+ if value is not None:
+ stack.append(key)
+ break
+ else:
+ raise RuntimeError('%r wanted to resolve the '
+ 'new state dynamically but'
+ ' no group matched' %
+ regex)
+ # direct state name given
+ else:
+ stack.append(new_state)
+ statetokens = self.rules[stack[-1]]
+ # we are still at the same position and no stack change.
+ # this means a loop without break condition, avoid that and
+ # raise error
+ elif pos2 == pos:
+ raise RuntimeError('%r yielded empty string without '
+ 'stack change' % regex)
+ # publish new function and start again
+ pos = pos2
+ break
+ # if loop terminated without break we haven't found a single match
+ # either we are at the end of the file or we have a problem
+ else:
+ # end of text
+ if pos >= source_length:
+ return
+ # something went wrong
+ raise TemplateSyntaxError('unexpected char %r at %d' %
+ (source[pos], pos), lineno,
+ name, filename)
diff --git a/deps/v8/third_party/jinja2/loaders.py b/deps/v8/third_party/jinja2/loaders.py
new file mode 100644
index 0000000000..44aa3925a2
--- /dev/null
+++ b/deps/v8/third_party/jinja2/loaders.py
@@ -0,0 +1,481 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.loaders
+ ~~~~~~~~~~~~~~
+
+ Jinja loader classes.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import weakref
+from types import ModuleType
+from os import path
+from hashlib import sha1
+from jinja2.exceptions import TemplateNotFound
+from jinja2.utils import open_if_exists, internalcode
+from jinja2._compat import string_types, iteritems
+
+
+def split_template_path(template):
+ """Split a path into segments and perform a sanity check. If it detects
+ '..' in the path it will raise a `TemplateNotFound` error.
+ """
+ pieces = []
+ for piece in template.split('/'):
+ if path.sep in piece \
+ or (path.altsep and path.altsep in piece) or \
+ piece == path.pardir:
+ raise TemplateNotFound(template)
+ elif piece and piece != '.':
+ pieces.append(piece)
+ return pieces
+
+
+class BaseLoader(object):
+ """Baseclass for all loaders. Subclass this and override `get_source` to
+ implement a custom loading mechanism. The environment provides a
+ `get_template` method that calls the loader's `load` method to get the
+ :class:`Template` object.
+
+ A very basic example for a loader that looks up templates on the file
+ system could look like this::
+
+ from jinja2 import BaseLoader, TemplateNotFound
+ from os.path import join, exists, getmtime
+
+ class MyLoader(BaseLoader):
+
+ def __init__(self, path):
+ self.path = path
+
+ def get_source(self, environment, template):
+ path = join(self.path, template)
+ if not exists(path):
+ raise TemplateNotFound(template)
+ mtime = getmtime(path)
+ with file(path) as f:
+ source = f.read().decode('utf-8')
+ return source, path, lambda: mtime == getmtime(path)
+ """
+
+ #: if set to `False` it indicates that the loader cannot provide access
+ #: to the source of templates.
+ #:
+ #: .. versionadded:: 2.4
+ has_source_access = True
+
+ def get_source(self, environment, template):
+ """Get the template source, filename and reload helper for a template.
+ It's passed the environment and template name and has to return a
+ tuple in the form ``(source, filename, uptodate)`` or raise a
+ `TemplateNotFound` error if it can't locate the template.
+
+ The source part of the returned tuple must be the source of the
+ template as unicode string or a ASCII bytestring. The filename should
+ be the name of the file on the filesystem if it was loaded from there,
+ otherwise `None`. The filename is used by python for the tracebacks
+ if no loader extension is used.
+
+ The last item in the tuple is the `uptodate` function. If auto
+ reloading is enabled it's always called to check if the template
+ changed. No arguments are passed so the function must store the
+ old state somewhere (for example in a closure). If it returns `False`
+ the template will be reloaded.
+ """
+ if not self.has_source_access:
+ raise RuntimeError('%s cannot provide access to the source' %
+ self.__class__.__name__)
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ """Iterates over all templates. If the loader does not support that
+ it should raise a :exc:`TypeError` which is the default behavior.
+ """
+ raise TypeError('this loader cannot iterate over all templates')
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ """Loads a template. This method looks up the template in the cache
+ or loads one by calling :meth:`get_source`. Subclasses should not
+ override this method as loaders working on collections of other
+ loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
+ will not call this method but `get_source` directly.
+ """
+ code = None
+ if globals is None:
+ globals = {}
+
+ # first we try to get the source for this template together
+ # with the filename and the uptodate function.
+ source, filename, uptodate = self.get_source(environment, name)
+
+ # try to load the code from the bytecode cache if there is a
+ # bytecode cache configured.
+ bcc = environment.bytecode_cache
+ if bcc is not None:
+ bucket = bcc.get_bucket(environment, name, filename, source)
+ code = bucket.code
+
+ # if we don't have code so far (not cached, no longer up to
+ # date) etc. we compile the template
+ if code is None:
+ code = environment.compile(source, name, filename)
+
+ # if the bytecode cache is available and the bucket doesn't
+ # have a code so far, we give the bucket the new code and put
+ # it back to the bytecode cache.
+ if bcc is not None and bucket.code is None:
+ bucket.code = code
+ bcc.set_bucket(bucket)
+
+ return environment.template_class.from_code(environment, code,
+ globals, uptodate)
+
+
+class FileSystemLoader(BaseLoader):
+ """Loads templates from the file system. This loader can find templates
+ in folders on the file system and is the preferred way to load them.
+
+ The loader takes the path to the templates as string, or if multiple
+ locations are wanted a list of them which is then looked up in the
+ given order::
+
+ >>> loader = FileSystemLoader('/path/to/templates')
+ >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
+
+ Per default the template encoding is ``'utf-8'`` which can be changed
+ by setting the `encoding` parameter to something else.
+
+ To follow symbolic links, set the *followlinks* parameter to ``True``::
+
+ >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
+
+ .. versionchanged:: 2.8+
+ The *followlinks* parameter was added.
+ """
+
+ def __init__(self, searchpath, encoding='utf-8', followlinks=False):
+ if isinstance(searchpath, string_types):
+ searchpath = [searchpath]
+ self.searchpath = list(searchpath)
+ self.encoding = encoding
+ self.followlinks = followlinks
+
+ def get_source(self, environment, template):
+ pieces = split_template_path(template)
+ for searchpath in self.searchpath:
+ filename = path.join(searchpath, *pieces)
+ f = open_if_exists(filename)
+ if f is None:
+ continue
+ try:
+ contents = f.read().decode(self.encoding)
+ finally:
+ f.close()
+
+ mtime = path.getmtime(filename)
+
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+ return contents, filename, uptodate
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ found = set()
+ for searchpath in self.searchpath:
+ walk_dir = os.walk(searchpath, followlinks=self.followlinks)
+ for dirpath, dirnames, filenames in walk_dir:
+ for filename in filenames:
+ template = os.path.join(dirpath, filename) \
+ [len(searchpath):].strip(os.path.sep) \
+ .replace(os.path.sep, '/')
+ if template[:2] == './':
+ template = template[2:]
+ if template not in found:
+ found.add(template)
+ return sorted(found)
+
+
+class PackageLoader(BaseLoader):
+ """Load templates from python eggs or packages. It is constructed with
+ the name of the python package and the path to the templates in that
+ package::
+
+ loader = PackageLoader('mypackage', 'views')
+
+ If the package path is not given, ``'templates'`` is assumed.
+
+ Per default the template encoding is ``'utf-8'`` which can be changed
+ by setting the `encoding` parameter to something else. Due to the nature
+ of eggs it's only possible to reload templates if the package was loaded
+ from the file system and not a zip file.
+ """
+
+ def __init__(self, package_name, package_path='templates',
+ encoding='utf-8'):
+ from pkg_resources import DefaultProvider, ResourceManager, \
+ get_provider
+ provider = get_provider(package_name)
+ self.encoding = encoding
+ self.manager = ResourceManager()
+ self.filesystem_bound = isinstance(provider, DefaultProvider)
+ self.provider = provider
+ self.package_path = package_path
+
+ def get_source(self, environment, template):
+ pieces = split_template_path(template)
+ p = '/'.join((self.package_path,) + tuple(pieces))
+ if not self.provider.has_resource(p):
+ raise TemplateNotFound(template)
+
+ filename = uptodate = None
+ if self.filesystem_bound:
+ filename = self.provider.get_resource_filename(self.manager, p)
+ mtime = path.getmtime(filename)
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+
+ source = self.provider.get_resource_string(self.manager, p)
+ return source.decode(self.encoding), filename, uptodate
+
+ def list_templates(self):
+ path = self.package_path
+ if path[:2] == './':
+ path = path[2:]
+ elif path == '.':
+ path = ''
+ offset = len(path)
+ results = []
+ def _walk(path):
+ for filename in self.provider.resource_listdir(path):
+ fullname = path + '/' + filename
+ if self.provider.resource_isdir(fullname):
+ _walk(fullname)
+ else:
+ results.append(fullname[offset:].lstrip('/'))
+ _walk(path)
+ results.sort()
+ return results
+
+
+class DictLoader(BaseLoader):
+ """Loads a template from a python dict. It's passed a dict of unicode
+ strings bound to template names. This loader is useful for unittesting:
+
+ >>> loader = DictLoader({'index.html': 'source here'})
+
+ Because auto reloading is rarely useful this is disabled per default.
+ """
+
+ def __init__(self, mapping):
+ self.mapping = mapping
+
+ def get_source(self, environment, template):
+ if template in self.mapping:
+ source = self.mapping[template]
+ return source, None, lambda: source == self.mapping.get(template)
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ return sorted(self.mapping)
+
+
+class FunctionLoader(BaseLoader):
+ """A loader that is passed a function which does the loading. The
+ function receives the name of the template and has to return either
+ an unicode string with the template source, a tuple in the form ``(source,
+ filename, uptodatefunc)`` or `None` if the template does not exist.
+
+ >>> def load_template(name):
+ ... if name == 'index.html':
+ ... return '...'
+ ...
+ >>> loader = FunctionLoader(load_template)
+
+ The `uptodatefunc` is a function that is called if autoreload is enabled
+ and has to return `True` if the template is still up to date. For more
+ details have a look at :meth:`BaseLoader.get_source` which has the same
+ return value.
+ """
+
+ def __init__(self, load_func):
+ self.load_func = load_func
+
+ def get_source(self, environment, template):
+ rv = self.load_func(template)
+ if rv is None:
+ raise TemplateNotFound(template)
+ elif isinstance(rv, string_types):
+ return rv, None, None
+ return rv
+
+
+class PrefixLoader(BaseLoader):
+ """A loader that is passed a dict of loaders where each loader is bound
+ to a prefix. The prefix is delimited from the template by a slash per
+ default, which can be changed by setting the `delimiter` argument to
+ something else::
+
+ loader = PrefixLoader({
+ 'app1': PackageLoader('mypackage.app1'),
+ 'app2': PackageLoader('mypackage.app2')
+ })
+
+ By loading ``'app1/index.html'`` the file from the app1 package is loaded,
+ by loading ``'app2/index.html'`` the file from the second.
+ """
+
+ def __init__(self, mapping, delimiter='/'):
+ self.mapping = mapping
+ self.delimiter = delimiter
+
+ def get_loader(self, template):
+ try:
+ prefix, name = template.split(self.delimiter, 1)
+ loader = self.mapping[prefix]
+ except (ValueError, KeyError):
+ raise TemplateNotFound(template)
+ return loader, name
+
+ def get_source(self, environment, template):
+ loader, name = self.get_loader(template)
+ try:
+ return loader.get_source(environment, name)
+ except TemplateNotFound:
+            # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ loader, local_name = self.get_loader(name)
+ try:
+ return loader.load(environment, local_name, globals)
+ except TemplateNotFound:
+            # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(name)
+
+ def list_templates(self):
+ result = []
+ for prefix, loader in iteritems(self.mapping):
+ for template in loader.list_templates():
+ result.append(prefix + self.delimiter + template)
+ return result
+
+
+class ChoiceLoader(BaseLoader):
+ """This loader works like the `PrefixLoader` just that no prefix is
+ specified. If a template could not be found by one loader the next one
+ is tried.
+
+ >>> loader = ChoiceLoader([
+ ... FileSystemLoader('/path/to/user/templates'),
+ ... FileSystemLoader('/path/to/system/templates')
+ ... ])
+
+ This is useful if you want to allow users to override builtin templates
+ from a different location.
+ """
+
+ def __init__(self, loaders):
+ self.loaders = loaders
+
+ def get_source(self, environment, template):
+ for loader in self.loaders:
+ try:
+ return loader.get_source(environment, template)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ for loader in self.loaders:
+ try:
+ return loader.load(environment, name, globals)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(name)
+
+ def list_templates(self):
+ found = set()
+ for loader in self.loaders:
+ found.update(loader.list_templates())
+ return sorted(found)
+
+
+class _TemplateModule(ModuleType):
+ """Like a normal module but with support for weak references"""
+
+
+class ModuleLoader(BaseLoader):
+ """This loader loads templates from precompiled templates.
+
+ Example usage:
+
+ >>> loader = ChoiceLoader([
+ ... ModuleLoader('/path/to/compiled/templates'),
+ ... FileSystemLoader('/path/to/templates')
+ ... ])
+
+ Templates can be precompiled with :meth:`Environment.compile_templates`.
+ """
+
+ has_source_access = False
+
+ def __init__(self, path):
+ package_name = '_jinja2_module_templates_%x' % id(self)
+
+ # create a fake module that looks for the templates in the
+ # path given.
+ mod = _TemplateModule(package_name)
+ if isinstance(path, string_types):
+ path = [path]
+ else:
+ path = list(path)
+ mod.__path__ = path
+
+ sys.modules[package_name] = weakref.proxy(mod,
+ lambda x: sys.modules.pop(package_name, None))
+
+ # the only strong reference, the sys.modules entry is weak
+ # so that the garbage collector can remove it once the
+ # loader that created it goes out of business.
+ self.module = mod
+ self.package_name = package_name
+
+ @staticmethod
+ def get_template_key(name):
+ return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
+
+ @staticmethod
+ def get_module_filename(name):
+ return ModuleLoader.get_template_key(name) + '.py'
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ key = self.get_template_key(name)
+ module = '%s.%s' % (self.package_name, key)
+ mod = getattr(self.module, module, None)
+ if mod is None:
+ try:
+ mod = __import__(module, None, None, ['root'])
+ except ImportError:
+ raise TemplateNotFound(name)
+
+ # remove the entry from sys.modules, we only want the attribute
+ # on the module object we have stored on the loader.
+ sys.modules.pop(module, None)
+
+ return environment.template_class.from_module_dict(
+ environment, mod.__dict__, globals)
diff --git a/deps/v8/third_party/jinja2/meta.py b/deps/v8/third_party/jinja2/meta.py
new file mode 100644
index 0000000000..3dbab7c22d
--- /dev/null
+++ b/deps/v8/third_party/jinja2/meta.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.meta
+ ~~~~~~~~~~~
+
+    This module implements various functions that expose information about
+ templates that might be interesting for various kinds of applications.
+
+ :copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.compiler import CodeGenerator
+from jinja2._compat import string_types
+
+
+class TrackingCodeGenerator(CodeGenerator):
+ """We abuse the code generator for introspection."""
+
+ def __init__(self, environment):
+ CodeGenerator.__init__(self, environment, '<introspection>',
+ '<introspection>')
+ self.undeclared_identifiers = set()
+
+ def write(self, x):
+ """Don't write."""
+
+ def pull_locals(self, frame):
+ """Remember all undeclared identifiers."""
+ self.undeclared_identifiers.update(frame.identifiers.undeclared)
+
+
+def find_undeclared_variables(ast):
+ """Returns a set of all variables in the AST that will be looked up from
+ the context at runtime. Because at compile time it's not known which
+ variables will be used depending on the path the execution takes at
+ runtime, all variables are returned.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
+ >>> meta.find_undeclared_variables(ast) == set(['bar'])
+ True
+
+ .. admonition:: Implementation
+
+ Internally the code generator is used for finding undeclared variables.
+ This is good to know because the code generator might raise a
+ :exc:`TemplateAssertionError` during compilation and as a matter of
+ fact this function can currently raise that exception as well.
+ """
+ codegen = TrackingCodeGenerator(ast.environment)
+ codegen.visit(ast)
+ return codegen.undeclared_identifiers
+
+
+def find_referenced_templates(ast):
+ """Finds all the referenced templates from the AST. This will return an
+ iterator over all the hardcoded template extensions, inclusions and
+ imports. If dynamic inheritance or inclusion is used, `None` will be
+ yielded.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
+ >>> list(meta.find_referenced_templates(ast))
+ ['layout.html', None]
+
+ This function is useful for dependency tracking. For example if you want
+ to rebuild parts of the website after a layout template has changed.
+ """
+ for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
+ nodes.Include)):
+ if not isinstance(node.template, nodes.Const):
+ # a tuple with some non consts in there
+ if isinstance(node.template, (nodes.Tuple, nodes.List)):
+ for template_name in node.template.items:
+ # something const, only yield the strings and ignore
+ # non-string consts that really just make no sense
+ if isinstance(template_name, nodes.Const):
+ if isinstance(template_name.value, string_types):
+ yield template_name.value
+ # something dynamic in there
+ else:
+ yield None
+ # something dynamic we don't know about here
+ else:
+ yield None
+ continue
+ # constant is a basestring, direct template name
+ if isinstance(node.template.value, string_types):
+ yield node.template.value
+ # a tuple or list (latter *should* not happen) made of consts,
+ # yield the consts that are strings. We could warn here for
+ # non string values
+ elif isinstance(node, nodes.Include) and \
+ isinstance(node.template.value, (tuple, list)):
+ for template_name in node.template.value:
+ if isinstance(template_name, string_types):
+ yield template_name
+ # something else we don't care about, we could warn here
+ else:
+ yield None
diff --git a/deps/v8/third_party/jinja2/nodes.py b/deps/v8/third_party/jinja2/nodes.py
new file mode 100644
index 0000000000..d32046ce5c
--- /dev/null
+++ b/deps/v8/third_party/jinja2/nodes.py
@@ -0,0 +1,919 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.nodes
+ ~~~~~~~~~~~~
+
+ This module implements additional nodes derived from the ast base node.
+
+ It also provides some node tree helper functions like `in_lineno` and
+ `get_nodes` used by the parser and translator in order to normalize
+ python and jinja nodes.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import types
+import operator
+
+from collections import deque
+from jinja2.utils import Markup
+from jinja2._compat import izip, with_metaclass, text_type
+
+
+#: the types we support for context functions
+_context_function_types = (types.FunctionType, types.MethodType)
+
+
+_binop_to_func = {
+ '*': operator.mul,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '**': operator.pow,
+ '%': operator.mod,
+ '+': operator.add,
+ '-': operator.sub
+}
+
+_uaop_to_func = {
+ 'not': operator.not_,
+ '+': operator.pos,
+ '-': operator.neg
+}
+
+_cmpop_to_func = {
+ 'eq': operator.eq,
+ 'ne': operator.ne,
+ 'gt': operator.gt,
+ 'gteq': operator.ge,
+ 'lt': operator.lt,
+ 'lteq': operator.le,
+ 'in': lambda a, b: a in b,
+ 'notin': lambda a, b: a not in b
+}
+
+
+class Impossible(Exception):
+ """Raised if the node could not perform a requested action."""
+
+
+class NodeType(type):
+ """A metaclass for nodes that handles the field and attribute
+ inheritance. fields and attributes from the parent class are
+ automatically forwarded to the child."""
+
+ def __new__(cls, name, bases, d):
+ for attr in 'fields', 'attributes':
+ storage = []
+ storage.extend(getattr(bases[0], attr, ()))
+ storage.extend(d.get(attr, ()))
+ assert len(bases) == 1, 'multiple inheritance not allowed'
+ assert len(storage) == len(set(storage)), 'layout conflict'
+ d[attr] = tuple(storage)
+ d.setdefault('abstract', False)
+ return type.__new__(cls, name, bases, d)
+
+
+class EvalContext(object):
+ """Holds evaluation time information. Custom attributes can be attached
+ to it in extensions.
+ """
+
+ def __init__(self, environment, template_name=None):
+ self.environment = environment
+ if callable(environment.autoescape):
+ self.autoescape = environment.autoescape(template_name)
+ else:
+ self.autoescape = environment.autoescape
+ self.volatile = False
+
+ def save(self):
+ return self.__dict__.copy()
+
+ def revert(self, old):
+ self.__dict__.clear()
+ self.__dict__.update(old)
+
+
+def get_eval_context(node, ctx):
+ if ctx is None:
+ if node.environment is None:
+ raise RuntimeError('if no eval context is passed, the '
+ 'node must have an attached '
+ 'environment.')
+ return EvalContext(node.environment)
+ return ctx
+
+
+class Node(with_metaclass(NodeType, object)):
+ """Baseclass for all Jinja2 nodes. There are a number of nodes available
+ of different types. There are four major types:
+
+ - :class:`Stmt`: statements
+ - :class:`Expr`: expressions
+ - :class:`Helper`: helper nodes
+ - :class:`Template`: the outermost wrapper node
+
+ All nodes have fields and attributes. Fields may be other nodes, lists,
+ or arbitrary values. Fields are passed to the constructor as regular
+ positional arguments, attributes as keyword arguments. Each node has
+ two attributes: `lineno` (the line number of the node) and `environment`.
+ The `environment` attribute is set at the end of the parsing process for
+ all nodes automatically.
+ """
+ fields = ()
+ attributes = ('lineno', 'environment')
+ abstract = True
+
+ def __init__(self, *fields, **attributes):
+ if self.abstract:
+ raise TypeError('abstract nodes are not instanciable')
+ if fields:
+ if len(fields) != len(self.fields):
+ if not self.fields:
+ raise TypeError('%r takes 0 arguments' %
+ self.__class__.__name__)
+ raise TypeError('%r takes 0 or %d argument%s' % (
+ self.__class__.__name__,
+ len(self.fields),
+ len(self.fields) != 1 and 's' or ''
+ ))
+ for name, arg in izip(self.fields, fields):
+ setattr(self, name, arg)
+ for attr in self.attributes:
+ setattr(self, attr, attributes.pop(attr, None))
+ if attributes:
+ raise TypeError('unknown attribute %r' %
+ next(iter(attributes)))
+
+ def iter_fields(self, exclude=None, only=None):
+ """This method iterates over all fields that are defined and yields
+ ``(key, value)`` tuples. Per default all fields are returned, but
+ it's possible to limit that to some fields by providing the `only`
+ parameter or to exclude some using the `exclude` parameter. Both
+ should be sets or tuples of field names.
+ """
+ for name in self.fields:
+ if (exclude is only is None) or \
+ (exclude is not None and name not in exclude) or \
+ (only is not None and name in only):
+ try:
+ yield name, getattr(self, name)
+ except AttributeError:
+ pass
+
+ def iter_child_nodes(self, exclude=None, only=None):
+ """Iterates over all direct child nodes of the node. This iterates
+        over all fields and yields the values if they are nodes. If the value
+ of a field is a list all the nodes in that list are returned.
+ """
+ for field, item in self.iter_fields(exclude, only):
+ if isinstance(item, list):
+ for n in item:
+ if isinstance(n, Node):
+ yield n
+ elif isinstance(item, Node):
+ yield item
+
+ def find(self, node_type):
+ """Find the first node of a given type. If no such node exists the
+ return value is `None`.
+ """
+ for result in self.find_all(node_type):
+ return result
+
+ def find_all(self, node_type):
+ """Find all the nodes of a given type. If the type is a tuple,
+ the check is performed for any of the tuple items.
+ """
+ for child in self.iter_child_nodes():
+ if isinstance(child, node_type):
+ yield child
+ for result in child.find_all(node_type):
+ yield result
+
+ def set_ctx(self, ctx):
+ """Reset the context of a node and all child nodes. Per default the
+        parser will generate all nodes that have a 'load' context as it's the
+ most common one. This method is used in the parser to set assignment
+ targets and other nodes to a store context.
+ """
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if 'ctx' in node.fields:
+ node.ctx = ctx
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_lineno(self, lineno, override=False):
+ """Set the line numbers of the node and children."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if 'lineno' in node.attributes:
+ if node.lineno is None or override:
+ node.lineno = lineno
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_environment(self, environment):
+ """Set the environment for all nodes."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ node.environment = environment
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def __eq__(self, other):
+ return type(self) is type(other) and \
+ tuple(self.iter_fields()) == tuple(other.iter_fields())
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ # Restore Python 2 hashing behavior on Python 3
+ __hash__ = object.__hash__
+
+ def __repr__(self):
+ return '%s(%s)' % (
+ self.__class__.__name__,
+ ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
+ arg in self.fields)
+ )
+
+
+class Stmt(Node):
+ """Base node for all statements."""
+ abstract = True
+
+
+class Helper(Node):
+ """Nodes that exist in a specific context only."""
+ abstract = True
+
+
+class Template(Node):
+ """Node that represents a template. This must be the outermost node that
+ is passed to the compiler.
+ """
+ fields = ('body',)
+
+
+class Output(Stmt):
+ """A node that holds multiple expressions which are then printed out.
+ This is used both for the `print` statement and the regular template data.
+ """
+ fields = ('nodes',)
+
+
+class Extends(Stmt):
+ """Represents an extends statement."""
+ fields = ('template',)
+
+
+class For(Stmt):
+ """The for loop. `target` is the target for the iteration (usually a
+ :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
+ of nodes that are used as loop-body, and `else_` a list of nodes for the
+ `else` block. If no else node exists it has to be an empty list.
+
+ For filtered nodes an expression can be stored as `test`, otherwise `None`.
+ """
+ fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
+
+
+class If(Stmt):
+ """If `test` is true, `body` is rendered, else `else_`."""
+ fields = ('test', 'body', 'else_')
+
+
+class Macro(Stmt):
+ """A macro definition. `name` is the name of the macro, `args` a list of
+ arguments and `defaults` a list of defaults if there are any. `body` is
+ a list of nodes for the macro body.
+ """
+ fields = ('name', 'args', 'defaults', 'body')
+
+
+class CallBlock(Stmt):
+ """Like a macro without a name but a call instead. `call` is called with
+ the unnamed macro as `caller` argument this node holds.
+ """
+ fields = ('call', 'args', 'defaults', 'body')
+
+
+class FilterBlock(Stmt):
+ """Node for filter sections."""
+ fields = ('body', 'filter')
+
+
+class Block(Stmt):
+ """A node that represents a block."""
+ fields = ('name', 'body', 'scoped')
+
+
+class Include(Stmt):
+ """A node that represents the include tag."""
+ fields = ('template', 'with_context', 'ignore_missing')
+
+
+class Import(Stmt):
+ """A node that represents the import tag."""
+ fields = ('template', 'target', 'with_context')
+
+
+class FromImport(Stmt):
+ """A node that represents the from import tag. It's important to not
+ pass unsafe names to the name attribute. The compiler translates the
+ attribute lookups directly into getattr calls and does *not* use the
+ subscript callback of the interface. As exported variables may not
+ start with double underscores (which the parser asserts) this is not a
+ problem for regular Jinja code, but if this node is used in an extension
+ extra care must be taken.
+
+ The list of names may contain tuples if aliases are wanted.
+ """
+ fields = ('template', 'names', 'with_context')
+
+
+class ExprStmt(Stmt):
+ """A statement that evaluates an expression and discards the result."""
+ fields = ('node',)
+
+
+class Assign(Stmt):
+ """Assigns an expression to a target."""
+ fields = ('target', 'node')
+
+
+class AssignBlock(Stmt):
+ """Assigns a block to a target."""
+ fields = ('target', 'body')
+
+
+class Expr(Node):
+ """Baseclass for all expressions."""
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ """Return the value of the expression as constant or raise
+ :exc:`Impossible` if this was not possible.
+
+ An :class:`EvalContext` can be provided, if none is given
+ a default context is created which requires the nodes to have
+ an attached environment.
+
+ .. versionchanged:: 2.4
+ the `eval_ctx` parameter was added.
+ """
+ raise Impossible()
+
+ def can_assign(self):
+ """Check if it's possible to assign something to this node."""
+ return False
+
+
+class BinExpr(Expr):
+ """Baseclass for all binary expressions."""
+ fields = ('left', 'right')
+ operator = None
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ # intercepted operators cannot be folded at compile time
+ if self.environment.sandboxed and \
+ self.operator in self.environment.intercepted_binops:
+ raise Impossible()
+ f = _binop_to_func[self.operator]
+ try:
+ return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+
+class UnaryExpr(Expr):
+ """Baseclass for all unary expressions."""
+ fields = ('node',)
+ operator = None
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ # intercepted operators cannot be folded at compile time
+ if self.environment.sandboxed and \
+ self.operator in self.environment.intercepted_unops:
+ raise Impossible()
+ f = _uaop_to_func[self.operator]
+ try:
+ return f(self.node.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+
+class Name(Expr):
+ """Looks up a name or stores a value in a name.
+ The `ctx` of the node can be one of the following values:
+
+ - `store`: store a value in the name
+ - `load`: load that name
+ - `param`: like `store` but if the name was defined as function parameter.
+ """
+ fields = ('name', 'ctx')
+
+ def can_assign(self):
+ return self.name not in ('true', 'false', 'none',
+ 'True', 'False', 'None')
+
+
+class Literal(Expr):
+ """Baseclass for literals."""
+ abstract = True
+
+
+class Const(Literal):
+ """All constant values. The parser will return this node for simple
+ constants such as ``42`` or ``"foo"`` but it can be used to store more
+ complex values such as lists too. Only constants with a safe
+ representation (objects where ``eval(repr(x)) == x`` is true).
+ """
+ fields = ('value',)
+
+ def as_const(self, eval_ctx=None):
+ return self.value
+
+ @classmethod
+ def from_untrusted(cls, value, lineno=None, environment=None):
+ """Return a const object if the value is representable as
+ constant value in the generated code, otherwise it will raise
+ an `Impossible` exception.
+ """
+ from .compiler import has_safe_repr
+ if not has_safe_repr(value):
+ raise Impossible()
+ return cls(value, lineno=lineno, environment=environment)
+
+
+class TemplateData(Literal):
+ """A constant template string."""
+ fields = ('data',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ if eval_ctx.autoescape:
+ return Markup(self.data)
+ return self.data
+
+
+class Tuple(Literal):
+ """For loop unpacking and some other things like multiple arguments
+ for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
+ is used for loading the names or storing.
+ """
+ fields = ('items', 'ctx')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return tuple(x.as_const(eval_ctx) for x in self.items)
+
+ def can_assign(self):
+ for item in self.items:
+ if not item.can_assign():
+ return False
+ return True
+
+
+class List(Literal):
+ """Any list literal such as ``[1, 2, 3]``"""
+ fields = ('items',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return [x.as_const(eval_ctx) for x in self.items]
+
+
+class Dict(Literal):
+ """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
+ :class:`Pair` nodes.
+ """
+ fields = ('items',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return dict(x.as_const(eval_ctx) for x in self.items)
+
+
+class Pair(Helper):
+ """A key, value pair for dicts."""
+ fields = ('key', 'value')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
+
+
+class Keyword(Helper):
+ """A key, value pair for keyword arguments where key is a string."""
+ fields = ('key', 'value')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key, self.value.as_const(eval_ctx)
+
+
+class CondExpr(Expr):
+ """A conditional expression (inline if expression). (``{{
+ foo if bar else baz }}``)
+ """
+ fields = ('test', 'expr1', 'expr2')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.test.as_const(eval_ctx):
+ return self.expr1.as_const(eval_ctx)
+
+ # if we evaluate to an undefined object, we better do that at runtime
+ if self.expr2 is None:
+ raise Impossible()
+
+ return self.expr2.as_const(eval_ctx)
+
+
+class Filter(Expr):
+ """This node applies a filter on an expression. `name` is the name of
+ the filter, the rest of the fields are the same as for :class:`Call`.
+
+ If the `node` of a filter is `None` the contents of the last buffer are
+ filtered. Buffers are created by macros and filter blocks.
+ """
+ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile or self.node is None:
+ raise Impossible()
+ # we have to be careful here because we call filter_ below.
+ # if this variable would be called filter, 2to3 would wrap the
+        # call in a list because it is assuming we are talking about the
+ # builtin filter function here which no longer returns a list in
+ # python 3. because of that, do not rename filter_ to filter!
+ filter_ = self.environment.filters.get(self.name)
+ if filter_ is None or getattr(filter_, 'contextfilter', False):
+ raise Impossible()
+ obj = self.node.as_const(eval_ctx)
+ args = [x.as_const(eval_ctx) for x in self.args]
+ if getattr(filter_, 'evalcontextfilter', False):
+ args.insert(0, eval_ctx)
+ elif getattr(filter_, 'environmentfilter', False):
+ args.insert(0, self.environment)
+ kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
+ if self.dyn_args is not None:
+ try:
+ args.extend(self.dyn_args.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+ if self.dyn_kwargs is not None:
+ try:
+ kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+ try:
+ return filter_(obj, *args, **kwargs)
+ except Exception:
+ raise Impossible()
+
+
+class Test(Expr):
+ """Applies a test on an expression. `name` is the name of the test, the
+ rest of the fields are the same as for :class:`Call`.
+ """
+ fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+
+class Call(Expr):
+ """Calls an expression. `args` is a list of arguments, `kwargs` a list
+ of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+ and `dyn_kwargs` has to be either `None` or a node that is used as
+ node for dynamic positional (``*args``) or keyword (``**kwargs``)
+ arguments.
+ """
+ fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ obj = self.node.as_const(eval_ctx)
+
+ # don't evaluate context functions
+ args = [x.as_const(eval_ctx) for x in self.args]
+ if isinstance(obj, _context_function_types):
+ if getattr(obj, 'contextfunction', False):
+ raise Impossible()
+ elif getattr(obj, 'evalcontextfunction', False):
+ args.insert(0, eval_ctx)
+ elif getattr(obj, 'environmentfunction', False):
+ args.insert(0, self.environment)
+
+ kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
+ if self.dyn_args is not None:
+ try:
+ args.extend(self.dyn_args.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+ if self.dyn_kwargs is not None:
+ try:
+ kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+ try:
+ return obj(*args, **kwargs)
+ except Exception:
+ raise Impossible()
+
+
+class Getitem(Expr):
+ """Get an attribute or item from an expression and prefer the item."""
+ fields = ('node', 'arg', 'ctx')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.ctx != 'load':
+ raise Impossible()
+ try:
+ return self.environment.getitem(self.node.as_const(eval_ctx),
+ self.arg.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+ def can_assign(self):
+ return False
+
+
+class Getattr(Expr):
+ """Get an attribute or item from an expression that is a ascii-only
+ bytestring and prefer the attribute.
+ """
+ fields = ('node', 'attr', 'ctx')
+
+ def as_const(self, eval_ctx=None):
+ if self.ctx != 'load':
+ raise Impossible()
+ try:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.environment.getattr(self.node.as_const(eval_ctx),
+ self.attr)
+ except Exception:
+ raise Impossible()
+
+ def can_assign(self):
+ return False
+
+
+class Slice(Expr):
+ """Represents a slice object. This must only be used as argument for
+ :class:`Subscript`.
+ """
+ fields = ('start', 'stop', 'step')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ def const(obj):
+ if obj is None:
+ return None
+ return obj.as_const(eval_ctx)
+ return slice(const(self.start), const(self.stop), const(self.step))
+
+
+class Concat(Expr):
+ """Concatenates the list of expressions provided after converting them to
+ unicode.
+ """
+ fields = ('nodes',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+
+
+class Compare(Expr):
+ """Compares an expression with some other expressions. `ops` must be a
+ list of :class:`Operand`\s.
+ """
+ fields = ('expr', 'ops')
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ result = value = self.expr.as_const(eval_ctx)
+ try:
+ for op in self.ops:
+ new_value = op.expr.as_const(eval_ctx)
+ result = _cmpop_to_func[op.op](value, new_value)
+ value = new_value
+ except Exception:
+ raise Impossible()
+ return result
+
+
+class Operand(Helper):
+ """Holds an operator and an expression."""
+ fields = ('op', 'expr')
+
+if __debug__:
+ Operand.__doc__ += '\nThe following operators are available: ' + \
+ ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
+ set(_uaop_to_func) | set(_cmpop_to_func)))
+
+
+class Mul(BinExpr):
+ """Multiplies the left with the right node."""
+ operator = '*'
+
+
+class Div(BinExpr):
+ """Divides the left by the right node."""
+ operator = '/'
+
+
+class FloorDiv(BinExpr):
+ """Divides the left by the right node and truncates conver the
+ result into an integer by truncating.
+ """
+ operator = '//'
+
+
+class Add(BinExpr):
+ """Add the left to the right node."""
+ operator = '+'
+
+
+class Sub(BinExpr):
+ """Subtract the right from the left node."""
+ operator = '-'
+
+
+class Mod(BinExpr):
+ """Left modulo right."""
+ operator = '%'
+
+
+class Pow(BinExpr):
+ """Left to the power of right."""
+ operator = '**'
+
+
+class And(BinExpr):
+ """Short circuited AND."""
+ operator = 'and'
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
+
+
+class Or(BinExpr):
+ """Short circuited OR."""
+ operator = 'or'
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
+
+
+class Not(UnaryExpr):
+ """Negate the expression."""
+ operator = 'not'
+
+
+class Neg(UnaryExpr):
+ """Make the expression negative."""
+ operator = '-'
+
+
+class Pos(UnaryExpr):
+ """Make the expression positive (noop for most expressions)"""
+ operator = '+'
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+ """Loads an attribute from the environment object. This is useful for
+ extensions that want to call a callback stored on the environment.
+ """
+ fields = ('name',)
+
+
+class ExtensionAttribute(Expr):
+ """Returns the attribute of an extension bound to the environment.
+ The identifier is the identifier of the :class:`Extension`.
+
+ This node is usually constructed by calling the
+ :meth:`~jinja2.ext.Extension.attr` method on an extension.
+ """
+ fields = ('identifier', 'name')
+
+
+class ImportedName(Expr):
+ """If created with an import name the import name is returned on node
+ access. For example ``ImportedName('cgi.escape')`` returns the `escape`
+ function from the cgi module on evaluation. Imports are optimized by the
+ compiler so there is no need to assign them to local variables.
+ """
+ fields = ('importname',)
+
+
+class InternalName(Expr):
+ """An internal name in the compiler. You cannot create these nodes
+ yourself but the parser provides a
+ :meth:`~jinja2.parser.Parser.free_identifier` method that creates
+ a new identifier for you. This identifier is not available from the
+    template and is not treated specially by the compiler.
+ """
+ fields = ('name',)
+
+ def __init__(self):
+ raise TypeError('Can\'t create internal names. Use the '
+ '`free_identifier` method on a parser.')
+
+
+class MarkSafe(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+ fields = ('expr',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return Markup(self.expr.as_const(eval_ctx))
+
+
+class MarkSafeIfAutoescape(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`) but
+ only if autoescaping is active.
+
+ .. versionadded:: 2.5
+ """
+ fields = ('expr',)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ expr = self.expr.as_const(eval_ctx)
+ if eval_ctx.autoescape:
+ return Markup(expr)
+ return expr
+
+
+class ContextReference(Expr):
+ """Returns the current template context. It can be used like a
+ :class:`Name` node, with a ``'load'`` ctx and will return the
+ current :class:`~jinja2.runtime.Context` object.
+
+ Here an example that assigns the current template name to a
+ variable named `foo`::
+
+ Assign(Name('foo', ctx='store'),
+ Getattr(ContextReference(), 'name'))
+ """
+
+
+class Continue(Stmt):
+ """Continue a loop."""
+
+
+class Break(Stmt):
+ """Break a loop."""
+
+
+class Scope(Stmt):
+ """An artificial scope."""
+ fields = ('body',)
+
+
+class EvalContextModifier(Stmt):
+ """Modifies the eval context. For each option that should be modified,
+ a :class:`Keyword` has to be added to the :attr:`options` list.
+
+ Example to change the `autoescape` setting::
+
+ EvalContextModifier(options=[Keyword('autoescape', Const(True))])
+ """
+ fields = ('options',)
+
+
+class ScopedEvalContextModifier(EvalContextModifier):
+ """Modifies the eval context and reverts it later. Works exactly like
+ :class:`EvalContextModifier` but will only modify the
+ :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
+ """
+ fields = ('body',)
+
+
+# make sure nobody creates custom nodes
+def _failing_new(*args, **kwargs):
+ raise TypeError('can\'t create custom node types')
+NodeType.__new__ = staticmethod(_failing_new); del _failing_new
diff --git a/deps/v8/third_party/jinja2/optimizer.py b/deps/v8/third_party/jinja2/optimizer.py
new file mode 100644
index 0000000000..00eab115e1
--- /dev/null
+++ b/deps/v8/third_party/jinja2/optimizer.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.optimizer
+ ~~~~~~~~~~~~~~~~
+
+ The jinja optimizer is currently trying to constant fold a few expressions
+ and modify the AST in place so that it should be easier to evaluate it.
+
+ Because the AST does not contain all the scoping information and the
+ compiler has to find that out, we cannot do all the optimizations we
+ want. For example loop unrolling doesn't work because unrolled loops would
+ have a different scoping.
+
+ The solution would be a second syntax tree that has the scoping rules stored.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2 import nodes
+from jinja2.visitor import NodeTransformer
+
+
+def optimize(node, environment):
+ """The context hint can be used to perform an static optimization
+ based on the context given."""
+ optimizer = Optimizer(environment)
+ return optimizer.visit(node)
+
+
+class Optimizer(NodeTransformer):
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def visit_If(self, node):
+ """Eliminate dead code."""
+ # do not optimize ifs that have a block inside so that it doesn't
+ # break super().
+ if node.find(nodes.Block) is not None:
+ return self.generic_visit(node)
+ try:
+ val = self.visit(node.test).as_const()
+ except nodes.Impossible:
+ return self.generic_visit(node)
+ if val:
+ body = node.body
+ else:
+ body = node.else_
+ result = []
+ for node in body:
+ result.extend(self.visit_list(node))
+ return result
+
+ def fold(self, node):
+ """Do constant folding."""
+ node = self.generic_visit(node)
+ try:
+ return nodes.Const.from_untrusted(node.as_const(),
+ lineno=node.lineno,
+ environment=self.environment)
+ except nodes.Impossible:
+ return node
+
+ visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
+ visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
+ visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
+ visit_Filter = visit_Test = visit_CondExpr = fold
+ del fold
diff --git a/deps/v8/third_party/jinja2/parser.py b/deps/v8/third_party/jinja2/parser.py
new file mode 100644
index 0000000000..d24da180ea
--- /dev/null
+++ b/deps/v8/third_party/jinja2/parser.py
@@ -0,0 +1,899 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.parser
+ ~~~~~~~~~~~~~
+
+ Implements the template parser.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
+from jinja2.lexer import describe_token, describe_token_expr
+from jinja2._compat import imap
+
+
+_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
+ 'macro', 'include', 'from', 'import',
+ 'set'])
+_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
+
+
+class Parser(object):
+ """This is the central parsing class Jinja2 uses. It's passed to
+ extensions and can be used to parse expressions or statements.
+ """
+
+ def __init__(self, environment, source, name=None, filename=None,
+ state=None):
+ self.environment = environment
+ self.stream = environment._tokenize(source, name, filename, state)
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.extensions = {}
+ for extension in environment.iter_extensions():
+ for tag in extension.tags:
+ self.extensions[tag] = extension.parse
+ self._last_identifier = 0
+ self._tag_stack = []
+ self._end_token_stack = []
+
+ def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
+ """Convenience method that raises `exc` with the message, passed
+ line number or last line number as well as the current name and
+ filename.
+ """
+ if lineno is None:
+ lineno = self.stream.current.lineno
+ raise exc(msg, lineno, self.name, self.filename)
+
+ def _fail_ut_eof(self, name, end_token_stack, lineno):
+ expected = []
+ for exprs in end_token_stack:
+ expected.extend(imap(describe_token_expr, exprs))
+ if end_token_stack:
+ currently_looking = ' or '.join(
+ "'%s'" % describe_token_expr(expr)
+ for expr in end_token_stack[-1])
+ else:
+ currently_looking = None
+
+ if name is None:
+ message = ['Unexpected end of template.']
+ else:
+ message = ['Encountered unknown tag \'%s\'.' % name]
+
+ if currently_looking:
+ if name is not None and name in expected:
+ message.append('You probably made a nesting mistake. Jinja '
+ 'is expecting this tag, but currently looking '
+ 'for %s.' % currently_looking)
+ else:
+ message.append('Jinja was looking for the following tags: '
+ '%s.' % currently_looking)
+
+ if self._tag_stack:
+ message.append('The innermost block that needs to be '
+ 'closed is \'%s\'.' % self._tag_stack[-1])
+
+ self.fail(' '.join(message), lineno)
+
+ def fail_unknown_tag(self, name, lineno=None):
+ """Called if the parser encounters an unknown tag. Tries to fail
+ with a human readable error message that could help to identify
+ the problem.
+ """
+ return self._fail_ut_eof(name, self._end_token_stack, lineno)
+
+ def fail_eof(self, end_tokens=None, lineno=None):
+ """Like fail_unknown_tag but for end of template situations."""
+ stack = list(self._end_token_stack)
+ if end_tokens is not None:
+ stack.append(end_tokens)
+ return self._fail_ut_eof(None, stack, lineno)
+
+ def is_tuple_end(self, extra_end_rules=None):
+ """Are we at the end of a tuple?"""
+ if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
+ return True
+ elif extra_end_rules is not None:
+ return self.stream.current.test_any(extra_end_rules)
+ return False
+
+ def free_identifier(self, lineno=None):
+ """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+ self._last_identifier += 1
+ rv = object.__new__(nodes.InternalName)
+ nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+ return rv
+
+ def parse_statement(self):
+ """Parse a single statement."""
+ token = self.stream.current
+ if token.type != 'name':
+ self.fail('tag name expected', token.lineno)
+ self._tag_stack.append(token.value)
+ pop_tag = True
+ try:
+ if token.value in _statement_keywords:
+ return getattr(self, 'parse_' + self.stream.current.value)()
+ if token.value == 'call':
+ return self.parse_call_block()
+ if token.value == 'filter':
+ return self.parse_filter_block()
+ ext = self.extensions.get(token.value)
+ if ext is not None:
+ return ext(self)
+
+ # did not work out, remove the token we pushed by accident
+ # from the stack so that the unknown tag fail function can
+ # produce a proper error message.
+ self._tag_stack.pop()
+ pop_tag = False
+ self.fail_unknown_tag(token.value, token.lineno)
+ finally:
+ if pop_tag:
+ self._tag_stack.pop()
+
+ def parse_statements(self, end_tokens, drop_needle=False):
+ """Parse multiple statements into a list until one of the end tokens
+ is reached. This is used to parse the body of statements as it also
+ parses template data if appropriate. The parser checks first if the
+ current token is a colon and skips it if there is one. Then it checks
+ for the block end and parses until if one of the `end_tokens` is
+ reached. Per default the active token in the stream at the end of
+ the call is the matched end token. If this is not wanted `drop_needle`
+ can be set to `True` and the end token is removed.
+ """
+ # the first token may be a colon for python compatibility
+ self.stream.skip_if('colon')
+
+ # in the future it would be possible to add whole code sections
+ # by adding some sort of end of statement token and parsing those here.
+ self.stream.expect('block_end')
+ result = self.subparse(end_tokens)
+
+ # we reached the end of the template too early, the subparser
+ # does not check for this, so we do that now
+ if self.stream.current.type == 'eof':
+ self.fail_eof(end_tokens)
+
+ if drop_needle:
+ next(self.stream)
+ return result
+
+ def parse_set(self):
+ """Parse an assign statement."""
+ lineno = next(self.stream).lineno
+ target = self.parse_assign_target()
+ if self.stream.skip_if('assign'):
+ expr = self.parse_tuple()
+ return nodes.Assign(target, expr, lineno=lineno)
+ body = self.parse_statements(('name:endset',),
+ drop_needle=True)
+ return nodes.AssignBlock(target, body, lineno=lineno)
+
+ def parse_for(self):
+ """Parse a for loop."""
+ lineno = self.stream.expect('name:for').lineno
+ target = self.parse_assign_target(extra_end_rules=('name:in',))
+ self.stream.expect('name:in')
+ iter = self.parse_tuple(with_condexpr=False,
+ extra_end_rules=('name:recursive',))
+ test = None
+ if self.stream.skip_if('name:if'):
+ test = self.parse_expression()
+ recursive = self.stream.skip_if('name:recursive')
+ body = self.parse_statements(('name:endfor', 'name:else'))
+ if next(self.stream).value == 'endfor':
+ else_ = []
+ else:
+ else_ = self.parse_statements(('name:endfor',), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test,
+ recursive, lineno=lineno)
+
+ def parse_if(self):
+ """Parse an if construct."""
+ node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
+ while 1:
+ node.test = self.parse_tuple(with_condexpr=False)
+ node.body = self.parse_statements(('name:elif', 'name:else',
+ 'name:endif'))
+ token = next(self.stream)
+ if token.test('name:elif'):
+ new_node = nodes.If(lineno=self.stream.current.lineno)
+ node.else_ = [new_node]
+ node = new_node
+ continue
+ elif token.test('name:else'):
+ node.else_ = self.parse_statements(('name:endif',),
+ drop_needle=True)
+ else:
+ node.else_ = []
+ break
+ return result
+
+ def parse_block(self):
+ node = nodes.Block(lineno=next(self.stream).lineno)
+ node.name = self.stream.expect('name').value
+ node.scoped = self.stream.skip_if('name:scoped')
+
+ # common problem people encounter when switching from django
+ # to jinja. we do not support hyphens in block names, so let's
+ # raise a nicer error message in that case.
+ if self.stream.current.type == 'sub':
+ self.fail('Block names in Jinja have to be valid Python '
+ 'identifiers and may not contain hyphens, use an '
+ 'underscore instead.')
+
+ node.body = self.parse_statements(('name:endblock',), drop_needle=True)
+ self.stream.skip_if('name:' + node.name)
+ return node
+
+ def parse_extends(self):
+ node = nodes.Extends(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ return node
+
+ def parse_import_context(self, node, default):
+ if self.stream.current.test_any('name:with', 'name:without') and \
+ self.stream.look().test('name:context'):
+ node.with_context = next(self.stream).value == 'with'
+ self.stream.skip()
+ else:
+ node.with_context = default
+ return node
+
+ def parse_include(self):
+ node = nodes.Include(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ if self.stream.current.test('name:ignore') and \
+ self.stream.look().test('name:missing'):
+ node.ignore_missing = True
+ self.stream.skip(2)
+ else:
+ node.ignore_missing = False
+ return self.parse_import_context(node, True)
+
+ def parse_import(self):
+ node = nodes.Import(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect('name:as')
+ node.target = self.parse_assign_target(name_only=True).name
+ return self.parse_import_context(node, False)
+
+ def parse_from(self):
+ node = nodes.FromImport(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect('name:import')
+ node.names = []
+
+ def parse_context():
+ if self.stream.current.value in ('with', 'without') and \
+ self.stream.look().test('name:context'):
+ node.with_context = next(self.stream).value == 'with'
+ self.stream.skip()
+ return True
+ return False
+
+ while 1:
+ if node.names:
+ self.stream.expect('comma')
+ if self.stream.current.type == 'name':
+ if parse_context():
+ break
+ target = self.parse_assign_target(name_only=True)
+ if target.name.startswith('_'):
+ self.fail('names starting with an underline can not '
+ 'be imported', target.lineno,
+ exc=TemplateAssertionError)
+ if self.stream.skip_if('name:as'):
+ alias = self.parse_assign_target(name_only=True)
+ node.names.append((target.name, alias.name))
+ else:
+ node.names.append(target.name)
+ if parse_context() or self.stream.current.type != 'comma':
+ break
+ else:
+ break
+ if not hasattr(node, 'with_context'):
+ node.with_context = False
+ self.stream.skip_if('comma')
+ return node
+
+ def parse_signature(self, node):
+ node.args = args = []
+ node.defaults = defaults = []
+ self.stream.expect('lparen')
+ while self.stream.current.type != 'rparen':
+ if args:
+ self.stream.expect('comma')
+ arg = self.parse_assign_target(name_only=True)
+ arg.set_ctx('param')
+ if self.stream.skip_if('assign'):
+ defaults.append(self.parse_expression())
+ elif defaults:
+ self.fail('non-default argument follows default argument')
+ args.append(arg)
+ self.stream.expect('rparen')
+
+ def parse_call_block(self):
+ node = nodes.CallBlock(lineno=next(self.stream).lineno)
+ if self.stream.current.type == 'lparen':
+ self.parse_signature(node)
+ else:
+ node.args = []
+ node.defaults = []
+
+ node.call = self.parse_expression()
+ if not isinstance(node.call, nodes.Call):
+ self.fail('expected call', node.lineno)
+ node.body = self.parse_statements(('name:endcall',), drop_needle=True)
+ return node
+
+ def parse_filter_block(self):
+ node = nodes.FilterBlock(lineno=next(self.stream).lineno)
+ node.filter = self.parse_filter(None, start_inline=True)
+ node.body = self.parse_statements(('name:endfilter',),
+ drop_needle=True)
+ return node
+
+ def parse_macro(self):
+ node = nodes.Macro(lineno=next(self.stream).lineno)
+ node.name = self.parse_assign_target(name_only=True).name
+ self.parse_signature(node)
+ node.body = self.parse_statements(('name:endmacro',),
+ drop_needle=True)
+ return node
+
+ def parse_print(self):
+ node = nodes.Output(lineno=next(self.stream).lineno)
+ node.nodes = []
+ while self.stream.current.type != 'block_end':
+ if node.nodes:
+ self.stream.expect('comma')
+ node.nodes.append(self.parse_expression())
+ return node
+
+ def parse_assign_target(self, with_tuple=True, name_only=False,
+ extra_end_rules=None):
+ """Parse an assignment target. As Jinja2 allows assignments to
+ tuples, this function can parse all allowed assignment targets. Per
+ default assignments to tuples are parsed, that can be disable however
+ by setting `with_tuple` to `False`. If only assignments to names are
+ wanted `name_only` can be set to `True`. The `extra_end_rules`
+ parameter is forwarded to the tuple parsing function.
+ """
+ if name_only:
+ token = self.stream.expect('name')
+ target = nodes.Name(token.value, 'store', lineno=token.lineno)
+ else:
+ if with_tuple:
+ target = self.parse_tuple(simplified=True,
+ extra_end_rules=extra_end_rules)
+ else:
+ target = self.parse_primary()
+ target.set_ctx('store')
+ if not target.can_assign():
+ self.fail('can\'t assign to %r' % target.__class__.
+ __name__.lower(), target.lineno)
+ return target
+
+ def parse_expression(self, with_condexpr=True):
+ """Parse an expression. Per default all expressions are parsed, if
+ the optional `with_condexpr` parameter is set to `False` conditional
+ expressions are not parsed.
+ """
+ if with_condexpr:
+ return self.parse_condexpr()
+ return self.parse_or()
+
+ def parse_condexpr(self):
+ lineno = self.stream.current.lineno
+ expr1 = self.parse_or()
+ while self.stream.skip_if('name:if'):
+ expr2 = self.parse_or()
+ if self.stream.skip_if('name:else'):
+ expr3 = self.parse_condexpr()
+ else:
+ expr3 = None
+ expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return expr1
+
+ def parse_or(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_and()
+ while self.stream.skip_if('name:or'):
+ right = self.parse_and()
+ left = nodes.Or(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_and(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_not()
+ while self.stream.skip_if('name:and'):
+ right = self.parse_not()
+ left = nodes.And(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_not(self):
+ if self.stream.current.test('name:not'):
+ lineno = next(self.stream).lineno
+ return nodes.Not(self.parse_not(), lineno=lineno)
+ return self.parse_compare()
+
+ def parse_compare(self):
+ lineno = self.stream.current.lineno
+ expr = self.parse_add()
+ ops = []
+ while 1:
+ token_type = self.stream.current.type
+ if token_type in _compare_operators:
+ next(self.stream)
+ ops.append(nodes.Operand(token_type, self.parse_add()))
+ elif self.stream.skip_if('name:in'):
+ ops.append(nodes.Operand('in', self.parse_add()))
+ elif (self.stream.current.test('name:not') and
+ self.stream.look().test('name:in')):
+ self.stream.skip(2)
+ ops.append(nodes.Operand('notin', self.parse_add()))
+ else:
+ break
+ lineno = self.stream.current.lineno
+ if not ops:
+ return expr
+ return nodes.Compare(expr, ops, lineno=lineno)
+
+ def parse_add(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_sub()
+ while self.stream.current.type == 'add':
+ next(self.stream)
+ right = self.parse_sub()
+ left = nodes.Add(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_sub(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_concat()
+ while self.stream.current.type == 'sub':
+ next(self.stream)
+ right = self.parse_concat()
+ left = nodes.Sub(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_concat(self):
+ lineno = self.stream.current.lineno
+ args = [self.parse_mul()]
+ while self.stream.current.type == 'tilde':
+ next(self.stream)
+ args.append(self.parse_mul())
+ if len(args) == 1:
+ return args[0]
+ return nodes.Concat(args, lineno=lineno)
+
+ def parse_mul(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_div()
+ while self.stream.current.type == 'mul':
+ next(self.stream)
+ right = self.parse_div()
+ left = nodes.Mul(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_div(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_floordiv()
+ while self.stream.current.type == 'div':
+ next(self.stream)
+ right = self.parse_floordiv()
+ left = nodes.Div(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_floordiv(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_mod()
+ while self.stream.current.type == 'floordiv':
+ next(self.stream)
+ right = self.parse_mod()
+ left = nodes.FloorDiv(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_mod(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_pow()
+ while self.stream.current.type == 'mod':
+ next(self.stream)
+ right = self.parse_pow()
+ left = nodes.Mod(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_pow(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_unary()
+ while self.stream.current.type == 'pow':
+ next(self.stream)
+ right = self.parse_unary()
+ left = nodes.Pow(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_unary(self, with_filter=True):
+ token_type = self.stream.current.type
+ lineno = self.stream.current.lineno
+ if token_type == 'sub':
+ next(self.stream)
+ node = nodes.Neg(self.parse_unary(False), lineno=lineno)
+ elif token_type == 'add':
+ next(self.stream)
+ node = nodes.Pos(self.parse_unary(False), lineno=lineno)
+ else:
+ node = self.parse_primary()
+ node = self.parse_postfix(node)
+ if with_filter:
+ node = self.parse_filter_expr(node)
+ return node
+
+ def parse_primary(self):
+ token = self.stream.current
+ if token.type == 'name':
+ if token.value in ('true', 'false', 'True', 'False'):
+ node = nodes.Const(token.value in ('true', 'True'),
+ lineno=token.lineno)
+ elif token.value in ('none', 'None'):
+ node = nodes.Const(None, lineno=token.lineno)
+ else:
+ node = nodes.Name(token.value, 'load', lineno=token.lineno)
+ next(self.stream)
+ elif token.type == 'string':
+ next(self.stream)
+ buf = [token.value]
+ lineno = token.lineno
+ while self.stream.current.type == 'string':
+ buf.append(self.stream.current.value)
+ next(self.stream)
+ node = nodes.Const(''.join(buf), lineno=lineno)
+ elif token.type in ('integer', 'float'):
+ next(self.stream)
+ node = nodes.Const(token.value, lineno=token.lineno)
+ elif token.type == 'lparen':
+ next(self.stream)
+ node = self.parse_tuple(explicit_parentheses=True)
+ self.stream.expect('rparen')
+ elif token.type == 'lbracket':
+ node = self.parse_list()
+ elif token.type == 'lbrace':
+ node = self.parse_dict()
+ else:
+ self.fail("unexpected '%s'" % describe_token(token), token.lineno)
+ return node
+
+ def parse_tuple(self, simplified=False, with_condexpr=True,
+ extra_end_rules=None, explicit_parentheses=False):
+ """Works like `parse_expression` but if multiple expressions are
+ delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+ This method could also return a regular expression instead of a tuple
+ if no commas where found.
+
+ The default parsing mode is a full tuple. If `simplified` is `True`
+ only names and literals are parsed. The `no_condexpr` parameter is
+ forwarded to :meth:`parse_expression`.
+
+ Because tuples do not require delimiters and may end in a bogus comma
+ an extra hint is needed that marks the end of a tuple. For example
+ for loops support tuples between `for` and `in`. In that case the
+ `extra_end_rules` is set to ``['name:in']``.
+
+ `explicit_parentheses` is true if the parsing was triggered by an
+ expression in parentheses. This is used to figure out if an empty
+ tuple is a valid expression or not.
+ """
+ lineno = self.stream.current.lineno
+ if simplified:
+ parse = self.parse_primary
+ elif with_condexpr:
+ parse = self.parse_expression
+ else:
+ parse = lambda: self.parse_expression(with_condexpr=False)
+ args = []
+ is_tuple = False
+ while 1:
+ if args:
+ self.stream.expect('comma')
+ if self.is_tuple_end(extra_end_rules):
+ break
+ args.append(parse())
+ if self.stream.current.type == 'comma':
+ is_tuple = True
+ else:
+ break
+ lineno = self.stream.current.lineno
+
+ if not is_tuple:
+ if args:
+ return args[0]
+
+ # if we don't have explicit parentheses, an empty tuple is
+ # not a valid expression. This would mean nothing (literally
+ # nothing) in the spot of an expression would be an empty
+ # tuple.
+ if not explicit_parentheses:
+ self.fail('Expected an expression, got \'%s\'' %
+ describe_token(self.stream.current))
+
+ return nodes.Tuple(args, 'load', lineno=lineno)
+
+ def parse_list(self):
+ token = self.stream.expect('lbracket')
+ items = []
+ while self.stream.current.type != 'rbracket':
+ if items:
+ self.stream.expect('comma')
+ if self.stream.current.type == 'rbracket':
+ break
+ items.append(self.parse_expression())
+ self.stream.expect('rbracket')
+ return nodes.List(items, lineno=token.lineno)
+
+ def parse_dict(self):
+ token = self.stream.expect('lbrace')
+ items = []
+ while self.stream.current.type != 'rbrace':
+ if items:
+ self.stream.expect('comma')
+ if self.stream.current.type == 'rbrace':
+ break
+ key = self.parse_expression()
+ self.stream.expect('colon')
+ value = self.parse_expression()
+ items.append(nodes.Pair(key, value, lineno=key.lineno))
+ self.stream.expect('rbrace')
+ return nodes.Dict(items, lineno=token.lineno)
+
+ def parse_postfix(self, node):
+ while 1:
+ token_type = self.stream.current.type
+ if token_type == 'dot' or token_type == 'lbracket':
+ node = self.parse_subscript(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == 'lparen':
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_filter_expr(self, node):
+ while 1:
+ token_type = self.stream.current.type
+ if token_type == 'pipe':
+ node = self.parse_filter(node)
+ elif token_type == 'name' and self.stream.current.value == 'is':
+ node = self.parse_test(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == 'lparen':
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_subscript(self, node):
+ token = next(self.stream)
+ if token.type == 'dot':
+ attr_token = self.stream.current
+ next(self.stream)
+ if attr_token.type == 'name':
+ return nodes.Getattr(node, attr_token.value, 'load',
+ lineno=token.lineno)
+ elif attr_token.type != 'integer':
+ self.fail('expected name or number', attr_token.lineno)
+ arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
+ return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+ if token.type == 'lbracket':
+ args = []
+ while self.stream.current.type != 'rbracket':
+ if args:
+ self.stream.expect('comma')
+ args.append(self.parse_subscribed())
+ self.stream.expect('rbracket')
+ if len(args) == 1:
+ arg = args[0]
+ else:
+ arg = nodes.Tuple(args, 'load', lineno=token.lineno)
+ return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+ self.fail('expected subscript expression', self.lineno)
+
+ def parse_subscribed(self):
+ lineno = self.stream.current.lineno
+
+ if self.stream.current.type == 'colon':
+ next(self.stream)
+ args = [None]
+ else:
+ node = self.parse_expression()
+ if self.stream.current.type != 'colon':
+ return node
+ next(self.stream)
+ args = [node]
+
+ if self.stream.current.type == 'colon':
+ args.append(None)
+ elif self.stream.current.type not in ('rbracket', 'comma'):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+
+ if self.stream.current.type == 'colon':
+ next(self.stream)
+ if self.stream.current.type not in ('rbracket', 'comma'):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+ else:
+ args.append(None)
+
+ return nodes.Slice(lineno=lineno, *args)
+
+ def parse_call(self, node):
+ token = self.stream.expect('lparen')
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ require_comma = False
+
+ def ensure(expr):
+ if not expr:
+ self.fail('invalid syntax for function call expression',
+ token.lineno)
+
+ while self.stream.current.type != 'rparen':
+ if require_comma:
+ self.stream.expect('comma')
+ # support for trailing comma
+ if self.stream.current.type == 'rparen':
+ break
+ if self.stream.current.type == 'mul':
+ ensure(dyn_args is None and dyn_kwargs is None)
+ next(self.stream)
+ dyn_args = self.parse_expression()
+ elif self.stream.current.type == 'pow':
+ ensure(dyn_kwargs is None)
+ next(self.stream)
+ dyn_kwargs = self.parse_expression()
+ else:
+ ensure(dyn_args is None and dyn_kwargs is None)
+ if self.stream.current.type == 'name' and \
+ self.stream.look().type == 'assign':
+ key = self.stream.current.value
+ self.stream.skip(2)
+ value = self.parse_expression()
+ kwargs.append(nodes.Keyword(key, value,
+ lineno=value.lineno))
+ else:
+ ensure(not kwargs)
+ args.append(self.parse_expression())
+
+ require_comma = True
+ self.stream.expect('rparen')
+
+ if node is None:
+ return args, kwargs, dyn_args, dyn_kwargs
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
+ lineno=token.lineno)
+
+ def parse_filter(self, node, start_inline=False):
+ while self.stream.current.type == 'pipe' or start_inline:
+ if not start_inline:
+ next(self.stream)
+ token = self.stream.expect('name')
+ name = token.value
+ while self.stream.current.type == 'dot':
+ next(self.stream)
+ name += '.' + self.stream.expect('name').value
+ if self.stream.current.type == 'lparen':
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ else:
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ node = nodes.Filter(node, name, args, kwargs, dyn_args,
+ dyn_kwargs, lineno=token.lineno)
+ start_inline = False
+ return node
+
+ def parse_test(self, node):
+ token = next(self.stream)
+ if self.stream.current.test('name:not'):
+ next(self.stream)
+ negated = True
+ else:
+ negated = False
+ name = self.stream.expect('name').value
+ while self.stream.current.type == 'dot':
+ next(self.stream)
+ name += '.' + self.stream.expect('name').value
+ dyn_args = dyn_kwargs = None
+ kwargs = []
+ if self.stream.current.type == 'lparen':
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ elif (self.stream.current.type in ('name', 'string', 'integer',
+ 'float', 'lparen', 'lbracket',
+ 'lbrace') and not
+ self.stream.current.test_any('name:else', 'name:or',
+ 'name:and')):
+ if self.stream.current.test('name:is'):
+ self.fail('You cannot chain multiple tests with is')
+ args = [self.parse_expression()]
+ else:
+ args = []
+ node = nodes.Test(node, name, args, kwargs, dyn_args,
+ dyn_kwargs, lineno=token.lineno)
+ if negated:
+ node = nodes.Not(node, lineno=token.lineno)
+ return node
+
+ def subparse(self, end_tokens=None):
+ body = []
+ data_buffer = []
+ add_data = data_buffer.append
+
+ if end_tokens is not None:
+ self._end_token_stack.append(end_tokens)
+
+ def flush_data():
+ if data_buffer:
+ lineno = data_buffer[0].lineno
+ body.append(nodes.Output(data_buffer[:], lineno=lineno))
+ del data_buffer[:]
+
+ try:
+ while self.stream:
+ token = self.stream.current
+ if token.type == 'data':
+ if token.value:
+ add_data(nodes.TemplateData(token.value,
+ lineno=token.lineno))
+ next(self.stream)
+ elif token.type == 'variable_begin':
+ next(self.stream)
+ add_data(self.parse_tuple(with_condexpr=True))
+ self.stream.expect('variable_end')
+ elif token.type == 'block_begin':
+ flush_data()
+ next(self.stream)
+ if end_tokens is not None and \
+ self.stream.current.test_any(*end_tokens):
+ return body
+ rv = self.parse_statement()
+ if isinstance(rv, list):
+ body.extend(rv)
+ else:
+ body.append(rv)
+ self.stream.expect('block_end')
+ else:
+ raise AssertionError('internal parsing error')
+
+ flush_data()
+ finally:
+ if end_tokens is not None:
+ self._end_token_stack.pop()
+
+ return body
+
+ def parse(self):
+ """Parse the whole template into a `Template` node."""
+ result = nodes.Template(self.subparse(), lineno=1)
+ result.set_environment(self.environment)
+ return result
diff --git a/deps/v8/third_party/jinja2/runtime.py b/deps/v8/third_party/jinja2/runtime.py
new file mode 100644
index 0000000000..685a12da06
--- /dev/null
+++ b/deps/v8/third_party/jinja2/runtime.py
@@ -0,0 +1,667 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.runtime
+ ~~~~~~~~~~~~~~
+
+ Runtime helpers.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD.
+"""
+import sys
+
+from itertools import chain
+from jinja2.nodes import EvalContext, _context_function_types
+from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
+ internalcode, object_type_repr
+from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
+ TemplateNotFound
+from jinja2._compat import imap, text_type, iteritems, \
+ implements_iterator, implements_to_string, string_types, PY2
+
+
+# these variables are exported to the template runtime
+__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
+ 'TemplateRuntimeError', 'missing', 'concat', 'escape',
+ 'markup_join', 'unicode_join', 'to_string', 'identity',
+ 'TemplateNotFound', 'make_logging_undefined']
+
+#: the name of the function that is used to convert something into
+#: a string. We can just use the text type here.
+to_string = text_type
+
+#: the identity function. Useful for certain things in the environment
+identity = lambda x: x
+
+_last_iteration = object()
+
+
+def markup_join(seq):
+ """Concatenation that escapes if necessary and converts to unicode."""
+ buf = []
+ iterator = imap(soft_unicode, seq)
+ for arg in iterator:
+ buf.append(arg)
+ if hasattr(arg, '__html__'):
+ return Markup(u'').join(chain(buf, iterator))
+ return concat(buf)
+
+
+def unicode_join(seq):
+ """Simple args to unicode conversion and concatenation."""
+ return concat(imap(text_type, seq))
+
+
+def new_context(environment, template_name, blocks, vars=None,
+ shared=None, globals=None, locals=None):
+    """Internal helper for context creation."""
+ if vars is None:
+ vars = {}
+ if shared:
+ parent = vars
+ else:
+ parent = dict(globals or (), **vars)
+ if locals:
+ # if the parent is shared a copy should be created because
+ # we don't want to modify the dict passed
+ if shared:
+ parent = dict(parent)
+ for key, value in iteritems(locals):
+ if key[:2] == 'l_' and value is not missing:
+ parent[key[2:]] = value
+ return environment.context_class(environment, parent, template_name,
+ blocks)
+
+
+class TemplateReference(object):
+ """The `self` in templates."""
+
+ def __init__(self, context):
+ self.__context = context
+
+ def __getitem__(self, name):
+ blocks = self.__context.blocks[name]
+ return BlockReference(name, self.__context, blocks, 0)
+
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self.__context.name
+ )
+
+
+class Context(object):
+ """The template context holds the variables of a template. It stores the
+ values passed to the template and also the names the template exports.
+ Creating instances is neither supported nor useful as it's created
+ automatically at various stages of the template evaluation and should not
+ be created by hand.
+
+ The context is immutable. Modifications on :attr:`parent` **must not**
+ happen and modifications on :attr:`vars` are allowed from generated
+ template code only. Template filters and global functions marked as
+ :func:`contextfunction`\s get the active context passed as first argument
+ and are allowed to access the context read-only.
+
+ The template context supports read only dict operations (`get`,
+ `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
+ `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
+ method that doesn't fail with a `KeyError` but returns an
+ :class:`Undefined` object for missing variables.
+ """
+ __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
+ 'name', 'blocks', '__weakref__')
+
+ def __init__(self, environment, parent, name, blocks):
+ self.parent = parent
+ self.vars = {}
+ self.environment = environment
+ self.eval_ctx = EvalContext(self.environment, name)
+ self.exported_vars = set()
+ self.name = name
+
+ # create the initial mapping of blocks. Whenever template inheritance
+ # takes place the runtime will update this mapping with the new blocks
+ # from the template.
+ self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
+
+ def super(self, name, current):
+ """Render a parent block."""
+ try:
+ blocks = self.blocks[name]
+ index = blocks.index(current) + 1
+ blocks[index]
+ except LookupError:
+ return self.environment.undefined('there is no parent block '
+ 'called %r.' % name,
+ name='super')
+ return BlockReference(name, self, blocks, index)
+
+ def get(self, key, default=None):
+ """Returns an item from the template context, if it doesn't exist
+ `default` is returned.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def resolve(self, key):
+ """Looks up a variable like `__getitem__` or `get` but returns an
+        :class:`Undefined` object with the name of the variable looked up.
+ """
+ if key in self.vars:
+ return self.vars[key]
+ if key in self.parent:
+ return self.parent[key]
+ return self.environment.undefined(name=key)
+
+ def get_exported(self):
+ """Get a new dict with the exported variables."""
+ return dict((k, self.vars[k]) for k in self.exported_vars)
+
+ def get_all(self):
+ """Return a copy of the complete context as dict including the
+ exported variables.
+ """
+ return dict(self.parent, **self.vars)
+
+ @internalcode
+ def call(__self, __obj, *args, **kwargs):
+ """Call the callable with the arguments and keyword arguments
+ provided but inject the active context or environment as first
+ argument if the callable is a :func:`contextfunction` or
+ :func:`environmentfunction`.
+ """
+ if __debug__:
+ __traceback_hide__ = True # noqa
+
+ # Allow callable classes to take a context
+ fn = __obj.__call__
+ for fn_type in ('contextfunction',
+ 'evalcontextfunction',
+ 'environmentfunction'):
+ if hasattr(fn, fn_type):
+ __obj = fn
+ break
+
+ if isinstance(__obj, _context_function_types):
+ if getattr(__obj, 'contextfunction', 0):
+ args = (__self,) + args
+ elif getattr(__obj, 'evalcontextfunction', 0):
+ args = (__self.eval_ctx,) + args
+ elif getattr(__obj, 'environmentfunction', 0):
+ args = (__self.environment,) + args
+ try:
+ return __obj(*args, **kwargs)
+ except StopIteration:
+ return __self.environment.undefined('value was undefined because '
+ 'a callable raised a '
+ 'StopIteration exception')
+
+ def derived(self, locals=None):
+ """Internal helper function to create a derived context."""
+ context = new_context(self.environment, self.name, {},
+ self.parent, True, None, locals)
+ context.vars.update(self.vars)
+ context.eval_ctx = self.eval_ctx
+ context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
+ return context
+
+ def _all(meth):
+ proxy = lambda self: getattr(self.get_all(), meth)()
+ proxy.__doc__ = getattr(dict, meth).__doc__
+ proxy.__name__ = meth
+ return proxy
+
+ keys = _all('keys')
+ values = _all('values')
+ items = _all('items')
+
+ # not available on python 3
+ if PY2:
+ iterkeys = _all('iterkeys')
+ itervalues = _all('itervalues')
+ iteritems = _all('iteritems')
+ del _all
+
+ def __contains__(self, name):
+ return name in self.vars or name in self.parent
+
+ def __getitem__(self, key):
+ """Lookup a variable or raise `KeyError` if the variable is
+ undefined.
+ """
+ item = self.resolve(key)
+ if isinstance(item, Undefined):
+ raise KeyError(key)
+ return item
+
+ def __repr__(self):
+ return '<%s %s of %r>' % (
+ self.__class__.__name__,
+ repr(self.get_all()),
+ self.name
+ )
+
+
+# register the context as mapping if possible
+try:
+ from collections import Mapping
+ Mapping.register(Context)
+except ImportError:
+ pass
+
+
+class BlockReference(object):
+ """One block on a template reference."""
+
+ def __init__(self, name, context, stack, depth):
+ self.name = name
+ self._context = context
+ self._stack = stack
+ self._depth = depth
+
+ @property
+ def super(self):
+ """Super the block."""
+ if self._depth + 1 >= len(self._stack):
+ return self._context.environment. \
+ undefined('there is no parent block called %r.' %
+ self.name, name='super')
+ return BlockReference(self.name, self._context, self._stack,
+ self._depth + 1)
+
+ @internalcode
+ def __call__(self):
+ rv = concat(self._stack[self._depth](self._context))
+ if self._context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+class LoopContext(object):
+ """A loop context for dynamic iteration."""
+
+ def __init__(self, iterable, recurse=None, depth0=0):
+ self._iterator = iter(iterable)
+ self._recurse = recurse
+ self._after = self._safe_next()
+ self.index0 = -1
+ self.depth0 = depth0
+
+ # try to get the length of the iterable early. This must be done
+ # here because there are some broken iterators around where there
+ # __len__ is the number of iterations left (i'm looking at your
+ # listreverseiterator!).
+ try:
+ self._length = len(iterable)
+ except (TypeError, AttributeError):
+ self._length = None
+
+ def cycle(self, *args):
+ """Cycles among the arguments with the current loop index."""
+ if not args:
+ raise TypeError('no items for cycling given')
+ return args[self.index0 % len(args)]
+
+ first = property(lambda x: x.index0 == 0)
+ last = property(lambda x: x._after is _last_iteration)
+ index = property(lambda x: x.index0 + 1)
+ revindex = property(lambda x: x.length - x.index0)
+ revindex0 = property(lambda x: x.length - x.index)
+ depth = property(lambda x: x.depth0 + 1)
+
+ def __len__(self):
+ return self.length
+
+ def __iter__(self):
+ return LoopContextIterator(self)
+
+ def _safe_next(self):
+ try:
+ return next(self._iterator)
+ except StopIteration:
+ return _last_iteration
+
+ @internalcode
+ def loop(self, iterable):
+ if self._recurse is None:
+ raise TypeError('Tried to call non recursive loop. Maybe you '
+ "forgot the 'recursive' modifier.")
+ return self._recurse(iterable, self._recurse, self.depth0 + 1)
+
+ # a nifty trick to enhance the error message if someone tried to call
+    # the loop without or with too many arguments.
+ __call__ = loop
+ del loop
+
+ @property
+ def length(self):
+ if self._length is None:
+            # if it was not possible to get the length of the iterator when
+ # the loop context was created (ie: iterating over a generator)
+ # we have to convert the iterable into a sequence and use the
+ # length of that + the number of iterations so far.
+ iterable = tuple(self._iterator)
+ self._iterator = iter(iterable)
+ iterations_done = self.index0 + 2
+ self._length = len(iterable) + iterations_done
+ return self._length
+
+ def __repr__(self):
+ return '<%s %r/%r>' % (
+ self.__class__.__name__,
+ self.index,
+ self.length
+ )
+
+
+@implements_iterator
+class LoopContextIterator(object):
+ """The iterator for a loop context."""
+ __slots__ = ('context',)
+
+ def __init__(self, context):
+ self.context = context
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ ctx = self.context
+ ctx.index0 += 1
+ if ctx._after is _last_iteration:
+ raise StopIteration()
+ next_elem = ctx._after
+ ctx._after = ctx._safe_next()
+ return next_elem, ctx
+
+
+class Macro(object):
+ """Wraps a macro function."""
+
+ def __init__(self, environment, func, name, arguments, defaults,
+ catch_kwargs, catch_varargs, caller):
+ self._environment = environment
+ self._func = func
+ self._argument_count = len(arguments)
+ self.name = name
+ self.arguments = arguments
+ self.defaults = defaults
+ self.catch_kwargs = catch_kwargs
+ self.catch_varargs = catch_varargs
+ self.caller = caller
+
+ @internalcode
+ def __call__(self, *args, **kwargs):
+ # try to consume the positional arguments
+ arguments = list(args[:self._argument_count])
+ off = len(arguments)
+
+ # if the number of arguments consumed is not the number of
+ # arguments expected we start filling in keyword arguments
+ # and defaults.
+ if off != self._argument_count:
+ for idx, name in enumerate(self.arguments[len(arguments):]):
+ try:
+ value = kwargs.pop(name)
+ except KeyError:
+ try:
+ value = self.defaults[idx - self._argument_count + off]
+ except IndexError:
+ value = self._environment.undefined(
+ 'parameter %r was not provided' % name, name=name)
+ arguments.append(value)
+
+ # it's important that the order of these arguments does not change
+ # if not also changed in the compiler's `function_scoping` method.
+ # the order is caller, keyword arguments, positional arguments!
+ if self.caller:
+ caller = kwargs.pop('caller', None)
+ if caller is None:
+ caller = self._environment.undefined('No caller defined',
+ name='caller')
+ arguments.append(caller)
+ if self.catch_kwargs:
+ arguments.append(kwargs)
+ elif kwargs:
+ raise TypeError('macro %r takes no keyword argument %r' %
+ (self.name, next(iter(kwargs))))
+ if self.catch_varargs:
+ arguments.append(args[self._argument_count:])
+ elif len(args) > self._argument_count:
+ raise TypeError('macro %r takes not more than %d argument(s)' %
+ (self.name, len(self.arguments)))
+ return self._func(*arguments)
+
+ def __repr__(self):
+ return '<%s %s>' % (
+ self.__class__.__name__,
+ self.name is None and 'anonymous' or repr(self.name)
+ )
+
+
+@implements_to_string
+class Undefined(object):
+ """The default undefined type. This undefined type can be printed and
+ iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
+
+ >>> foo = Undefined(name='foo')
+ >>> str(foo)
+ ''
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+ __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
+ '_undefined_exception')
+
+ def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
+ self._undefined_hint = hint
+ self._undefined_obj = obj
+ self._undefined_name = name
+ self._undefined_exception = exc
+
+ @internalcode
+ def _fail_with_undefined_error(self, *args, **kwargs):
+ """Regular callback function for undefined objects that raises an
+ `jinja2.exceptions.UndefinedError` on call.
+ """
+ if self._undefined_hint is None:
+ if self._undefined_obj is missing:
+ hint = '%r is undefined' % self._undefined_name
+ elif not isinstance(self._undefined_name, string_types):
+ hint = '%s has no element %r' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ else:
+ hint = '%r has no attribute %r' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ else:
+ hint = self._undefined_hint
+ raise self._undefined_exception(hint)
+
+ @internalcode
+ def __getattr__(self, name):
+ if name[:2] == '__':
+ raise AttributeError(name)
+ return self._fail_with_undefined_error()
+
+ __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
+ __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
+ __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
+ __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
+ __float__ = __complex__ = __pow__ = __rpow__ = \
+ _fail_with_undefined_error
+
+ def __eq__(self, other):
+ return type(self) is type(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return id(type(self))
+
+ def __str__(self):
+ return u''
+
+ def __len__(self):
+ return 0
+
+ def __iter__(self):
+ if 0:
+ yield None
+
+ def __nonzero__(self):
+ return False
+ __bool__ = __nonzero__
+
+ def __repr__(self):
+ return 'Undefined'
+
+
+def make_logging_undefined(logger=None, base=None):
+ """Given a logger object this returns a new undefined class that will
+ log certain failures. It will log iterations and printing. If no
+ logger is given a default logger is created.
+
+ Example::
+
+ logger = logging.getLogger(__name__)
+ LoggingUndefined = make_logging_undefined(
+ logger=logger,
+ base=Undefined
+ )
+
+ .. versionadded:: 2.8
+
+ :param logger: the logger to use. If not provided, a default logger
+ is created.
+ :param base: the base class to add logging functionality to. This
+ defaults to :class:`Undefined`.
+ """
+ if logger is None:
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.StreamHandler(sys.stderr))
+ if base is None:
+ base = Undefined
+
+ def _log_message(undef):
+ if undef._undefined_hint is None:
+ if undef._undefined_obj is missing:
+ hint = '%s is undefined' % undef._undefined_name
+ elif not isinstance(undef._undefined_name, string_types):
+ hint = '%s has no element %s' % (
+ object_type_repr(undef._undefined_obj),
+ undef._undefined_name)
+ else:
+ hint = '%s has no attribute %s' % (
+ object_type_repr(undef._undefined_obj),
+ undef._undefined_name)
+ else:
+ hint = undef._undefined_hint
+ logger.warning('Template variable warning: %s', hint)
+
+ class LoggingUndefined(base):
+
+ def _fail_with_undefined_error(self, *args, **kwargs):
+ try:
+ return base._fail_with_undefined_error(self, *args, **kwargs)
+ except self._undefined_exception as e:
+ logger.error('Template variable error: %s', str(e))
+ raise e
+
+ def __str__(self):
+ rv = base.__str__(self)
+ _log_message(self)
+ return rv
+
+ def __iter__(self):
+ rv = base.__iter__(self)
+ _log_message(self)
+ return rv
+
+ if PY2:
+ def __nonzero__(self):
+ rv = base.__nonzero__(self)
+ _log_message(self)
+ return rv
+
+ def __unicode__(self):
+ rv = base.__unicode__(self)
+ _log_message(self)
+ return rv
+ else:
+ def __bool__(self):
+ rv = base.__bool__(self)
+ _log_message(self)
+ return rv
+
+ return LoggingUndefined
+
+
+@implements_to_string
+class DebugUndefined(Undefined):
+ """An undefined that returns the debug info when printed.
+
+ >>> foo = DebugUndefined(name='foo')
+ >>> str(foo)
+ '{{ foo }}'
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+ __slots__ = ()
+
+ def __str__(self):
+ if self._undefined_hint is None:
+ if self._undefined_obj is missing:
+ return u'{{ %s }}' % self._undefined_name
+ return '{{ no such element: %s[%r] }}' % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name
+ )
+ return u'{{ undefined value printed: %s }}' % self._undefined_hint
+
+
+@implements_to_string
+class StrictUndefined(Undefined):
+ """An undefined that barks on print and iteration as well as boolean
+ tests and all kinds of comparisons. In other words: you can do nothing
+ with it except checking if it's defined using the `defined` test.
+
+ >>> foo = StrictUndefined(name='foo')
+ >>> str(foo)
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> not foo
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+ __slots__ = ()
+ __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
+ __ne__ = __bool__ = __hash__ = \
+ Undefined._fail_with_undefined_error
+
+
+# remove remaining slots attributes, after the metaclass did the magic they
+# are unneeded and irritating as they contain wrong data for the subclasses.
+del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
diff --git a/deps/v8/third_party/jinja2/sandbox.py b/deps/v8/third_party/jinja2/sandbox.py
new file mode 100644
index 0000000000..7e40ab3085
--- /dev/null
+++ b/deps/v8/third_party/jinja2/sandbox.py
@@ -0,0 +1,367 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.sandbox
+ ~~~~~~~~~~~~~~
+
+ Adds a sandbox layer to Jinja as it was the default behavior in the old
+ Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
+ default behavior is easier to use.
+
+ The behavior can be changed by subclassing the environment.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD.
+"""
+import types
+import operator
+from jinja2.environment import Environment
+from jinja2.exceptions import SecurityError
+from jinja2._compat import string_types, PY2
+
+
+#: maximum number of items a range may produce
+MAX_RANGE = 100000
+
+#: attributes of function objects that are considered unsafe.
+if PY2:
+ UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
+ 'func_defaults', 'func_globals'])
+else:
+ # On versions > python 2 the special attributes on functions are gone,
+ # but they remain on methods and generators for whatever reason.
+ UNSAFE_FUNCTION_ATTRIBUTES = set()
+
+
+#: unsafe method attributes. function attributes are unsafe for methods too
+UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
+
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
+
+import warnings
+
+# make sure we don't warn in python 2.6 about stuff we don't care about
+warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
+ module='jinja2.sandbox')
+
+from collections import deque
+
+_mutable_set_types = (set,)
+_mutable_mapping_types = (dict,)
+_mutable_sequence_types = (list,)
+
+
+# on python 2.x we can register the user collection types
+try:
+ from UserDict import UserDict, DictMixin
+ from UserList import UserList
+ _mutable_mapping_types += (UserDict, DictMixin)
+ _mutable_set_types += (UserList,)
+except ImportError:
+ pass
+
+# if sets is still available, register the mutable set from there as well
+try:
+ from sets import Set
+ _mutable_set_types += (Set,)
+except ImportError:
+ pass
+
+#: register Python 2.6 abstract base classes
+try:
+ from collections import MutableSet, MutableMapping, MutableSequence
+ _mutable_set_types += (MutableSet,)
+ _mutable_mapping_types += (MutableMapping,)
+ _mutable_sequence_types += (MutableSequence,)
+except ImportError:
+ pass
+
+_mutable_spec = (
+ (_mutable_set_types, frozenset([
+ 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
+ 'symmetric_difference_update', 'update'
+ ])),
+ (_mutable_mapping_types, frozenset([
+ 'clear', 'pop', 'popitem', 'setdefault', 'update'
+ ])),
+ (_mutable_sequence_types, frozenset([
+ 'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
+ ])),
+ (deque, frozenset([
+ 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
+ 'popleft', 'remove', 'rotate'
+ ]))
+)
+
+
+def safe_range(*args):
+ """A range that can't generate ranges with a length of more than
+ MAX_RANGE items.
+ """
+ rng = range(*args)
+ if len(rng) > MAX_RANGE:
+ raise OverflowError('range too big, maximum size for range is %d' %
+ MAX_RANGE)
+ return rng
+
+
+def unsafe(f):
+ """Marks a function or method as unsafe.
+
+ ::
+
+ @unsafe
+ def delete(self):
+ pass
+ """
+ f.unsafe_callable = True
+ return f
+
+
+def is_internal_attribute(obj, attr):
+ """Test if the attribute given is an internal python attribute. For
+ example this function returns `True` for the `func_code` attribute of
+ python objects. This is useful if the environment method
+ :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
+
+ >>> from jinja2.sandbox import is_internal_attribute
+ >>> is_internal_attribute(str, "mro")
+ True
+ >>> is_internal_attribute(str, "upper")
+ False
+ """
+ if isinstance(obj, types.FunctionType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES:
+ return True
+ elif isinstance(obj, types.MethodType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
+ attr in UNSAFE_METHOD_ATTRIBUTES:
+ return True
+ elif isinstance(obj, type):
+ if attr == 'mro':
+ return True
+ elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
+ return True
+ elif isinstance(obj, types.GeneratorType):
+ if attr in UNSAFE_GENERATOR_ATTRIBUTES:
+ return True
+ return attr.startswith('__')
+
+
+def modifies_known_mutable(obj, attr):
+ """This function checks if an attribute on a builtin mutable object
+ (list, dict, set or deque) would modify it if called. It also supports
+ the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
+ with Python 2.6 onwards the abstract base classes `MutableSet`,
+ `MutableMapping`, and `MutableSequence`.
+
+ >>> modifies_known_mutable({}, "clear")
+ True
+ >>> modifies_known_mutable({}, "keys")
+ False
+ >>> modifies_known_mutable([], "append")
+ True
+ >>> modifies_known_mutable([], "index")
+ False
+
+ If called with an unsupported object (such as unicode) `False` is
+ returned.
+
+ >>> modifies_known_mutable("foo", "upper")
+ False
+ """
+ for typespec, unsafe in _mutable_spec:
+ if isinstance(obj, typespec):
+ return attr in unsafe
+ return False
+
+
+class SandboxedEnvironment(Environment):
+ """The sandboxed environment. It works like the regular environment but
+ tells the compiler to generate sandboxed code. Additionally subclasses of
+ this environment may override the methods that tell the runtime what
+ attributes or functions are safe to access.
+
+ If the template tries to access insecure code a :exc:`SecurityError` is
+    raised.  However, other exceptions may also occur during the rendering so
+    the caller has to ensure that all exceptions are caught.
+ """
+ sandboxed = True
+
+ #: default callback table for the binary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`binop_table`
+ default_binop_table = {
+ '+': operator.add,
+ '-': operator.sub,
+ '*': operator.mul,
+ '/': operator.truediv,
+ '//': operator.floordiv,
+ '**': operator.pow,
+ '%': operator.mod
+ }
+
+ #: default callback table for the unary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`unop_table`
+ default_unop_table = {
+ '+': operator.pos,
+ '-': operator.neg
+ }
+
+ #: a set of binary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_binop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`binop_table`.
+ #:
+ #: The following binary operators are interceptable:
+ #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_binops = frozenset()
+
+ #: a set of unary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_unop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`unop_table`.
+ #:
+ #: The following unary operators are interceptable: ``+``, ``-``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_unops = frozenset()
+
+ def intercept_unop(self, operator):
+ """Called during template compilation with the name of a unary
+ operator to check if it should be intercepted at runtime. If this
+        method returns `True`, :meth:`call_unop` is executed for this unary
+ operator. The default implementation of :meth:`call_unop` will use
+ the :attr:`unop_table` dictionary to perform the operator with the
+ same logic as the builtin one.
+
+ The following unary operators are interceptable: ``+`` and ``-``
+
+ Intercepted calls are always slower than the native operator call,
+ so make sure only to intercept the ones you are interested in.
+
+ .. versionadded:: 2.6
+ """
+ return False
+
+
+ def __init__(self, *args, **kwargs):
+ Environment.__init__(self, *args, **kwargs)
+ self.globals['range'] = safe_range
+ self.binop_table = self.default_binop_table.copy()
+ self.unop_table = self.default_unop_table.copy()
+
+ def is_safe_attribute(self, obj, attr, value):
+ """The sandboxed environment will call this method to check if the
+ attribute of an object is safe to access. Per default all attributes
+ starting with an underscore are considered private as well as the
+ special attributes of internal python objects as returned by the
+ :func:`is_internal_attribute` function.
+ """
+ return not (attr.startswith('_') or is_internal_attribute(obj, attr))
+
+ def is_safe_callable(self, obj):
+ """Check if an object is safely callable. Per default a function is
+ considered safe unless the `unsafe_callable` attribute exists and is
+ True. Override this method to alter the behavior, but this won't
+ affect the `unsafe` decorator from this module.
+ """
+ return not (getattr(obj, 'unsafe_callable', False) or
+ getattr(obj, 'alters_data', False))
+
+ def call_binop(self, context, operator, left, right):
+ """For intercepted binary operator calls (:meth:`intercepted_binops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.binop_table[operator](left, right)
+
+ def call_unop(self, context, operator, arg):
+ """For intercepted unary operator calls (:meth:`intercepted_unops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.unop_table[operator](arg)
+
+ def getitem(self, obj, argument):
+ """Subscribe an object from sandboxed code."""
+ try:
+ return obj[argument]
+ except (TypeError, LookupError):
+ if isinstance(argument, string_types):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ value = getattr(obj, attr)
+ except AttributeError:
+ pass
+ else:
+ if self.is_safe_attribute(obj, argument, value):
+ return value
+ return self.unsafe_undefined(obj, argument)
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj, attribute):
+ """Subscribe an object from sandboxed code and prefer the
+ attribute. The attribute passed *must* be a bytestring.
+ """
+ try:
+ value = getattr(obj, attribute)
+ except AttributeError:
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError):
+ pass
+ else:
+ if self.is_safe_attribute(obj, attribute, value):
+ return value
+ return self.unsafe_undefined(obj, attribute)
+ return self.undefined(obj=obj, name=attribute)
+
+ def unsafe_undefined(self, obj, attribute):
+ """Return an undefined object for unsafe attributes."""
+ return self.undefined('access to attribute %r of %r '
+ 'object is unsafe.' % (
+ attribute,
+ obj.__class__.__name__
+ ), name=attribute, obj=obj, exc=SecurityError)
+
+ def call(__self, __context, __obj, *args, **kwargs):
+ """Call an object from sandboxed code."""
+ # the double prefixes are to avoid double keyword argument
+ # errors when proxying the call.
+ if not __self.is_safe_callable(__obj):
+ raise SecurityError('%r is not safely callable' % (__obj,))
+ return __context.call(__obj, *args, **kwargs)
+
+
+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
+ """Works exactly like the regular `SandboxedEnvironment` but does not
+ permit modifications on the builtin mutable objects `list`, `set`, and
+ `dict` by using the :func:`modifies_known_mutable` function.
+ """
+
+ def is_safe_attribute(self, obj, attr, value):
+ if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
+ return False
+ return not modifies_known_mutable(obj, attr)
diff --git a/deps/v8/third_party/jinja2/tests.py b/deps/v8/third_party/jinja2/tests.py
new file mode 100644
index 0000000000..bb32349df0
--- /dev/null
+++ b/deps/v8/third_party/jinja2/tests.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.tests
+ ~~~~~~~~~~~~
+
+ Jinja test functions. Used with the "is" operator.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+from collections import Mapping
+from jinja2.runtime import Undefined
+from jinja2._compat import text_type, string_types, integer_types
+import decimal
+
+number_re = re.compile(r'^-?\d+(\.\d+)?$')
+regex_type = type(number_re)
+
+
+test_callable = callable
+
+
+def test_odd(value):
+ """Return true if the variable is odd."""
+ return value % 2 == 1
+
+
+def test_even(value):
+ """Return true if the variable is even."""
+ return value % 2 == 0
+
+
+def test_divisibleby(value, num):
+ """Check if a variable is divisible by a number."""
+ return value % num == 0
+
+
+def test_defined(value):
+ """Return true if the variable is defined:
+
+ .. sourcecode:: jinja
+
+ {% if variable is defined %}
+ value of variable: {{ variable }}
+ {% else %}
+ variable is not defined
+ {% endif %}
+
+ See the :func:`default` filter for a simple way to set undefined
+ variables.
+ """
+ return not isinstance(value, Undefined)
+
+
+def test_undefined(value):
+ """Like :func:`defined` but the other way round."""
+ return isinstance(value, Undefined)
+
+
+def test_none(value):
+ """Return true if the variable is none."""
+ return value is None
+
+
+def test_lower(value):
+ """Return true if the variable is lowercased."""
+ return text_type(value).islower()
+
+
+def test_upper(value):
+ """Return true if the variable is uppercased."""
+ return text_type(value).isupper()
+
+
+def test_string(value):
+ """Return true if the object is a string."""
+ return isinstance(value, string_types)
+
+
+def test_mapping(value):
+ """Return true if the object is a mapping (dict etc.).
+
+ .. versionadded:: 2.6
+ """
+ return isinstance(value, Mapping)
+
+
+def test_number(value):
+ """Return true if the variable is a number."""
+ return isinstance(value, integer_types + (float, complex, decimal.Decimal))
+
+
+def test_sequence(value):
+ """Return true if the variable is a sequence. Sequences are variables
+ that are iterable.
+ """
+ try:
+ len(value)
+ value.__getitem__
+ except:
+ return False
+ return True
+
+
+def test_equalto(value, other):
+ """Check if an object has the same value as another object:
+
+ .. sourcecode:: jinja
+
+ {% if foo.expression is equalto 42 %}
+ the foo attribute evaluates to the constant 42
+ {% endif %}
+
+ This appears to be a useless test as it does exactly the same as the
+ ``==`` operator, but it can be useful when used together with the
+ `selectattr` function:
+
+ .. sourcecode:: jinja
+
+ {{ users|selectattr("email", "equalto", "foo@bar.invalid") }}
+
+ .. versionadded:: 2.8
+ """
+ return value == other
+
+
+def test_sameas(value, other):
+ """Check if an object points to the same memory address than another
+ object:
+
+ .. sourcecode:: jinja
+
+ {% if foo.attribute is sameas false %}
+ the foo attribute really is the `False` singleton
+ {% endif %}
+ """
+ return value is other
+
+
+def test_iterable(value):
+ """Check if it's possible to iterate over an object."""
+ try:
+ iter(value)
+ except TypeError:
+ return False
+ return True
+
+
+def test_escaped(value):
+ """Check if the value is escaped."""
+ return hasattr(value, '__html__')
+
+
+TESTS = {
+ 'odd': test_odd,
+ 'even': test_even,
+ 'divisibleby': test_divisibleby,
+ 'defined': test_defined,
+ 'undefined': test_undefined,
+ 'none': test_none,
+ 'lower': test_lower,
+ 'upper': test_upper,
+ 'string': test_string,
+ 'mapping': test_mapping,
+ 'number': test_number,
+ 'sequence': test_sequence,
+ 'iterable': test_iterable,
+ 'callable': test_callable,
+ 'sameas': test_sameas,
+ 'equalto': test_equalto,
+ 'escaped': test_escaped
+}
diff --git a/deps/v8/third_party/jinja2/utils.py b/deps/v8/third_party/jinja2/utils.py
new file mode 100644
index 0000000000..cdd4cd3af0
--- /dev/null
+++ b/deps/v8/third_party/jinja2/utils.py
@@ -0,0 +1,531 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.utils
+ ~~~~~~~~~~~~
+
+ Utility functions.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+import errno
+from collections import deque
+from threading import Lock
+from jinja2._compat import text_type, string_types, implements_iterator, \
+ url_quote
+
+
+_word_split_re = re.compile(r'(\s+)')
+_punctuation_re = re.compile(
+ '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
+ '|'.join(map(re.escape, ('(', '<', '&lt;'))),
+ '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
+ )
+)
+_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
+_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
+_entity_re = re.compile(r'&([^;]+);')
+_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+_digits = '0123456789'
+
+# special singleton representing missing values for the runtime
+missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
+
+# internal code
+internal_code = set()
+
+concat = u''.join
+
+
+def contextfunction(f):
+ """This decorator can be used to mark a function or method context callable.
+ A context callable is passed the active :class:`Context` as first argument when
+ called from the template. This is useful if a function wants to get access
+ to the context or functions provided on the context object. For example
+ a function that returns a sorted list of template variables the current
+ template exports could look like this::
+
+ @contextfunction
+ def get_exported_names(context):
+ return sorted(context.exported_vars)
+ """
+ f.contextfunction = True
+ return f
+
+
+def evalcontextfunction(f):
+ """This decorator can be used to mark a function or method as an eval
+ context callable. This is similar to the :func:`contextfunction`
+ but instead of passing the context, an evaluation context object is
+ passed. For more information about the eval context, see
+ :ref:`eval-context`.
+
+ .. versionadded:: 2.4
+ """
+ f.evalcontextfunction = True
+ return f
+
+
+def environmentfunction(f):
+ """This decorator can be used to mark a function or method as environment
+ callable. This decorator works exactly like the :func:`contextfunction`
+ decorator just that the first argument is the active :class:`Environment`
+ and not context.
+ """
+ f.environmentfunction = True
+ return f
+
+
+def internalcode(f):
+ """Marks the function as internally used"""
+ internal_code.add(f.__code__)
+ return f
+
+
+def is_undefined(obj):
+ """Check if the object passed is undefined. This does nothing more than
+ performing an instance check against :class:`Undefined` but looks nicer.
+ This can be used for custom filters or tests that want to react to
+ undefined variables. For example a custom default filter can look like
+ this::
+
+ def default(var, default=''):
+ if is_undefined(var):
+ return default
+ return var
+ """
+ from jinja2.runtime import Undefined
+ return isinstance(obj, Undefined)
+
+
+def consume(iterable):
+ """Consumes an iterable without doing anything with it."""
+ for event in iterable:
+ pass
+
+
+def clear_caches():
+ """Jinja2 keeps internal caches for environments and lexers. These are
+ used so that Jinja2 doesn't have to recreate environments and lexers all
+ the time. Normally you don't have to care about that but if you are
+ messuring memory consumption you may want to clean the caches.
+ """
+ from jinja2.environment import _spontaneous_environments
+ from jinja2.lexer import _lexer_cache
+ _spontaneous_environments.clear()
+ _lexer_cache.clear()
+
+
+def import_string(import_name, silent=False):
+ """Imports an object based on a string. This is useful if you want to
+ use import paths as endpoints or something similar. An import path can
+ be specified either in dotted notation (``xml.sax.saxutils.escape``)
+ or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+ If the `silent` is True the return value will be `None` if the import
+ fails.
+
+ :return: imported object
+ """
+ try:
+ if ':' in import_name:
+ module, obj = import_name.split(':', 1)
+ elif '.' in import_name:
+ items = import_name.split('.')
+ module = '.'.join(items[:-1])
+ obj = items[-1]
+ else:
+ return __import__(import_name)
+ return getattr(__import__(module, None, None, [obj]), obj)
+ except (ImportError, AttributeError):
+ if not silent:
+ raise
+
+
+def open_if_exists(filename, mode='rb'):
+ """Returns a file descriptor for the filename if that file exists,
+ otherwise `None`.
+ """
+ try:
+ return open(filename, mode)
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
+ raise
+
+
+def object_type_repr(obj):
+ """Returns the name of the object's type. For some recognized
+ singletons the name of the object is returned instead. (For
+ example for `None` and `Ellipsis`).
+ """
+ if obj is None:
+ return 'None'
+ elif obj is Ellipsis:
+ return 'Ellipsis'
+ # __builtin__ in 2.x, builtins in 3.x
+ if obj.__class__.__module__ in ('__builtin__', 'builtins'):
+ name = obj.__class__.__name__
+ else:
+ name = obj.__class__.__module__ + '.' + obj.__class__.__name__
+ return '%s object' % name
+
+
+def pformat(obj, verbose=False):
+ """Prettyprint an object. Either use the `pretty` library or the
+ builtin `pprint`.
+ """
+ try:
+ from pretty import pretty
+ return pretty(obj, verbose=verbose)
+ except ImportError:
+ from pprint import pformat
+ return pformat(obj)
+
+
+def urlize(text, trim_url_limit=None, nofollow=False, target=None):
+ """Converts any URLs in text into clickable links. Works on http://,
+ https:// and www. links. Links can have trailing punctuation (periods,
+ commas, close-parens) and leading punctuation (opening parens) and
+ it'll still do the right thing.
+
+ If trim_url_limit is not None, the URLs in link text will be limited
+ to trim_url_limit characters.
+
+ If nofollow is True, the URLs in link text will get a rel="nofollow"
+ attribute.
+
+ If target is not None, a target attribute will be added to the link.
+ """
+ trim_url = lambda x, limit=trim_url_limit: limit is not None \
+ and (x[:limit] + (len(x) >=limit and '...'
+ or '')) or x
+ words = _word_split_re.split(text_type(escape(text)))
+ nofollow_attr = nofollow and ' rel="nofollow"' or ''
+ if target is not None and isinstance(target, string_types):
+ target_attr = ' target="%s"' % target
+ else:
+ target_attr = ''
+ for i, word in enumerate(words):
+ match = _punctuation_re.match(word)
+ if match:
+ lead, middle, trail = match.groups()
+ if middle.startswith('www.') or (
+ '@' not in middle and
+ not middle.startswith('http://') and
+ not middle.startswith('https://') and
+ len(middle) > 0 and
+ middle[0] in _letters + _digits and (
+ middle.endswith('.org') or
+ middle.endswith('.net') or
+ middle.endswith('.com')
+ )):
+ middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
+ nofollow_attr, target_attr, trim_url(middle))
+ if middle.startswith('http://') or \
+ middle.startswith('https://'):
+ middle = '<a href="%s"%s%s>%s</a>' % (middle,
+ nofollow_attr, target_attr, trim_url(middle))
+ if '@' in middle and not middle.startswith('www.') and \
+ not ':' in middle and _simple_email_re.match(middle):
+ middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+ if lead + middle + trail != word:
+ words[i] = lead + middle + trail
+ return u''.join(words)
+
+
+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+ """Generate some lorem ipsum for the template."""
+ from jinja2.constants import LOREM_IPSUM_WORDS
+ from random import choice, randrange
+ words = LOREM_IPSUM_WORDS.split()
+ result = []
+
+ for _ in range(n):
+ next_capitalized = True
+ last_comma = last_fullstop = 0
+ word = None
+ last = None
+ p = []
+
+ # each paragraph contains out of 20 to 100 words.
+ for idx, _ in enumerate(range(randrange(min, max))):
+ while True:
+ word = choice(words)
+ if word != last:
+ last = word
+ break
+ if next_capitalized:
+ word = word.capitalize()
+ next_capitalized = False
+ # add commas
+ if idx - randrange(3, 8) > last_comma:
+ last_comma = idx
+ last_fullstop += 2
+ word += ','
+ # add end of sentences
+ if idx - randrange(10, 20) > last_fullstop:
+ last_comma = last_fullstop = idx
+ word += '.'
+ next_capitalized = True
+ p.append(word)
+
+ # ensure that the paragraph ends with a dot.
+ p = u' '.join(p)
+ if p.endswith(','):
+ p = p[:-1] + '.'
+ elif not p.endswith('.'):
+ p += '.'
+ result.append(p)
+
+ if not html:
+ return u'\n\n'.join(result)
+ return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+
+
+def unicode_urlencode(obj, charset='utf-8', for_qs=False):
+ """URL escapes a single bytestring or unicode string with the
+ given charset if applicable to URL safe quoting under all rules
+ that need to be considered under all supported Python versions.
+
+ If non strings are provided they are converted to their unicode
+ representation first.
+ """
+ if not isinstance(obj, string_types):
+ obj = text_type(obj)
+ if isinstance(obj, text_type):
+ obj = obj.encode(charset)
+ safe = for_qs and b'' or b'/'
+ rv = text_type(url_quote(obj, safe))
+ if for_qs:
+ rv = rv.replace('%20', '+')
+ return rv
+
+
+class LRUCache(object):
+ """A simple LRU Cache implementation."""
+
+ # this is fast for small capacities (something below 1000) but doesn't
+ # scale. But as long as it's only used as storage for templates this
+ # won't do any harm.
+
+ def __init__(self, capacity):
+ self.capacity = capacity
+ self._mapping = {}
+ self._queue = deque()
+ self._postinit()
+
+ def _postinit(self):
+ # alias all queue methods for faster lookup
+ self._popleft = self._queue.popleft
+ self._pop = self._queue.pop
+ self._remove = self._queue.remove
+ self._wlock = Lock()
+ self._append = self._queue.append
+
+ def __getstate__(self):
+ return {
+ 'capacity': self.capacity,
+ '_mapping': self._mapping,
+ '_queue': self._queue
+ }
+
+ def __setstate__(self, d):
+ self.__dict__.update(d)
+ self._postinit()
+
+ def __getnewargs__(self):
+ return (self.capacity,)
+
+ def copy(self):
+ """Return a shallow copy of the instance."""
+ rv = self.__class__(self.capacity)
+ rv._mapping.update(self._mapping)
+ rv._queue = deque(self._queue)
+ return rv
+
+ def get(self, key, default=None):
+ """Return an item from the cache dict or `default`"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def setdefault(self, key, default=None):
+ """Set `default` if the key is not in the cache otherwise
+ leave unchanged. Return the value of this key.
+ """
+ self._wlock.acquire()
+ try:
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+ finally:
+ self._wlock.release()
+
+ def clear(self):
+ """Clear the cache."""
+ self._wlock.acquire()
+ try:
+ self._mapping.clear()
+ self._queue.clear()
+ finally:
+ self._wlock.release()
+
+ def __contains__(self, key):
+ """Check if a key exists in this cache."""
+ return key in self._mapping
+
+ def __len__(self):
+ """Return the current size of the cache."""
+ return len(self._mapping)
+
+ def __repr__(self):
+ return '<%s %r>' % (
+ self.__class__.__name__,
+ self._mapping
+ )
+
+ def __getitem__(self, key):
+ """Get an item from the cache. Moves the item up so that it has the
+ highest priority then.
+
+ Raise a `KeyError` if it does not exist.
+ """
+ self._wlock.acquire()
+ try:
+ rv = self._mapping[key]
+ if self._queue[-1] != key:
+ try:
+ self._remove(key)
+ except ValueError:
+ # if something removed the key from the container
+ # when we read, ignore the ValueError that we would
+ # get otherwise.
+ pass
+ self._append(key)
+ return rv
+ finally:
+ self._wlock.release()
+
+ def __setitem__(self, key, value):
+ """Sets the value for an item. Moves the item up so that it
+ has the highest priority then.
+ """
+ self._wlock.acquire()
+ try:
+ if key in self._mapping:
+ self._remove(key)
+ elif len(self._mapping) == self.capacity:
+ del self._mapping[self._popleft()]
+ self._append(key)
+ self._mapping[key] = value
+ finally:
+ self._wlock.release()
+
+ def __delitem__(self, key):
+ """Remove an item from the cache dict.
+ Raise a `KeyError` if it does not exist.
+ """
+ self._wlock.acquire()
+ try:
+ del self._mapping[key]
+ try:
+ self._remove(key)
+ except ValueError:
+ # __getitem__ is not locked, it might happen
+ pass
+ finally:
+ self._wlock.release()
+
+ def items(self):
+ """Return a list of items."""
+ result = [(key, self._mapping[key]) for key in list(self._queue)]
+ result.reverse()
+ return result
+
+ def iteritems(self):
+ """Iterate over all items."""
+ return iter(self.items())
+
+ def values(self):
+ """Return a list of all values."""
+ return [x[1] for x in self.items()]
+
+ def itervalue(self):
+ """Iterate over all values."""
+ return iter(self.values())
+
+ def keys(self):
+ """Return a list of all keys ordered by most recent usage."""
+ return list(self)
+
+ def iterkeys(self):
+ """Iterate over all keys in the cache dict, ordered by
+ the most recent usage.
+ """
+ return reversed(tuple(self._queue))
+
+ __iter__ = iterkeys
+
+ def __reversed__(self):
+ """Iterate over the values in the cache dict, oldest items
+ coming first.
+ """
+ return iter(tuple(self._queue))
+
+ __copy__ = copy
+
+
+# register the LRU cache as mutable mapping if possible
+try:
+ from collections import MutableMapping
+ MutableMapping.register(LRUCache)
+except ImportError:
+ pass
+
+
+@implements_iterator
+class Cycler(object):
+ """A cycle helper for templates."""
+
+ def __init__(self, *items):
+ if not items:
+ raise RuntimeError('at least one item has to be provided')
+ self.items = items
+ self.reset()
+
+ def reset(self):
+ """Resets the cycle."""
+ self.pos = 0
+
+ @property
+ def current(self):
+ """Returns the current item."""
+ return self.items[self.pos]
+
+ def __next__(self):
+ """Goes one item ahead and returns it."""
+ rv = self.current
+ self.pos = (self.pos + 1) % len(self.items)
+ return rv
+
+
+class Joiner(object):
+ """A joining helper for templates."""
+
+ def __init__(self, sep=u', '):
+ self.sep = sep
+ self.used = False
+
+ def __call__(self):
+ if not self.used:
+ self.used = True
+ return u''
+ return self.sep
+
+
+# Imported here because that's where it was in the past
+from markupsafe import Markup, escape, soft_unicode
diff --git a/deps/v8/third_party/jinja2/visitor.py b/deps/v8/third_party/jinja2/visitor.py
new file mode 100644
index 0000000000..413e7c309d
--- /dev/null
+++ b/deps/v8/third_party/jinja2/visitor.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+ jinja2.visitor
+ ~~~~~~~~~~~~~~
+
+ This module implements a visitor for the nodes.
+
+ :copyright: (c) 2010 by the Jinja Team.
+ :license: BSD.
+"""
+from jinja2.nodes import Node
+
+
+class NodeVisitor(object):
+ """Walks the abstract syntax tree and call visitor functions for every
+ node found. The visitor functions may return values which will be
+ forwarded by the `visit` method.
+
+ Per default the visitor functions for the nodes are ``'visit_'`` +
+ class name of the node. So a `TryFinally` node visit function would
+ be `visit_TryFinally`. This behavior can be changed by overriding
+ the `get_visitor` function. If no visitor function exists for a node
+ (return value `None`) the `generic_visit` visitor is used instead.
+ """
+
+ def get_visitor(self, node):
+ """Return the visitor function for this node or `None` if no visitor
+ exists for this node. In that case the generic visit function is
+ used instead.
+ """
+ method = 'visit_' + node.__class__.__name__
+ return getattr(self, method, None)
+
+ def visit(self, node, *args, **kwargs):
+ """Visit a node."""
+ f = self.get_visitor(node)
+ if f is not None:
+ return f(node, *args, **kwargs)
+ return self.generic_visit(node, *args, **kwargs)
+
+ def generic_visit(self, node, *args, **kwargs):
+ """Called if no explicit visitor function exists for a node."""
+ for node in node.iter_child_nodes():
+ self.visit(node, *args, **kwargs)
+
+
+class NodeTransformer(NodeVisitor):
+ """Walks the abstract syntax tree and allows modifications of nodes.
+
+ The `NodeTransformer` will walk the AST and use the return value of the
+ visitor functions to replace or remove the old node. If the return
+ value of the visitor function is `None` the node will be removed
+ from the previous location otherwise it's replaced with the return
+ value. The return value may be the original node in which case no
+ replacement takes place.
+ """
+
+ def generic_visit(self, node, *args, **kwargs):
+ for field, old_value in node.iter_fields():
+ if isinstance(old_value, list):
+ new_values = []
+ for value in old_value:
+ if isinstance(value, Node):
+ value = self.visit(value, *args, **kwargs)
+ if value is None:
+ continue
+ elif not isinstance(value, Node):
+ new_values.extend(value)
+ continue
+ new_values.append(value)
+ old_value[:] = new_values
+ elif isinstance(old_value, Node):
+ new_node = self.visit(old_value, *args, **kwargs)
+ if new_node is None:
+ delattr(node, field)
+ else:
+ setattr(node, field, new_node)
+ return node
+
+ def visit_list(self, node, *args, **kwargs):
+ """As transformers may return lists in some places this method
+ can be used to enforce a list as return value.
+ """
+ rv = self.visit(node, *args, **kwargs)
+ if not isinstance(rv, list):
+ rv = [rv]
+ return rv
diff --git a/deps/v8/third_party/markupsafe/AUTHORS b/deps/v8/third_party/markupsafe/AUTHORS
new file mode 100644
index 0000000000..f7e2942ecc
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/AUTHORS
@@ -0,0 +1,13 @@
+MarkupSafe is written and maintained by Armin Ronacher and
+various contributors:
+
+Development Lead
+````````````````
+
+- Armin Ronacher <armin.ronacher@active-4.com>
+
+Patches and Suggestions
+```````````````````````
+
+- Georg Brandl
+- Mickaël Guérin
diff --git a/deps/v8/third_party/markupsafe/LICENSE b/deps/v8/third_party/markupsafe/LICENSE
new file mode 100644
index 0000000000..5d2693890d
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/LICENSE
@@ -0,0 +1,33 @@
+Copyright (c) 2010 by Armin Ronacher and contributors. See AUTHORS
+for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the software as well
+as documentation, with or without modification, are permitted provided
+that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+* The names of the contributors may not be used to endorse or
+ promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
+NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
diff --git a/deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5 b/deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5
new file mode 100644
index 0000000000..1348d1eea1
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.md5
@@ -0,0 +1 @@
+f8d252fd05371e51dec2fe9a36890687 MarkupSafe-0.18.tar.gz
diff --git a/deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512 b/deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512
new file mode 100644
index 0000000000..ab752200d5
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/MarkupSafe-0.18.tar.gz.sha512
@@ -0,0 +1 @@
+0438ddf0fdab465c40d9afba8c14ad346be0868df654c11130d05e329992d456a9bc278551970cbd09244a29c77213885d0c363c951b0cfd4d9aa95b248ecff5 MarkupSafe-0.18.tar.gz
diff --git a/deps/v8/third_party/markupsafe/OWNERS b/deps/v8/third_party/markupsafe/OWNERS
new file mode 100644
index 0000000000..8edbdf893c
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/OWNERS
@@ -0,0 +1,3 @@
+timloh@chromium.org
+haraken@chromium.org
+nbarth@chromium.org
diff --git a/deps/v8/third_party/markupsafe/README.chromium b/deps/v8/third_party/markupsafe/README.chromium
new file mode 100644
index 0000000000..0fcab52fa2
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/README.chromium
@@ -0,0 +1,24 @@
+Name: MarkupSafe Python Safe String Class
+Short Name: markupsafe
+URL: https://github.com/mitsuhiko/markupsafe
+Version: 0.18
+License: BSD 3-clause License
+License File: NOT_SHIPPED
+Security Critical: no
+
+Description:
+Safe string class, used by Jinja2 template engine.
+
+Source:
+https://pypi.python.org/packages/source/M/MarkupSafe/MarkupSafe-0.18.tar.gz
+MD5: f8d252fd05371e51dec2fe9a36890687
+SHA-512: 0438ddf0fdab465c40d9afba8c14ad346be0868df654c11130d05e329992d456
+ a9bc278551970cbd09244a29c77213885d0c363c951b0cfd4d9aa95b248ecff5
+
+Local Modifications:
+This only includes the markup directory from the tarball and the LICENSE and
+AUTHORS files, removing the unneeded unit tests (tests.py).
+Also includes install script (get_markupsafe.sh) and files of hashes (MD5 is
+also posted on website, SHA-512 computed locally); script checks hash then
+unpacks archive and installs desired files.
+Retrieve or update by executing markupsafe/get_markupsafe.sh from third_party.
diff --git a/deps/v8/third_party/markupsafe/__init__.py b/deps/v8/third_party/markupsafe/__init__.py
new file mode 100644
index 0000000000..25f00d3a4f
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/__init__.py
@@ -0,0 +1,234 @@
+# -*- coding: utf-8 -*-
+"""
+ markupsafe
+ ~~~~~~~~~~
+
+ Implements a Markup string.
+
+ :copyright: (c) 2010 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import re
+from markupsafe._compat import text_type, string_types, int_types, \
+ unichr, PY2
+
+
+__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
+
+
+_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
+_entity_re = re.compile(r'&([^;]+);')
+
+
+class Markup(text_type):
+ r"""Marks a string as being safe for inclusion in HTML/XML output without
+ needing to be escaped. This implements the `__html__` interface a couple
+ of frameworks and web applications use. :class:`Markup` is a direct
+ subclass of `unicode` and provides all the methods of `unicode` just that
+ it escapes arguments passed and always returns `Markup`.
+
+ The `escape` function returns markup objects so that double escaping can't
+ happen.
+
+ The constructor of the :class:`Markup` class can be used for three
+ different things: When passed an unicode object it's assumed to be safe,
+ when passed an object with an HTML representation (has an `__html__`
+ method) that representation is used, otherwise the object passed is
+ converted into a unicode string and then assumed to be safe:
+
+ >>> Markup("Hello <em>World</em>!")
+ Markup(u'Hello <em>World</em>!')
+ >>> class Foo(object):
+ ... def __html__(self):
+ ... return '<a href="#">foo</a>'
+ ...
+ >>> Markup(Foo())
+ Markup(u'<a href="#">foo</a>')
+
+ If you want object passed being always treated as unsafe you can use the
+ :meth:`escape` classmethod to create a :class:`Markup` object:
+
+ >>> Markup.escape("Hello <em>World</em>!")
+ Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')
+
+ Operations on a markup string are markup aware which means that all
+ arguments are passed through the :func:`escape` function:
+
+ >>> em = Markup("<em>%s</em>")
+ >>> em % "foo & bar"
+ Markup(u'<em>foo &amp; bar</em>')
+ >>> strong = Markup("<strong>%(text)s</strong>")
+ >>> strong % {'text': '<blink>hacker here</blink>'}
+ Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>')
+ >>> Markup("<em>Hello</em> ") + "<foo>"
+ Markup(u'<em>Hello</em> &lt;foo&gt;')
+ """
+ __slots__ = ()
+
+ def __new__(cls, base=u'', encoding=None, errors='strict'):
+ if hasattr(base, '__html__'):
+ base = base.__html__()
+ if encoding is None:
+ return text_type.__new__(cls, base)
+ return text_type.__new__(cls, base, encoding, errors)
+
+ def __html__(self):
+ return self
+
+ def __add__(self, other):
+ if isinstance(other, string_types) or hasattr(other, '__html__'):
+ return self.__class__(super(Markup, self).__add__(self.escape(other)))
+ return NotImplemented
+
+ def __radd__(self, other):
+ if hasattr(other, '__html__') or isinstance(other, string_types):
+ return self.escape(other).__add__(self)
+ return NotImplemented
+
+ def __mul__(self, num):
+ if isinstance(num, int_types):
+ return self.__class__(text_type.__mul__(self, num))
+ return NotImplemented
+ __rmul__ = __mul__
+
+ def __mod__(self, arg):
+ if isinstance(arg, tuple):
+ arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
+ else:
+ arg = _MarkupEscapeHelper(arg, self.escape)
+ return self.__class__(text_type.__mod__(self, arg))
+
+ def __repr__(self):
+ return '%s(%s)' % (
+ self.__class__.__name__,
+ text_type.__repr__(self)
+ )
+
+ def join(self, seq):
+ return self.__class__(text_type.join(self, map(self.escape, seq)))
+ join.__doc__ = text_type.join.__doc__
+
+ def split(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
+ split.__doc__ = text_type.split.__doc__
+
+ def rsplit(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
+ rsplit.__doc__ = text_type.rsplit.__doc__
+
+ def splitlines(self, *args, **kwargs):
+ return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
+ splitlines.__doc__ = text_type.splitlines.__doc__
+
+ def unescape(self):
+ r"""Unescape markup again into an text_type string. This also resolves
+ known HTML4 and XHTML entities:
+
+ >>> Markup("Main &raquo; <em>About</em>").unescape()
+ u'Main \xbb <em>About</em>'
+ """
+ from markupsafe._constants import HTML_ENTITIES
+ def handle_match(m):
+ name = m.group(1)
+ if name in HTML_ENTITIES:
+ return unichr(HTML_ENTITIES[name])
+ try:
+ if name[:2] in ('#x', '#X'):
+ return unichr(int(name[2:], 16))
+ elif name.startswith('#'):
+ return unichr(int(name[1:]))
+ except ValueError:
+ pass
+ return u''
+ return _entity_re.sub(handle_match, text_type(self))
+
+ def striptags(self):
+ r"""Unescape markup into an text_type string and strip all tags. This
+ also resolves known HTML4 and XHTML entities. Whitespace is
+ normalized to one:
+
+ >>> Markup("Main &raquo; <em>About</em>").striptags()
+ u'Main \xbb About'
+ """
+ stripped = u' '.join(_striptags_re.sub('', self).split())
+ return Markup(stripped).unescape()
+
+ @classmethod
+ def escape(cls, s):
+ """Escape the string. Works like :func:`escape` with the difference
+ that for subclasses of :class:`Markup` this function would return the
+ correct subclass.
+ """
+ rv = escape(s)
+ if rv.__class__ is not cls:
+ return cls(rv)
+ return rv
+
+ def make_wrapper(name):
+ orig = getattr(text_type, name)
+ def func(self, *args, **kwargs):
+ args = _escape_argspec(list(args), enumerate(args), self.escape)
+ #_escape_argspec(kwargs, kwargs.iteritems(), None)
+ return self.__class__(orig(self, *args, **kwargs))
+ func.__name__ = orig.__name__
+ func.__doc__ = orig.__doc__
+ return func
+
+ for method in '__getitem__', 'capitalize', \
+ 'title', 'lower', 'upper', 'replace', 'ljust', \
+ 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
+ 'translate', 'expandtabs', 'swapcase', 'zfill':
+ locals()[method] = make_wrapper(method)
+
+ # new in python 2.5
+ if hasattr(text_type, 'partition'):
+ def partition(self, sep):
+ return tuple(map(self.__class__,
+ text_type.partition(self, self.escape(sep))))
+ def rpartition(self, sep):
+ return tuple(map(self.__class__,
+ text_type.rpartition(self, self.escape(sep))))
+
+ # new in python 2.6
+ if hasattr(text_type, 'format'):
+ format = make_wrapper('format')
+
+ # not in python 3
+ if hasattr(text_type, '__getslice__'):
+ __getslice__ = make_wrapper('__getslice__')
+
+ del method, make_wrapper
+
+
+def _escape_argspec(obj, iterable, escape):
+ """Helper for various string-wrapped functions."""
+ for key, value in iterable:
+ if hasattr(value, '__html__') or isinstance(value, string_types):
+ obj[key] = escape(value)
+ return obj
+
+
+class _MarkupEscapeHelper(object):
+ """Helper for Markup.__mod__"""
+
+ def __init__(self, obj, escape):
+ self.obj = obj
+ self.escape = escape
+
+ __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
+ __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
+ __repr__ = lambda s: str(s.escape(repr(s.obj)))
+ __int__ = lambda s: int(s.obj)
+ __float__ = lambda s: float(s.obj)
+
+
+# we have to import it down here as the speedups and native
+# modules imports the markup type which is define above.
+try:
+ from markupsafe._speedups import escape, escape_silent, soft_unicode
+except ImportError:
+ from markupsafe._native import escape, escape_silent, soft_unicode
+
+if not PY2:
+ soft_str = soft_unicode
+ __all__.append('soft_str')
diff --git a/deps/v8/third_party/markupsafe/_compat.py b/deps/v8/third_party/markupsafe/_compat.py
new file mode 100644
index 0000000000..29e4a3dac1
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/_compat.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""
+ markupsafe._compat
+ ~~~~~~~~~~~~~~~~~~
+
+ Compatibility module for different Python versions.
+
+ :copyright: (c) 2013 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if not PY2:
+ text_type = str
+ string_types = (str,)
+ unichr = chr
+ int_types = (int,)
+else:
+ text_type = unicode
+ string_types = (str, unicode)
+ unichr = unichr
+ int_types = (int, long)
diff --git a/deps/v8/third_party/markupsafe/_constants.py b/deps/v8/third_party/markupsafe/_constants.py
new file mode 100644
index 0000000000..919bf03c50
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/_constants.py
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+"""
+ markupsafe._constants
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Highlevel implementation of the Markup string.
+
+ :copyright: (c) 2010 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+
+
+HTML_ENTITIES = {
+ 'AElig': 198,
+ 'Aacute': 193,
+ 'Acirc': 194,
+ 'Agrave': 192,
+ 'Alpha': 913,
+ 'Aring': 197,
+ 'Atilde': 195,
+ 'Auml': 196,
+ 'Beta': 914,
+ 'Ccedil': 199,
+ 'Chi': 935,
+ 'Dagger': 8225,
+ 'Delta': 916,
+ 'ETH': 208,
+ 'Eacute': 201,
+ 'Ecirc': 202,
+ 'Egrave': 200,
+ 'Epsilon': 917,
+ 'Eta': 919,
+ 'Euml': 203,
+ 'Gamma': 915,
+ 'Iacute': 205,
+ 'Icirc': 206,
+ 'Igrave': 204,
+ 'Iota': 921,
+ 'Iuml': 207,
+ 'Kappa': 922,
+ 'Lambda': 923,
+ 'Mu': 924,
+ 'Ntilde': 209,
+ 'Nu': 925,
+ 'OElig': 338,
+ 'Oacute': 211,
+ 'Ocirc': 212,
+ 'Ograve': 210,
+ 'Omega': 937,
+ 'Omicron': 927,
+ 'Oslash': 216,
+ 'Otilde': 213,
+ 'Ouml': 214,
+ 'Phi': 934,
+ 'Pi': 928,
+ 'Prime': 8243,
+ 'Psi': 936,
+ 'Rho': 929,
+ 'Scaron': 352,
+ 'Sigma': 931,
+ 'THORN': 222,
+ 'Tau': 932,
+ 'Theta': 920,
+ 'Uacute': 218,
+ 'Ucirc': 219,
+ 'Ugrave': 217,
+ 'Upsilon': 933,
+ 'Uuml': 220,
+ 'Xi': 926,
+ 'Yacute': 221,
+ 'Yuml': 376,
+ 'Zeta': 918,
+ 'aacute': 225,
+ 'acirc': 226,
+ 'acute': 180,
+ 'aelig': 230,
+ 'agrave': 224,
+ 'alefsym': 8501,
+ 'alpha': 945,
+ 'amp': 38,
+ 'and': 8743,
+ 'ang': 8736,
+ 'apos': 39,
+ 'aring': 229,
+ 'asymp': 8776,
+ 'atilde': 227,
+ 'auml': 228,
+ 'bdquo': 8222,
+ 'beta': 946,
+ 'brvbar': 166,
+ 'bull': 8226,
+ 'cap': 8745,
+ 'ccedil': 231,
+ 'cedil': 184,
+ 'cent': 162,
+ 'chi': 967,
+ 'circ': 710,
+ 'clubs': 9827,
+ 'cong': 8773,
+ 'copy': 169,
+ 'crarr': 8629,
+ 'cup': 8746,
+ 'curren': 164,
+ 'dArr': 8659,
+ 'dagger': 8224,
+ 'darr': 8595,
+ 'deg': 176,
+ 'delta': 948,
+ 'diams': 9830,
+ 'divide': 247,
+ 'eacute': 233,
+ 'ecirc': 234,
+ 'egrave': 232,
+ 'empty': 8709,
+ 'emsp': 8195,
+ 'ensp': 8194,
+ 'epsilon': 949,
+ 'equiv': 8801,
+ 'eta': 951,
+ 'eth': 240,
+ 'euml': 235,
+ 'euro': 8364,
+ 'exist': 8707,
+ 'fnof': 402,
+ 'forall': 8704,
+ 'frac12': 189,
+ 'frac14': 188,
+ 'frac34': 190,
+ 'frasl': 8260,
+ 'gamma': 947,
+ 'ge': 8805,
+ 'gt': 62,
+ 'hArr': 8660,
+ 'harr': 8596,
+ 'hearts': 9829,
+ 'hellip': 8230,
+ 'iacute': 237,
+ 'icirc': 238,
+ 'iexcl': 161,
+ 'igrave': 236,
+ 'image': 8465,
+ 'infin': 8734,
+ 'int': 8747,
+ 'iota': 953,
+ 'iquest': 191,
+ 'isin': 8712,
+ 'iuml': 239,
+ 'kappa': 954,
+ 'lArr': 8656,
+ 'lambda': 955,
+ 'lang': 9001,
+ 'laquo': 171,
+ 'larr': 8592,
+ 'lceil': 8968,
+ 'ldquo': 8220,
+ 'le': 8804,
+ 'lfloor': 8970,
+ 'lowast': 8727,
+ 'loz': 9674,
+ 'lrm': 8206,
+ 'lsaquo': 8249,
+ 'lsquo': 8216,
+ 'lt': 60,
+ 'macr': 175,
+ 'mdash': 8212,
+ 'micro': 181,
+ 'middot': 183,
+ 'minus': 8722,
+ 'mu': 956,
+ 'nabla': 8711,
+ 'nbsp': 160,
+ 'ndash': 8211,
+ 'ne': 8800,
+ 'ni': 8715,
+ 'not': 172,
+ 'notin': 8713,
+ 'nsub': 8836,
+ 'ntilde': 241,
+ 'nu': 957,
+ 'oacute': 243,
+ 'ocirc': 244,
+ 'oelig': 339,
+ 'ograve': 242,
+ 'oline': 8254,
+ 'omega': 969,
+ 'omicron': 959,
+ 'oplus': 8853,
+ 'or': 8744,
+ 'ordf': 170,
+ 'ordm': 186,
+ 'oslash': 248,
+ 'otilde': 245,
+ 'otimes': 8855,
+ 'ouml': 246,
+ 'para': 182,
+ 'part': 8706,
+ 'permil': 8240,
+ 'perp': 8869,
+ 'phi': 966,
+ 'pi': 960,
+ 'piv': 982,
+ 'plusmn': 177,
+ 'pound': 163,
+ 'prime': 8242,
+ 'prod': 8719,
+ 'prop': 8733,
+ 'psi': 968,
+ 'quot': 34,
+ 'rArr': 8658,
+ 'radic': 8730,
+ 'rang': 9002,
+ 'raquo': 187,
+ 'rarr': 8594,
+ 'rceil': 8969,
+ 'rdquo': 8221,
+ 'real': 8476,
+ 'reg': 174,
+ 'rfloor': 8971,
+ 'rho': 961,
+ 'rlm': 8207,
+ 'rsaquo': 8250,
+ 'rsquo': 8217,
+ 'sbquo': 8218,
+ 'scaron': 353,
+ 'sdot': 8901,
+ 'sect': 167,
+ 'shy': 173,
+ 'sigma': 963,
+ 'sigmaf': 962,
+ 'sim': 8764,
+ 'spades': 9824,
+ 'sub': 8834,
+ 'sube': 8838,
+ 'sum': 8721,
+ 'sup': 8835,
+ 'sup1': 185,
+ 'sup2': 178,
+ 'sup3': 179,
+ 'supe': 8839,
+ 'szlig': 223,
+ 'tau': 964,
+ 'there4': 8756,
+ 'theta': 952,
+ 'thetasym': 977,
+ 'thinsp': 8201,
+ 'thorn': 254,
+ 'tilde': 732,
+ 'times': 215,
+ 'trade': 8482,
+ 'uArr': 8657,
+ 'uacute': 250,
+ 'uarr': 8593,
+ 'ucirc': 251,
+ 'ugrave': 249,
+ 'uml': 168,
+ 'upsih': 978,
+ 'upsilon': 965,
+ 'uuml': 252,
+ 'weierp': 8472,
+ 'xi': 958,
+ 'yacute': 253,
+ 'yen': 165,
+ 'yuml': 255,
+ 'zeta': 950,
+ 'zwj': 8205,
+ 'zwnj': 8204
+}
diff --git a/deps/v8/third_party/markupsafe/_native.py b/deps/v8/third_party/markupsafe/_native.py
new file mode 100644
index 0000000000..5e83f10a11
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/_native.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+"""
+ markupsafe._native
+ ~~~~~~~~~~~~~~~~~~
+
+ Native Python implementation the C module is not compiled.
+
+ :copyright: (c) 2010 by Armin Ronacher.
+ :license: BSD, see LICENSE for more details.
+"""
+from markupsafe import Markup
+from markupsafe._compat import text_type
+
+
+def escape(s):
+ """Convert the characters &, <, >, ' and " in string s to HTML-safe
+ sequences. Use this if you need to display text that might contain
+ such characters in HTML. Marks return value as markup string.
+ """
+ if hasattr(s, '__html__'):
+ return s.__html__()
+ return Markup(text_type(s)
+ .replace('&', '&amp;')
+ .replace('>', '&gt;')
+ .replace('<', '&lt;')
+ .replace("'", '&#39;')
+ .replace('"', '&#34;')
+ )
+
+
+def escape_silent(s):
+ """Like :func:`escape` but converts `None` into an empty
+ markup string.
+ """
+ if s is None:
+ return Markup()
+ return escape(s)
+
+
+def soft_unicode(s):
+ """Make a string unicode if it isn't already. That way a markup
+ string is not converted back to unicode.
+ """
+ if not isinstance(s, text_type):
+ s = text_type(s)
+ return s
diff --git a/deps/v8/third_party/markupsafe/_speedups.c b/deps/v8/third_party/markupsafe/_speedups.c
new file mode 100644
index 0000000000..f349febf22
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/_speedups.c
@@ -0,0 +1,239 @@
+/**
+ * markupsafe._speedups
+ * ~~~~~~~~~~~~~~~~~~~~
+ *
+ * This module implements functions for automatic escaping in C for better
+ * performance.
+ *
+ * :copyright: (c) 2010 by Armin Ronacher.
+ * :license: BSD.
+ */
+
+#include <Python.h>
+
+#define ESCAPED_CHARS_TABLE_SIZE 63
+#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL)));
+
+#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN)
+typedef int Py_ssize_t;
+#define PY_SSIZE_T_MAX INT_MAX
+#define PY_SSIZE_T_MIN INT_MIN
+#endif
+
+
+static PyObject* markup;
+static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
+static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
+
+static int
+init_constants(void)
+{
+ PyObject *module;
+ /* happing of characters to replace */
+ escaped_chars_repl['"'] = UNICHR("&#34;");
+ escaped_chars_repl['\''] = UNICHR("&#39;");
+ escaped_chars_repl['&'] = UNICHR("&amp;");
+ escaped_chars_repl['<'] = UNICHR("&lt;");
+ escaped_chars_repl['>'] = UNICHR("&gt;");
+
+ /* lengths of those characters when replaced - 1 */
+ memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
+ escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
+ escaped_chars_delta_len['&'] = 4;
+ escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
+
+ /* import markup type so that we can mark the return value */
+ module = PyImport_ImportModule("markupsafe");
+ if (!module)
+ return 0;
+ markup = PyObject_GetAttrString(module, "Markup");
+ Py_DECREF(module);
+
+ return 1;
+}
+
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+ PyUnicodeObject *out;
+ Py_UNICODE *inp = PyUnicode_AS_UNICODE(in);
+ const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in);
+ Py_UNICODE *next_escp;
+ Py_UNICODE *outp;
+ Py_ssize_t delta=0, erepl=0, delta_len=0;
+
+ /* First we need to figure out how long the escaped string will be */
+ while (*(inp) || inp < inp_end) {
+ if (*inp < ESCAPED_CHARS_TABLE_SIZE) {
+ delta += escaped_chars_delta_len[*inp];
+ erepl += !!escaped_chars_delta_len[*inp];
+ }
+ ++inp;
+ }
+
+ /* Do we need to escape anything at all? */
+ if (!erepl) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta);
+ if (!out)
+ return NULL;
+
+ outp = PyUnicode_AS_UNICODE(out);
+ inp = PyUnicode_AS_UNICODE(in);
+ while (erepl-- > 0) {
+ /* look for the next substitution */
+ next_escp = inp;
+ while (next_escp < inp_end) {
+ if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
+ (delta_len = escaped_chars_delta_len[*next_escp])) {
+ ++delta_len;
+ break;
+ }
+ ++next_escp;
+ }
+
+ if (next_escp > inp) {
+ /* copy unescaped chars between inp and next_escp */
+ Py_UNICODE_COPY(outp, inp, next_escp-inp);
+ outp += next_escp - inp;
+ }
+
+ /* escape 'next_escp' */
+ Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
+ outp += delta_len;
+
+ inp = next_escp + 1;
+ }
+ if (inp < inp_end)
+ Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in)));
+
+ return (PyObject*)out;
+}
+
+
+static PyObject*
+escape(PyObject *self, PyObject *text)
+{
+ PyObject *s = NULL, *rv = NULL, *html;
+
+ /* we don't have to escape integers, bools or floats */
+ if (PyLong_CheckExact(text) ||
+#if PY_MAJOR_VERSION < 3
+ PyInt_CheckExact(text) ||
+#endif
+ PyFloat_CheckExact(text) || PyBool_Check(text) ||
+ text == Py_None)
+ return PyObject_CallFunctionObjArgs(markup, text, NULL);
+
+ /* if the object has an __html__ method that performs the escaping */
+ html = PyObject_GetAttrString(text, "__html__");
+ if (html) {
+ rv = PyObject_CallObject(html, NULL);
+ Py_DECREF(html);
+ return rv;
+ }
+
+ /* otherwise make the object unicode if it isn't, then escape */
+ PyErr_Clear();
+ if (!PyUnicode_Check(text)) {
+#if PY_MAJOR_VERSION < 3
+ PyObject *unicode = PyObject_Unicode(text);
+#else
+ PyObject *unicode = PyObject_Str(text);
+#endif
+ if (!unicode)
+ return NULL;
+ s = escape_unicode((PyUnicodeObject*)unicode);
+ Py_DECREF(unicode);
+ }
+ else
+ s = escape_unicode((PyUnicodeObject*)text);
+
+ /* convert the unicode string into a markup object. */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+}
+
+
+static PyObject*
+escape_silent(PyObject *self, PyObject *text)
+{
+ if (text != Py_None)
+ return escape(self, text);
+ return PyObject_CallFunctionObjArgs(markup, NULL);
+}
+
+
+static PyObject*
+soft_unicode(PyObject *self, PyObject *s)
+{
+ if (!PyUnicode_Check(s))
+#if PY_MAJOR_VERSION < 3
+ return PyObject_Unicode(s);
+#else
+ return PyObject_Str(s);
+#endif
+ Py_INCREF(s);
+ return s;
+}
+
+
+static PyMethodDef module_methods[] = {
+ {"escape", (PyCFunction)escape, METH_O,
+ "escape(s) -> markup\n\n"
+ "Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
+ "sequences. Use this if you need to display text that might contain\n"
+ "such characters in HTML. Marks return value as markup string."},
+ {"escape_silent", (PyCFunction)escape_silent, METH_O,
+ "escape_silent(s) -> markup\n\n"
+ "Like escape but converts None to an empty string."},
+ {"soft_unicode", (PyCFunction)soft_unicode, METH_O,
+ "soft_unicode(object) -> string\n\n"
+ "Make a string unicode if it isn't already. That way a markup\n"
+ "string is not converted back to unicode."},
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+
+#if PY_MAJOR_VERSION < 3
+
+#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+PyMODINIT_FUNC
+init_speedups(void)
+{
+ if (!init_constants())
+ return;
+
+ Py_InitModule3("markupsafe._speedups", module_methods, "");
+}
+
+#else /* Python 3.x module initialization */
+
+static struct PyModuleDef module_definition = {
+ PyModuleDef_HEAD_INIT,
+ "markupsafe._speedups",
+ NULL,
+ -1,
+ module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyMODINIT_FUNC
+PyInit__speedups(void)
+{
+ if (!init_constants())
+ return NULL;
+
+ return PyModule_Create(&module_definition);
+}
+
+#endif
diff --git a/deps/v8/third_party/markupsafe/get_markupsafe.sh b/deps/v8/third_party/markupsafe/get_markupsafe.sh
new file mode 100755
index 0000000000..d268832df8
--- /dev/null
+++ b/deps/v8/third_party/markupsafe/get_markupsafe.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+# Download and extract MarkupSafe
+# Homepage:
+# https://github.com/mitsuhiko/markupsafe
+# Download page:
+# https://pypi.python.org/pypi/MarkupSafe
+PACKAGE='MarkupSafe'
+VERSION='0.18'
+PACKAGE_DIR='markupsafe'
+
+CHROMIUM_FILES="README.chromium OWNERS get_markupsafe.sh"
+EXTRA_FILES='LICENSE AUTHORS'
+REMOVE_FILES='tests.py'
+
+SRC_URL='https://pypi.python.org/packages/source/'
+SRC_URL+="${PACKAGE:0:1}/$PACKAGE/$PACKAGE-$VERSION.tar.gz"
+FILENAME="$(basename $SRC_URL)"
+MD5_FILENAME="$FILENAME.md5"
+SHA512_FILENAME="$FILENAME.sha512"
+CHROMIUM_FILES+=" $MD5_FILENAME $SHA512_FILENAME"
+
+BUILD_DIR="$PACKAGE-$VERSION"
+THIRD_PARTY="$(dirname $(realpath $(dirname "${BASH_SOURCE[0]}")))"
+INSTALL_DIR="$THIRD_PARTY/$PACKAGE_DIR"
+OUT_DIR="$INSTALL_DIR/$BUILD_DIR/$PACKAGE_DIR"
+OLD_DIR="$THIRD_PARTY/$PACKAGE_DIR.old"
+
+function check_hashes {
+ # Hashes generated via:
+ # FILENAME=MarkupSafe-0.18.tar.gz
+ # md5sum "$FILENAME" > "$FILENAME.md5"
+ # sha512sum "$FILENAME" > "$FILENAME.sha512"
+ # unset FILENAME
+
+ # MD5
+ if ! [ -f "$MD5_FILENAME" ]
+ then
+ echo "MD5 hash file $MD5_FILENAME not found, could not verify archive"
+ exit 1
+ fi
+
+ # 32-digit hash, followed by filename
+ MD5_HASHFILE_REGEX="^[0-9a-f]{32} $FILENAME"
+ if ! grep --extended-regex --line-regex --silent \
+ "$MD5_HASHFILE_REGEX" "$MD5_FILENAME"
+ then
+ echo "MD5 hash file $MD5_FILENAME does not contain hash for $FILENAME," \
+ 'could not verify archive'
+ echo 'Hash file contents are:'
+ cat "$MD5_FILENAME"
+ exit 1
+ fi
+
+ if ! md5sum --check "$MD5_FILENAME"
+ then
+ echo 'MD5 hash does not match,' \
+ "archive file $FILENAME corrupt or compromised!"
+ exit 1
+ fi
+
+ # SHA-512
+ if ! [ -f "$SHA512_FILENAME" ]
+ then
+ echo "SHA-512 hash file $SHA512_FILENAME not found," \
+ 'could not verify archive'
+ exit 1
+ fi
+
+ # 128-digit hash, followed by filename
+ SHA512_HASHFILE_REGEX="^[0-9a-f]{128} $FILENAME"
+ if ! grep --extended-regex --line-regex --silent \
+ "$SHA512_HASHFILE_REGEX" "$SHA512_FILENAME"
+ then
+ echo "SHA-512 hash file $SHA512_FILENAME does not contain hash for" \
+ "$FILENAME, could not verify archive"
+ echo 'Hash file contents are:'
+ cat "$SHA512_FILENAME"
+ exit 1
+ fi
+
+ if ! sha512sum --check "$SHA512_FILENAME"
+ then
+ echo 'SHA-512 hash does not match,' \
+ "archive file $FILENAME corrupt or compromised!"
+ exit 1
+ fi
+}
+
+
+################################################################################
+# Body
+
+cd "$INSTALL_DIR"
+echo "Downloading $SRC_URL"
+curl --remote-name "$SRC_URL"
+check_hashes
+tar xvzf "$FILENAME"
+# Copy extra files over
+for FILE in $CHROMIUM_FILES
+do
+ cp "$FILE" "$OUT_DIR"
+done
+
+cd "$BUILD_DIR"
+for FILE in $EXTRA_FILES
+do
+ cp "$FILE" "$OUT_DIR"
+done
+
+cd "$OUT_DIR"
+for FILE in $REMOVE_FILES
+do
+ rm -fr "$FILE"
+done
+
+# Replace with new directory
+cd ..
+mv "$INSTALL_DIR" "$OLD_DIR"
+mv "$PACKAGE_DIR" "$INSTALL_DIR"
+cd "$INSTALL_DIR"
+rm -fr "$OLD_DIR"
diff --git a/deps/v8/tools/callstats.html b/deps/v8/tools/callstats.html
index cb2e0bea3e..b70d40c98a 100644
--- a/deps/v8/tools/callstats.html
+++ b/deps/v8/tools/callstats.html
@@ -1381,6 +1381,8 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.version = version;
}
add(entry) {
+ // Ignore accidentally added Group entries.
+ if (entry.name.startsWith(GroupedEntry.prefix)) return;
entry.page = this;
this.entryDict.set(entry.name, entry);
var added = false;
@@ -1559,7 +1561,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
/StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
Group.add('compile', new Group('Compile', /.*Compile.*/, "#FFAA00"));
Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
- Group.add('callback', new Group('Callback', /.*Callback$/, "#109618"));
+ Group.add('callback', new Group('Callback', /.*Callback.*/, "#109618"));
Group.add('api', new Group('API', /.*API.*/, "#990099"));
Group.add('gc', new Group('GC', /GC|AllocateInTargetSpace/, "#0099C6"));
Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
@@ -1568,7 +1570,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
class GroupedEntry extends Entry {
constructor(group) {
- super(0, 'Group-' + group.name, 0, 0, 0, 0, 0, 0);
+ super(0, GroupedEntry.prefix + group.name, 0, 0, 0, 0, 0, 0);
this.group = group;
this.entries = [];
}
@@ -1636,6 +1638,7 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
return this.getVarianceForProperty('time')
}
}
+ GroupedEntry.prefix = 'Group-';
class UnclassifiedEntry extends GroupedEntry {
constructor(page) {
diff --git a/deps/v8/tools/dev/v8gen.py b/deps/v8/tools/dev/v8gen.py
index f0fb74b709..b8a34e2af2 100755
--- a/deps/v8/tools/dev/v8gen.py
+++ b/deps/v8/tools/dev/v8gen.py
@@ -269,7 +269,7 @@ class GenerateGnArgs(object):
# Artificially increment modification time as our modifications happen too
# fast. This makes sure that gn is properly rebuilding the ninja files.
mtime = os.path.getmtime(gn_args_path) + 1
- with open(gn_args_path, 'aw'):
+ with open(gn_args_path, 'a'):
os.utime(gn_args_path, (mtime, mtime))
return True
diff --git a/deps/v8/tools/external-reference-check.py b/deps/v8/tools/external-reference-check.py
deleted file mode 100644
index be01dec1d0..0000000000
--- a/deps/v8/tools/external-reference-check.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import re
-import os
-import sys
-
-DECLARE_FILE = "src/assembler.h"
-REGISTER_FILE = "src/external-reference-table.cc"
-DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(")
-REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(")
-
-WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
-
-# Ignore those.
-BLACKLISTED = [
- "fixed_typed_array_base_data_offset",
- "page_flags",
- "math_exp_constants",
- "math_exp_log_table",
- "ForDeoptEntry",
-]
-
-def Find(filename, re):
- references = []
- with open(filename, "r") as f:
- for line in f:
- match = re.match(line)
- if match:
- references.append(match.group(1))
- return references
-
-def Main():
- declarations = Find(DECLARE_FILE, DECLARE_RE)
- registrations = Find(REGISTER_FILE, REGISTER_RE)
- difference = list(set(declarations) - set(registrations) - set(BLACKLISTED))
- for reference in difference:
- print("Declared but not registered: ExternalReference::%s" % reference)
- return len(difference) > 0
-
-if __name__ == "__main__":
- sys.exit(Main())
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index 1eae053f2c..b696a8fc36 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -68,5 +68,32 @@ Skip the jitted stack on x64 to where we entered JS last.
Usage: jss
end
+# Print stack trace with assertion scopes.
+define bta
+python
+import re
+frame_re = re.compile("^#(\d+)\s*(?:0x[a-f\d]+ in )?(.+) \(.+ at (.+)")
+assert_re = re.compile("^\s*(\S+) = .+<v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
+btl = gdb.execute("backtrace full", to_string = True).splitlines()
+for l in btl:
+ match = frame_re.match(l)
+ if match:
+ print("[%-2s] %-60s %-40s" % (match.group(1), match.group(2), match.group(3)))
+ match = assert_re.match(l)
+ if match:
+ if match.group(3) == "false":
+ prefix = "Disallow"
+ color = "\033[91m"
+ else:
+ prefix = "Allow"
+ color = "\033[92m"
+ print("%s -> %s %s (%s)\033[0m" % (color, prefix, match.group(2), match.group(1)))
+end
+end
+document bta
+Print stack trace with assertion scopes
+Usage: bta
+end
+
set disassembly-flavor intel
set disable-randomization off
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
index 42bbc20396..02214e3ee9 100644
--- a/deps/v8/tools/ic-explorer.html
+++ b/deps/v8/tools/ic-explorer.html
@@ -40,6 +40,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
"use strict"
var entries = [];
+ var properties = ['type', 'category', 'file', 'filePosition', 'state',
+ 'key', 'isNative', 'map', 'propertiesMode', 'numberOfOwnProperties',
+ 'instanceType'
+ ]
+
class Entry {
constructor(id, line) {
this.id = id;
@@ -50,8 +55,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
if (parts[0][0] !== "[") return;
if (parts[1] === "patching") return;
this.type = parts[0].substr(1);
- this.category = "Other";
- this.map = undefined;
+ this.category = "unknown";
+ this.map = "unknown";
+ this.propertiesMode = "unknown";
+ this.numberOfOwnProperties = 0;
+ this.instanceType = "unknown";
if (this.type.indexOf("Store") !== -1) {
this.category = "Store";
} else if (this.type.indexOf("Load") !== -1) {
@@ -70,13 +78,22 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
var offset = this.parsePositionAndFile(parts, 2);
if (offset == -1) return
this.state = parts[++offset];
- this.map = parts[offset + 1];
- if (this.map !== undefined && this.map.startsWith("map=")) {
- this.map = this.map.substring(4);
- offset++;
- } else {
- this.map = undefined;
- }
+ var mapPart = parts[offset + 1];
+ if (mapPart !== undefined && mapPart.startsWith("map=")) {
+ if (mapPart[4] == "(") {
+ if (mapPart.endsWith(")")) {
+ this.map = mapPart.substr(5, mapPart.length-6);
+ } else {
+ this.map = mapPart.substr(5);
+ }
+ offset++;
+ offset = this.parseMapProperties(parts, offset);
+ } else {
+ this.map = mapPart.substr(4);
+ offset++;
+ }
+ if (this.map == "(nil)") this.map = "unknown";
+ }
if (this.type !== "CompareIC") {
// if there is no address we have a smi key
var address = parts[++offset];
@@ -108,6 +125,17 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
this.isValid = true;
}
+ parseMapProperties(parts, offset) {
+ var next = parts[++offset];
+ if (!next.startsWith('dict')) return offset;
+ this.propertiesMode =
+ next.substr(5) == "0" ? "fast" : "slow";
+ this.numberOfOwnProperties = parts[++offset].substr(4);
+ next = parts[++offset];
+ this.instanceType = next.substr(5, next.length-6);
+ return offset;
+ }
+
parsePositionAndFile(parts, start) {
// find the position of 'at' in the parts array.
var offset = start;
@@ -157,11 +185,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
}
-
- var properties = ['type', 'category', 'file', 'filePosition', 'state',
- 'key', 'isNative', 'map'
- ]
-
class Group {
constructor(property, key, entry) {
this.property = property;
@@ -332,15 +355,32 @@ code is governed by a BSD-style license that can be found in the LICENSE file.
select.add(option);
}
}
+
+ function handleOnLoad() {
+ document.querySelector("#uploadInput").focus();
+ }
</script>
</head>
-<body>
+<body onload="handleOnLoad()">
<h1>
<span style="color: #00FF00">I</span>
<span style="color: #FF00FF">C</span>
<span style="color: #00FFFF">E</span>
</h1> Your IC-Explorer.
+
+ <div id="legend" style="padding-right: 200px">
+ <div style="float:right; border-style: solid; border-width: 1px; padding:20px">
+ 0 uninitialized<br>
+ . premonomorphic<br>
+ 1 monomorphic<br>
+ ^ recompute handler<br>
+ P polymorphic<br>
+ N megamorphic<br>
+ G generic
+ </div>
+ </div>
+
<h2>Usage</h2> Run your script with <code>--trace_ic</code> and upload on this page:<br/>
<code>/path/to/d8 --trace_ic your_script.js > trace.txt</code>
<h2>Data</h2>
diff --git a/deps/v8/tools/ignition/linux_perf_report.py b/deps/v8/tools/ignition/linux_perf_report.py
index eaf85b3f91..69db37cbae 100755
--- a/deps/v8/tools/ignition/linux_perf_report.py
+++ b/deps/v8/tools/ignition/linux_perf_report.py
@@ -52,6 +52,8 @@ examples:
COMPILER_SYMBOLS_RE = re.compile(
r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser")
+JIT_CODE_SYMBOLS_RE = re.compile(
+ r"(LazyCompile|Compile|Eval|Script):(\*|~)")
def strip_function_parameters(symbol):
@@ -70,7 +72,8 @@ def strip_function_parameters(symbol):
return symbol[:-pos]
-def collapsed_callchains_generator(perf_stream, show_all=False,
+def collapsed_callchains_generator(perf_stream, hide_other=False,
+ hide_compiler=False, hide_jit=False,
show_full_signatures=False):
current_chain = []
skip_until_end_of_chain = False
@@ -85,7 +88,8 @@ def collapsed_callchains_generator(perf_stream, show_all=False,
# Empty line signals the end of the callchain.
if not line:
- if not skip_until_end_of_chain and current_chain and show_all:
+ if (not skip_until_end_of_chain and current_chain
+ and not hide_other):
current_chain.append("[other]")
yield current_chain
# Reset parser status.
@@ -101,14 +105,26 @@ def collapsed_callchains_generator(perf_stream, show_all=False,
symbol = line.split(" ", 1)[1].split("+", 1)[0]
if not show_full_signatures:
symbol = strip_function_parameters(symbol)
+
+ # Avoid chains of [unknown]
+ if (symbol == "[unknown]" and current_chain and
+ current_chain[-1] == "[unknown]"):
+ continue
+
current_chain.append(symbol)
if symbol.startswith("BytecodeHandler:"):
+ current_chain.append("[interpreter]")
yield current_chain
skip_until_end_of_chain = True
+ elif JIT_CODE_SYMBOLS_RE.match(symbol):
+ if not hide_jit:
+ current_chain.append("[jit]")
+ yield current_chain
+ skip_until_end_of_chain = True
elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain:
- if show_all:
- current_chain[-1] = "[compiler]"
+ if not hide_compiler:
+ current_chain.append("[compiler]")
yield current_chain
skip_until_end_of_chain = True
elif COMPILER_SYMBOLS_RE.match(symbol):
@@ -181,8 +197,18 @@ def parse_command_line():
dest="output_flamegraph"
)
command_line_parser.add_argument(
- "--show-all", "-a",
- help="show samples outside Ignition bytecode handlers",
+ "--hide-other",
+ help="Hide other samples",
+ action="store_true"
+ )
+ command_line_parser.add_argument(
+ "--hide-compiler",
+ help="Hide samples during compilation",
+ action="store_true"
+ )
+ command_line_parser.add_argument(
+ "--hide-jit",
+ help="Hide samples from JIT code execution",
action="store_true"
)
command_line_parser.add_argument(
@@ -210,8 +236,8 @@ def main():
stdout=subprocess.PIPE)
callchains = collapsed_callchains_generator(
- perf.stdout, program_options.show_all,
- program_options.show_full_signatures)
+ perf.stdout, program_options.hide_other, program_options.hide_compiler,
+ program_options.hide_jit, program_options.show_full_signatures)
if program_options.output_flamegraph:
write_flamegraph_input_file(program_options.output_stream, callchains)
diff --git a/deps/v8/tools/ignition/linux_perf_report_test.py b/deps/v8/tools/ignition/linux_perf_report_test.py
index d9cef75dff..9d163c8adb 100644
--- a/deps/v8/tools/ignition/linux_perf_report_test.py
+++ b/deps/v8/tools/ignition/linux_perf_report_test.py
@@ -40,6 +40,9 @@ PERF_SCRIPT_OUTPUT = """
11111111 Builtin:InterpreterEntryTrampoline
22222222 bar
+ 00000000 hello
+ 11111111 LazyCompile:~Foo
+
11111111 Builtin:InterpreterEntryTrampoline
22222222 bar
"""
@@ -50,22 +53,26 @@ class LinuxPerfReportTest(unittest.TestCase):
perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
callchains = list(ipr.collapsed_callchains_generator(perf_stream))
self.assertListEqual(callchains, [
- ["foo", "BytecodeHandler:bar"],
- ["foo", "BytecodeHandler:bar"],
- ["beep", "BytecodeHandler:bar"],
+ ['firstSymbol', 'secondSymbol', '[other]'],
+ ["foo", "BytecodeHandler:bar", "[interpreter]"],
+ ["foo", "BytecodeHandler:bar", "[interpreter]"],
+ ["beep", "BytecodeHandler:bar", "[interpreter]"],
+ ["hello", "v8::internal::Compiler", "Stub:CEntryStub", "[compiler]"],
+ ["Lost", "[misattributed]"],
+ ["hello", "LazyCompile:~Foo", "[jit]"],
["[entry trampoline]"],
])
- def test_collapsed_callchains_generator_show_other(self):
+ def test_collapsed_callchains_generator_hide_other(self):
perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
callchains = list(ipr.collapsed_callchains_generator(perf_stream,
- show_all=True))
+ hide_other=True,
+ hide_compiler=True,
+ hide_jit=True))
self.assertListEqual(callchains, [
- ['firstSymbol', 'secondSymbol', '[other]'],
- ["foo", "BytecodeHandler:bar"],
- ["foo", "BytecodeHandler:bar"],
- ["beep", "BytecodeHandler:bar"],
- ["hello", "v8::internal::Compiler", "[compiler]"],
+ ["foo", "BytecodeHandler:bar", "[interpreter]"],
+ ["foo", "BytecodeHandler:bar", "[interpreter]"],
+ ["beep", "BytecodeHandler:bar", "[interpreter]"],
["Lost", "[misattributed]"],
["[entry trampoline]"],
])
@@ -125,7 +132,7 @@ class LinuxPerfReportTest(unittest.TestCase):
""")
callchains = list(ipr.collapsed_callchains_generator(perf_stream, False))
self.assertListEqual(callchains, [
- ["foo", "BytecodeHandler:first"],
+ ["foo", "BytecodeHandler:first", "[interpreter]"],
])
def test_compiler_symbols_regex(self):
@@ -137,6 +144,15 @@ class LinuxPerfReportTest(unittest.TestCase):
for compiler_symbol in compiler_symbols:
self.assertTrue(ipr.COMPILER_SYMBOLS_RE.match(compiler_symbol))
+ def test_jit_code_symbols_regex(self):
+ jit_code_symbols = [
+ "LazyCompile:~Foo blah.js",
+ "Eval:*",
+ "Script:*Bar tmp.js",
+ ]
+ for jit_code_symbol in jit_code_symbols:
+ self.assertTrue(ipr.JIT_CODE_SYMBOLS_RE.match(jit_code_symbol))
+
def test_strip_function_parameters(self):
def should_match(signature, name):
self.assertEqual(ipr.strip_function_parameters(signature), name)
@@ -145,3 +161,6 @@ class LinuxPerfReportTest(unittest.TestCase):
should_match("Foo(foomatic::(anonymous)::bar(baz))", "Foo"),
should_match("v8::(anonymous ns)::bar<thing(with, parentheses)>(baz, poe)",
"v8::(anonymous ns)::bar<thing(with, parentheses)>")
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 43d2578165..0517bbf0e6 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -93,9 +93,8 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
i::ScriptData* cached_data_impl = NULL;
// First round of parsing (produce data to cache).
{
- Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
+ Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator(), ZONE_NAME);
ParseInfo info(&zone, script);
- info.set_global();
info.set_cached_data(&cached_data_impl);
info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
v8::base::ElapsedTimer timer;
@@ -111,9 +110,8 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
}
// Second round of parsing (consume cached data).
{
- Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator());
+ Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator(), ZONE_NAME);
ParseInfo info(&zone, script);
- info.set_global();
info.set_cached_data(&cached_data_impl);
info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
v8::base::ElapsedTimer timer;
diff --git a/deps/v8/tools/parser-shell.gyp b/deps/v8/tools/parser-shell.gyp
index 4ef1a82d71..9b94888edf 100644
--- a/deps/v8/tools/parser-shell.gyp
+++ b/deps/v8/tools/parser-shell.gyp
@@ -37,6 +37,7 @@
'type': 'executable',
'dependencies': [
'../src/v8.gyp:v8',
+ '../src/v8.gyp:v8_libbase',
'../src/v8.gyp:v8_libplatform',
],
'conditions': [
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 3be9caf061..f9ae2bdea4 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -396,13 +396,6 @@ class SourceProcessor(SourceFileProcessor):
print "Total violating files: %s" % violations
return success
-
-def CheckExternalReferenceRegistration(workspace):
- code = subprocess.call(
- [sys.executable, join(workspace, "tools", "external-reference-check.py")])
- return code == 0
-
-
def _CheckStatusFileForDuplicateKeys(filepath):
comma_space_bracket = re.compile(", *]")
lines = []
@@ -503,7 +496,6 @@ def Main():
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success &= SourceProcessor().Run(workspace)
- success &= CheckExternalReferenceRegistration(workspace)
success &= CheckStatusFiles(workspace)
if success:
return 0
diff --git a/deps/v8/tools/profviz/composer.js b/deps/v8/tools/profviz/composer.js
index 108911de69..ce625addca 100644
--- a/deps/v8/tools/profviz/composer.js
+++ b/deps/v8/tools/profviz/composer.js
@@ -106,8 +106,6 @@ function PlotScriptComposer(kResX, kResY, error_output) {
new TimerEvent("recompile async", "#CC4499", false, 1),
'V8.CompileEvalMicroSeconds':
new TimerEvent("compile eval", "#CC4400", true, 0),
- 'V8.IcMiss':
- new TimerEvent("ic miss", "#CC9900", false, 0),
'V8.ParseMicroSeconds':
new TimerEvent("parse", "#00CC00", true, 0),
'V8.PreParseMicroSeconds':
diff --git a/deps/v8/tools/profviz/stdio.js b/deps/v8/tools/profviz/stdio.js
index 5a8311dfb2..8ba12e3ce7 100644
--- a/deps/v8/tools/profviz/stdio.js
+++ b/deps/v8/tools/profviz/stdio.js
@@ -30,10 +30,10 @@ var distortion_per_entry = 0;
var range_start_override = undefined;
var range_end_override = undefined;
-if (!processor.parse()) processor.printUsageAndExit();;
+if (!processor.parse()) processor.printUsageAndExit();
var result = processor.result();
var distortion = parseInt(result.distortion);
-if (isNaN(distortion)) processor.printUsageAndExit();;
+if (isNaN(distortion)) processor.printUsageAndExit();
// Convert picoseconds to milliseconds.
distortion_per_entry = distortion / 1000000;
var rangelimits = result.range.split(",");
@@ -43,7 +43,7 @@ if (!isNaN(range_start)) range_start_override = range_start;
if (!isNaN(range_end)) range_end_override = range_end;
var kResX = 1600;
-var kResY = 700;
+var kResY = 600;
function log_error(text) {
print(text);
quit(1);
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index c1a99e8d11..d1a3f48cf8 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -20,7 +20,7 @@ Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""")
+CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel""")
class Preparation(Step):
MESSAGE = "Preparation."
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index ab92e89f3a..a344376628 100644
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -1044,7 +1044,7 @@ Please close rolling in case of a roll revert:
https://v8-roll.appspot.com/
This only works with a Google account.
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_precise_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
TBR=reviewer@chromium.org"""
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index f248dff5cc..e94f59939f 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -67,6 +67,8 @@ TEST_MAP = {
"bot_default": [
"mjsunit",
"cctest",
+ "debugger",
+ "inspector",
"webkit",
"fuzzer",
"message",
@@ -78,6 +80,8 @@ TEST_MAP = {
"default": [
"mjsunit",
"cctest",
+ "debugger",
+ "inspector",
"fuzzer",
"message",
"preparser",
@@ -88,6 +92,8 @@ TEST_MAP = {
"optimize_for_size": [
"mjsunit",
"cctest",
+ "debugger",
+ "inspector",
"webkit",
"intl",
],
@@ -255,6 +261,9 @@ def BuildOptions():
result.add_option("--download-data-only",
help="Deprecated",
default=False, action="store_true")
+ result.add_option("--enable-inspector",
+ help="Indicates a build with inspector support",
+ default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
default="")
@@ -447,8 +456,13 @@ def ProcessOptions(options):
print(">>> Latest GN build found is %s" % latest_config)
options.outdir = os.path.join(DEFAULT_OUT_GN, latest_config)
- build_config_path = os.path.join(
- BASE_DIR, options.outdir, "v8_build_config.json")
+ if options.buildbot:
+ build_config_path = os.path.join(
+ BASE_DIR, options.outdir, options.mode, "v8_build_config.json")
+ else:
+ build_config_path = os.path.join(
+ BASE_DIR, options.outdir, "v8_build_config.json")
+
if os.path.exists(build_config_path):
try:
with open(build_config_path) as f:
@@ -459,6 +473,10 @@ def ProcessOptions(options):
return False
options.auto_detect = True
+ # In auto-detect mode the outdir is always where we found the build config.
+ # This ensures that we'll also take the build products from there.
+ options.outdir = os.path.dirname(build_config_path)
+
options.arch_and_mode = None
options.arch = build_config["v8_target_cpu"]
if options.arch == 'x86':
@@ -466,6 +484,7 @@ def ProcessOptions(options):
options.arch = 'ia32'
options.asan = build_config["is_asan"]
options.dcheck_always_on = build_config["dcheck_always_on"]
+ options.enable_inspector = build_config["v8_enable_inspector"]
options.mode = 'debug' if build_config["is_debug"] else 'release'
options.msan = build_config["is_msan"]
options.no_i18n = not build_config["v8_enable_i18n_support"]
@@ -592,6 +611,13 @@ def ProcessOptions(options):
if options.no_i18n:
TEST_MAP["bot_default"].remove("intl")
TEST_MAP["default"].remove("intl")
+ if not options.enable_inspector:
+ TEST_MAP["default"].remove("inspector")
+ TEST_MAP["bot_default"].remove("inspector")
+ TEST_MAP["optimize_for_size"].remove("inspector")
+ TEST_MAP["default"].remove("debugger")
+ TEST_MAP["bot_default"].remove("debugger")
+ TEST_MAP["optimize_for_size"].remove("debugger")
return True
@@ -702,15 +728,15 @@ def Execute(arch, mode, args, options, suites):
shell_dir = options.shell_dir
if not shell_dir:
- if options.buildbot:
+ if options.auto_detect:
+ # If an output dir with a build was passed, test directly in that
+ # directory.
+ shell_dir = os.path.join(BASE_DIR, options.outdir)
+ elif options.buildbot:
# TODO(machenbach): Get rid of different output folder location on
# buildbot. Currently this is capitalized Release and Debug.
shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
mode = BuildbotToV8Mode(mode)
- elif options.auto_detect:
- # If an output dir with a build was passed, test directly in that
- # directory.
- shell_dir = os.path.join(BASE_DIR, options.outdir)
else:
shell_dir = os.path.join(
BASE_DIR,
@@ -733,14 +759,8 @@ def Execute(arch, mode, args, options, suites):
# Predictable mode is slower.
options.timeout *= 2
- # TODO(machenbach): Remove temporary verbose output on windows after
- # debugging driver-hung-up on XP.
- verbose_output = (
- options.verbose or
- utils.IsWindows() and options.progress == "verbose"
- )
ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
- mode_flags, verbose_output,
+ mode_flags, options.verbose,
options.timeout,
options.isolates,
options.command_prefix,
@@ -851,7 +871,7 @@ def Execute(arch, mode, args, options, suites):
run_networked = not options.no_network
if not run_networked:
- if verbose_output:
+ if options.verbose:
print("Network distribution disabled, running tests locally.")
elif utils.GuessOS() != "linux":
print("Network distribution is only supported on Linux, sorry!")
diff --git a/deps/v8/tools/sanitizers/sancov_merger.py b/deps/v8/tools/sanitizers/sancov_merger.py
index a4cfec1b0c..867f8b4258 100755
--- a/deps/v8/tools/sanitizers/sancov_merger.py
+++ b/deps/v8/tools/sanitizers/sancov_merger.py
@@ -7,7 +7,7 @@
When merging test runner output, the sancov files are expected
to be located in one directory with the file-name pattern:
-<executable name>.test.<id>.sancov
+<executable name>.test.<id>.<attempt>.sancov
For each executable, this script writes a new file:
<executable name>.result.sancov
@@ -48,7 +48,7 @@ CPUS = cpu_count()
# Regexp to find sancov file as output by the v8 test runner. Also grabs the
# executable name in group 1.
-SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.sancov$')
+SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.\d+\.sancov$')
# Regexp to find sancov result files as returned from swarming.
SANCOV_RESULTS_FILE_RE = re.compile(r'^.*\.result\.sancov$')
diff --git a/deps/v8/tools/sanitizers/sancov_merger_test.py b/deps/v8/tools/sanitizers/sancov_merger_test.py
index 93b89eb8a7..899c716a5a 100644
--- a/deps/v8/tools/sanitizers/sancov_merger_test.py
+++ b/deps/v8/tools/sanitizers/sancov_merger_test.py
@@ -11,19 +11,19 @@ import sancov_merger
# executable name -> file list.
FILE_MAP = {
'd8': [
- 'd8.test.1.sancov',
- 'd8.test.2.sancov',
- 'd8.test.3.sancov',
- 'd8.test.4.sancov',
- 'd8.test.5.sancov',
- 'd8.test.6.sancov',
- 'd8.test.7.sancov',
+ 'd8.test.1.1.sancov',
+ 'd8.test.2.1.sancov',
+ 'd8.test.3.1.sancov',
+ 'd8.test.4.1.sancov',
+ 'd8.test.5.1.sancov',
+ 'd8.test.5.2.sancov',
+ 'd8.test.6.1.sancov',
],
'cctest': [
- 'cctest.test.1.sancov',
- 'cctest.test.2.sancov',
- 'cctest.test.3.sancov',
- 'cctest.test.4.sancov',
+ 'cctest.test.1.1.sancov',
+ 'cctest.test.2.1.sancov',
+ 'cctest.test.3.1.sancov',
+ 'cctest.test.4.1.sancov',
],
}
@@ -32,42 +32,42 @@ FILE_MAP = {
# (flag, path, executable name, intermediate result index, file list).
EXPECTED_INPUTS_2 = [
(False, '/some/path', 'cctest', 0, [
- 'cctest.test.1.sancov',
- 'cctest.test.2.sancov']),
+ 'cctest.test.1.1.sancov',
+ 'cctest.test.2.1.sancov']),
(False, '/some/path', 'cctest', 1, [
- 'cctest.test.3.sancov',
- 'cctest.test.4.sancov']),
+ 'cctest.test.3.1.sancov',
+ 'cctest.test.4.1.sancov']),
(False, '/some/path', 'd8', 0, [
- 'd8.test.1.sancov',
- 'd8.test.2.sancov',
- 'd8.test.3.sancov',
- 'd8.test.4.sancov']),
+ 'd8.test.1.1.sancov',
+ 'd8.test.2.1.sancov',
+ 'd8.test.3.1.sancov',
+ 'd8.test.4.1.sancov']),
(False, '/some/path', 'd8', 1, [
- 'd8.test.5.sancov',
- 'd8.test.6.sancov',
- 'd8.test.7.sancov']),
+ 'd8.test.5.1.sancov',
+ 'd8.test.5.2.sancov',
+ 'd8.test.6.1.sancov']),
]
# The same for 4 cpus.
EXPECTED_INPUTS_4 = [
(True, '/some/path', 'cctest', 0, [
- 'cctest.test.1.sancov',
- 'cctest.test.2.sancov']),
+ 'cctest.test.1.1.sancov',
+ 'cctest.test.2.1.sancov']),
(True, '/some/path', 'cctest', 1, [
- 'cctest.test.3.sancov',
- 'cctest.test.4.sancov']),
+ 'cctest.test.3.1.sancov',
+ 'cctest.test.4.1.sancov']),
(True, '/some/path', 'd8', 0, [
- 'd8.test.1.sancov',
- 'd8.test.2.sancov']),
+ 'd8.test.1.1.sancov',
+ 'd8.test.2.1.sancov']),
(True, '/some/path', 'd8', 1, [
- 'd8.test.3.sancov',
- 'd8.test.4.sancov']),
+ 'd8.test.3.1.sancov',
+ 'd8.test.4.1.sancov']),
(True, '/some/path', 'd8', 2, [
- 'd8.test.5.sancov',
- 'd8.test.6.sancov']),
+ 'd8.test.5.1.sancov',
+ 'd8.test.5.2.sancov']),
(True, '/some/path', 'd8', 3, [
- 'd8.test.7.sancov'])]
+ 'd8.test.6.1.sancov'])]
class MergerTests(unittest.TestCase):
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index f3d11a8b5c..4cb9e45a03 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -149,8 +149,9 @@ class TestJob(Job):
Rename files with PIDs to files with unique test IDs, because the number
of tests might be higher than pid_max. E.g.:
- d8.1234.sancov -> d8.test.1.sancov, where 1234 was the process' PID
- and 1 is the test ID.
+ d8.1234.sancov -> d8.test.42.1.sancov, where 1234 was the process' PID,
+ 42 is the test ID and 1 is the attempt (the same test might be rerun on
+ failures).
"""
if context.sancov_dir and output.pid is not None:
sancov_file = os.path.join(
@@ -160,7 +161,10 @@ class TestJob(Job):
if os.path.exists(sancov_file):
parts = sancov_file.split(".")
new_sancov_file = ".".join(
- parts[:-2] + ["test", str(self.test.id)] + parts[-1:])
+ parts[:-2] +
+ ["test", str(self.test.id), str(self.test.run)] +
+ parts[-1:]
+ )
assert not os.path.exists(new_sancov_file)
os.rename(sancov_file, new_sancov_file)
diff --git a/deps/v8/tools/testrunner/testrunner.isolate b/deps/v8/tools/testrunner/testrunner.isolate
index 1e8e9dccb9..533ef68c8f 100644
--- a/deps/v8/tools/testrunner/testrunner.isolate
+++ b/deps/v8/tools/testrunner/testrunner.isolate
@@ -20,5 +20,12 @@
],
},
}],
+ ['is_gn==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/v8_build_config.json',
+ ],
+ },
+ }],
],
}
diff --git a/deps/v8/tools/turbolizer/schedule-view.js b/deps/v8/tools/turbolizer/schedule-view.js
index 2cd49c991f..ef4789211d 100644
--- a/deps/v8/tools/turbolizer/schedule-view.js
+++ b/deps/v8/tools/turbolizer/schedule-view.js
@@ -77,7 +77,7 @@ class ScheduleView extends TextView {
// Parse opcode including []
[
[/^[A-Za-z0-9_]+(\[.*\])?$/, NODE_STYLE, -1],
- [/^[A-Za-z0-9_]+(\[.*\])?/, NODE_STYLE, 3]
+ [/^[A-Za-z0-9_]+(\[(\[.*?\]|.)*?\])?/, NODE_STYLE, 3]
],
// Parse optional parameters
[
diff --git a/deps/v8/tools/update-wasm-fuzzers.sh b/deps/v8/tools/update-wasm-fuzzers.sh
index 3652829c8d..a58681f682 100755
--- a/deps/v8/tools/update-wasm-fuzzers.sh
+++ b/deps/v8/tools/update-wasm-fuzzers.sh
@@ -12,30 +12,30 @@ cd ${TOOLS_DIR}/..
rm -rf test/fuzzer/wasm
rm -rf test/fuzzer/wasm_asmjs
-make x64.debug -j
+make x64.release -j
mkdir -p test/fuzzer/wasm
mkdir -p test/fuzzer/wasm_asmjs
# asm.js
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
- --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
--dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/wasm/asm*
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
- --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
--dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/asm/*
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
- --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
--dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/regress/asm/*
# WASM
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
- --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
--dump-wasm-module-path=./test/fuzzer/wasm/" unittests
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
- --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
--dump-wasm-module-path=./test/fuzzer/wasm/" mjsunit/wasm/*
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
- --mode=debug --no-presubmit --extra-flags="--dump-wasm-module \
+ --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
--dump-wasm-module-path=./test/fuzzer/wasm/" \
$(cd test/; ls cctest/wasm/test-*.cc | \
sed -es/wasm\\///g | sed -es/[.]cc/\\/\\*/g)
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 2229d87d53..0f4384f4e3 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -6,4 +6,4 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up......
-The autoroller bought a round of Himbeerbrause. Suddenly ...
+The autoroller bought a round of Himbeerbrause. Suddenly .......